repo_name (string, 8-75) | hexsha (string, 40) | code (string, 447-163k) | apis (sequence) | file_path (string, 7-127) | api_extract (string, 346-104k)
---|---|---|---|---|---|
kalosisz/tensorflow | b7ecd75b24f577b73500024fe91d2ea0c806d05a |
# Lint as: python2, python3
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for lite.py functionality related to TensorFlow 2.0."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl.testing import parameterized
from six.moves import zip
from tensorflow.lite.python.interpreter import Interpreter
from tensorflow.python.eager import def_function
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import tracking
class ModelTest(test_util.TensorFlowTestCase, parameterized.TestCase):
"""Base test class for TensorFlow Lite 2.x model tests."""
def _evaluateTFLiteModel(self, tflite_model, input_data, input_shapes=None):
"""Evaluates the model on the `input_data`.
Args:
tflite_model: TensorFlow Lite model.
input_data: List of EagerTensor const ops containing the input data for
each input tensor.
input_shapes: List of tuples representing the `shape_signature` and the
new shape of each input tensor that has unknown dimensions.
Returns:
[np.ndarray]
"""
interpreter = Interpreter(model_content=tflite_model)
input_details = interpreter.get_input_details()
if input_shapes:
for idx, (shape_signature, final_shape) in enumerate(input_shapes):
self.assertTrue(
(input_details[idx]['shape_signature'] == shape_signature).all())
index = input_details[idx]['index']
interpreter.resize_tensor_input(index, final_shape, strict=True)
interpreter.allocate_tensors()
output_details = interpreter.get_output_details()
input_details = interpreter.get_input_details()
for input_tensor, tensor_data in zip(input_details, input_data):
interpreter.set_tensor(input_tensor['index'], tensor_data.numpy())
interpreter.invoke()
return [
interpreter.get_tensor(details['index']) for details in output_details
]
def _evaluateTFLiteModelUsingSignatureDef(self, tflite_model, signature_key,
inputs):
"""Evaluates the model on the `inputs`.
Args:
tflite_model: TensorFlow Lite model.
signature_key: Signature key.
      inputs: Map from input tensor names in the SignatureDef to tensor values.
    Returns:
      A dictionary of outputs, keyed by the output names in the SignatureDef
      `signature_key`; each value is the corresponding output tensor.
"""
interpreter = Interpreter(model_content=tflite_model)
signature_runner = interpreter.get_signature_runner(signature_key)
return signature_runner(**inputs)
def _getSimpleVariableModel(self):
root = tracking.AutoTrackable()
root.v1 = variables.Variable(3.)
root.v2 = variables.Variable(2.)
root.f = def_function.function(lambda x: root.v1 * root.v2 * x)
return root
def _getSimpleModelWithVariables(self):
class SimpleModelWithOneVariable(tracking.AutoTrackable):
"""Basic model with 1 variable."""
def __init__(self):
super(SimpleModelWithOneVariable, self).__init__()
self.var = variables.Variable(array_ops.zeros((1, 10), name='var'))
@def_function.function
def assign_add(self, x):
self.var.assign_add(x)
return self.var
return SimpleModelWithOneVariable()
def _getMultiFunctionModel(self):
class BasicModel(tracking.AutoTrackable):
"""Basic model with multiple functions."""
def __init__(self):
self.y = None
self.z = None
@def_function.function
def add(self, x):
if self.y is None:
self.y = variables.Variable(2.)
return x + self.y
@def_function.function
def sub(self, x):
if self.z is None:
self.z = variables.Variable(3.)
return x - self.z
@def_function.function
def mul_add(self, x, y):
if self.z is None:
self.z = variables.Variable(3.)
return x * self.z + y
return BasicModel()
def _getMultiFunctionModelWithSharedWeight(self):
class BasicModelWithSharedWeight(tracking.AutoTrackable):
"""Model with multiple functions and a shared weight."""
def __init__(self):
self.weight = constant_op.constant([1.0],
shape=(1, 512, 512, 1),
dtype=dtypes.float32)
@def_function.function
def add(self, x):
return x + self.weight
@def_function.function
def sub(self, x):
return x - self.weight
@def_function.function
def mul(self, x):
return x * self.weight
return BasicModelWithSharedWeight()
def _assertValidDebugInfo(self, debug_info):
"""Verify the DebugInfo is valid."""
file_names = set()
for file_path in debug_info.files:
file_names.add(os.path.basename(file_path))
    # To make the test independent of how the nodes are created, we only
    # assert the name of this test file.
self.assertIn('lite_v2_test.py', file_names)
self.assertNotIn('lite_test.py', file_names)
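

# Illustrative usage (a sketch, not part of the original file): a concrete
# test built on `ModelTest`. It assumes the public `tf.lite.TFLiteConverter`
# API; the class and test names below are hypothetical.
import tensorflow as tf


class ExampleModelTest(ModelTest):

  def testSimpleVariableModel(self):
    root = self._getSimpleVariableModel()
    concrete_func = root.f.get_concrete_function(
        tf.TensorSpec(shape=[1], dtype=tf.float32))
    converter = tf.lite.TFLiteConverter.from_concrete_functions(
        [concrete_func])
    tflite_model = converter.convert()
    input_data = constant_op.constant(1., shape=[1])
    # root.f computes v1 * v2 * x = 3. * 2. * 1. = 6.
    actual_value = self._evaluateTFLiteModel(tflite_model, [input_data])[0]
    self.assertAllClose(actual_value, 6.)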
| [
"tensorflow.python.eager.def_function.function",
"tensorflow.python.training.tracking.tracking.AutoTrackable",
"tensorflow.lite.python.interpreter.Interpreter",
"tensorflow.python.ops.variables.Variable",
"tensorflow.python.ops.array_ops.zeros",
"tensorflow.python.framework.constant_op.constant"
] | tensorflow/lite/python/lite_v2_test_util.py | [(53, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (66, 'six.moves.zip', 'zip', (['input_details', 'input_data'], {}), False, 'from six.moves import zip\n'), (87, 'tensorflow.lite.python.interpreter.Interpreter', 'Interpreter', ([], {'model_content': 'tflite_model'}), False, 'from tensorflow.lite.python.interpreter import Interpreter\n'), (92, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (93, 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(3.0)'], {}), False, 'from tensorflow.python.ops import variables\n'), (94, 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(2.0)'], {}), False, 'from tensorflow.python.ops import variables\n'), (95, 'tensorflow.python.eager.def_function.function', 'def_function.function', (['(lambda x: root.v1 * root.v2 * x)'], {}), False, 'from tensorflow.python.eager import def_function\n'), (149, 'tensorflow.python.framework.constant_op.constant', 'constant_op.constant', (['[1.0]'], {'shape': '(1, 512, 512, 1)', 'dtype': 'dtypes.float32'}), False, 'from tensorflow.python.framework import constant_op\n'), (171, 'os.path.basename', 'os.path.basename', (['file_path'], {}), False, 'import os\n'), (105, 'tensorflow.python.ops.array_ops.zeros', 'array_ops.zeros', (['(1, 10)'], {'name': '"""var"""'}), False, 'from tensorflow.python.ops import array_ops\n'), (126, 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(2.0)'], {}), False, 'from tensorflow.python.ops import variables\n'), (132, 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(3.0)'], {}), False, 'from tensorflow.python.ops import variables\n'), (138, 'tensorflow.python.ops.variables.Variable', 'variables.Variable', (['(3.0)'], {}), False, 'from tensorflow.python.ops import variables\n')] |
ESWZY/federated | 1693d5fdd25938dc9aadede8d103ed117d1d34c9 |
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example showing how to run a multi-machine simulation.
In order to run this example, you must have a running instance of the
Executor Service, either locally or on Kubernetes.
The example trains a model on EMNIST for a small number of rounds, using a
RemoteExecutor to distribute the work to the Executor Service.
"""
import collections
import warnings
from absl import app
from absl import flags
import grpc
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
FLAGS = flags.FLAGS
flags.DEFINE_string('host', None, 'The host to connect to.')
flags.mark_flag_as_required('host')
flags.DEFINE_string('port', '8000', 'The port to connect to.')
flags.DEFINE_integer('n_clients', 10, 'Number of clients.')
flags.DEFINE_integer('n_rounds', 3, 'Number of rounds.')
def preprocess(dataset):
def element_fn(element):
return collections.OrderedDict([
('x', tf.reshape(element['pixels'], [-1])),
('y', tf.reshape(element['label'], [1])),
])
return dataset.repeat(NUM_EPOCHS).map(element_fn).batch(BATCH_SIZE)
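# For EMNIST, `element['pixels']` is a 28x28 float tensor and `element['label']`
# a scalar, so each preprocessed batch has 'x' of shape [BATCH_SIZE, 784] and
# 'y' of shape [BATCH_SIZE, 1] (the final batch may be smaller).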
def make_federated_data(client_data, client_ids):
return [
preprocess(client_data.create_tf_dataset_for_client(x))
for x in client_ids
]
NUM_EPOCHS = 10
BATCH_SIZE = 20
def make_remote_executor(inferred_cardinalities):
"""Make remote executor."""
def create_worker_stack(ex):
ex = tff.framework.ThreadDelegatingExecutor(ex)
return tff.framework.ReferenceResolvingExecutor(ex)
client_ex = []
num_clients = inferred_cardinalities.get(tff.CLIENTS, None)
if num_clients:
print('Inferred that there are {} clients'.format(num_clients))
else:
print('No CLIENTS placement provided')
for _ in range(num_clients or 0):
channel = grpc.insecure_channel('{}:{}'.format(FLAGS.host, FLAGS.port))
remote_ex = tff.framework.RemoteExecutor(channel)
worker_stack = create_worker_stack(remote_ex)
client_ex.append(worker_stack)
federating_strategy_factory = tff.framework.FederatedResolvingStrategy.factory(
{
tff.SERVER: create_worker_stack(tff.framework.EagerTFExecutor()),
tff.CLIENTS: client_ex,
})
unplaced_ex = create_worker_stack(tff.framework.EagerTFExecutor())
federating_ex = tff.framework.FederatingExecutor(federating_strategy_factory,
unplaced_ex)
return tff.framework.ReferenceResolvingExecutor(federating_ex)
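# Resulting stack (sketch): a ReferenceResolvingExecutor wraps a
# FederatingExecutor whose SERVER placement runs on a local EagerTFExecutor
# and whose CLIENTS placement fans out to one RemoteExecutor (one gRPC
# channel) per inferred client.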
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
warnings.simplefilter('ignore')
np.random.seed(0)
emnist_train, _ = tff.simulation.datasets.emnist.load_data()
sample_clients = emnist_train.client_ids[0:FLAGS.n_clients]
federated_train_data = make_federated_data(emnist_train, sample_clients)
example_dataset = emnist_train.create_tf_dataset_for_client(
emnist_train.client_ids[0])
preprocessed_example_dataset = preprocess(example_dataset)
input_spec = preprocessed_example_dataset.element_spec
def model_fn():
model = tf.keras.models.Sequential([
tf.keras.layers.Input(shape=(784,)),
tf.keras.layers.Dense(10, kernel_initializer='zeros'),
tf.keras.layers.Softmax(),
])
return tff.learning.from_keras_model(
model,
input_spec=input_spec,
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
iterative_process = tff.learning.build_federated_averaging_process(
model_fn,
client_optimizer_fn=lambda: tf.keras.optimizers.SGD(learning_rate=0.02))
factory = tff.framework.ResourceManagingExecutorFactory(make_remote_executor)
context = tff.framework.ExecutionContext(factory)
tff.framework.set_default_context(context)
state = iterative_process.initialize()
state, metrics = iterative_process.next(state, federated_train_data)
print('round 1, metrics={}'.format(metrics))
for round_num in range(2, FLAGS.n_rounds + 1):
state, metrics = iterative_process.next(state, federated_train_data)
print('round {:2d}, metrics={}'.format(round_num, metrics))
if __name__ == '__main__':
app.run(main)
| [
"numpy.random.seed",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.keras.layers.Softmax",
"tensorflow.keras.layers.Input"
] | tensorflow_federated/python/examples/remote_execution/remote_executor_example.py | [(35, 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""host"""', 'None', '"""The host to connect to."""'], {}), False, 'from absl import flags\n'), (36, 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""host"""'], {}), False, 'from absl import flags\n'), (37, 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""port"""', '"""8000"""', '"""The port to connect to."""'], {}), False, 'from absl import flags\n'), (38, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""n_clients"""', '(10)', '"""Number of clients."""'], {}), False, 'from absl import flags\n'), (39, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""n_rounds"""', '(3)', '"""Number of rounds."""'], {}), False, 'from absl import flags\n'), (90, 'tensorflow_federated.framework.FederatingExecutor', 'tff.framework.FederatingExecutor', (['federating_strategy_factory', 'unplaced_ex'], {}), True, 'import tensorflow_federated as tff\n'), (92, 'tensorflow_federated.framework.ReferenceResolvingExecutor', 'tff.framework.ReferenceResolvingExecutor', (['federating_ex'], {}), True, 'import tensorflow_federated as tff\n'), (99, 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), False, 'import warnings\n'), (101, 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), True, 'import numpy as np\n'), (103, 'tensorflow_federated.simulation.datasets.emnist.load_data', 'tff.simulation.datasets.emnist.load_data', ([], {}), True, 'import tensorflow_federated as tff\n'), (131, 'tensorflow_federated.framework.ResourceManagingExecutorFactory', 'tff.framework.ResourceManagingExecutorFactory', (['make_remote_executor'], {}), True, 'import tensorflow_federated as tff\n'), (132, 'tensorflow_federated.framework.ExecutionContext', 'tff.framework.ExecutionContext', (['factory'], {}), True, 'import tensorflow_federated as tff\n'), (133, 'tensorflow_federated.framework.set_default_context', 'tff.framework.set_default_context', (['context'], {}), True, 'import tensorflow_federated as tff\n'), (146, 'absl.app.run', 'app.run', (['main'], {}), False, 'from absl import app\n'), (68, 'tensorflow_federated.framework.ThreadDelegatingExecutor', 'tff.framework.ThreadDelegatingExecutor', (['ex'], {}), True, 'import tensorflow_federated as tff\n'), (69, 'tensorflow_federated.framework.ReferenceResolvingExecutor', 'tff.framework.ReferenceResolvingExecutor', (['ex'], {}), True, 'import tensorflow_federated as tff\n'), (80, 'tensorflow_federated.framework.RemoteExecutor', 'tff.framework.RemoteExecutor', (['channel'], {}), True, 'import tensorflow_federated as tff\n'), (89, 'tensorflow_federated.framework.EagerTFExecutor', 'tff.framework.EagerTFExecutor', ([], {}), True, 'import tensorflow_federated as tff\n'), (97, 'absl.app.UsageError', 'app.UsageError', (['"""Too many command-line arguments."""'], {}), False, 'from absl import app\n'), (86, 'tensorflow_federated.framework.EagerTFExecutor', 'tff.framework.EagerTFExecutor', ([], {}), True, 'import tensorflow_federated as tff\n'), (117, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': '(784,)'}), True, 'import tensorflow as tf\n'), (118, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'kernel_initializer': '"""zeros"""'}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.keras.layers.Softmax', 'tf.keras.layers.Softmax', ([], {}), True, 'import tensorflow as tf\n'), (124, 
'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': '(0.02)'}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.reshape', 'tf.reshape', (["element['pixels']", '[-1]'], {}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.reshape', 'tf.reshape', (["element['label']", '[1]'], {}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {}), True, 'import tensorflow as tf\n')] |
leandro-gracia-gil/addons | d981b0f1d1bc23f697d159eb1510c24b3c476d28 |
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements Snake layer."""
import tensorflow as tf
from typeguard import typechecked
from tensorflow_addons.activations.snake import snake
from tensorflow_addons.utils import types
@tf.keras.utils.register_keras_serializable(package="Addons")
class Snake(tf.keras.layers.Layer):
"""Snake layer to learn periodic functions with the trainable `frequency` scalar.
https://arxiv.org/abs/2006.08195
Arguments:
frequency_initializer: Initializer for the `frequency` scalar.
"""
@typechecked
def __init__(self, frequency_initializer: types.Initializer = "ones", **kwargs):
super().__init__(**kwargs)
self.frequency_initializer = tf.keras.initializers.get(frequency_initializer)
        self.frequency = self.add_weight(
            initializer=self.frequency_initializer, trainable=True
        )
def call(self, inputs):
return snake(inputs, self.frequency)
def get_config(self):
config = {
"frequency_initializer": tf.keras.initializers.serialize(
self.frequency_initializer
),
}
base_config = super().get_config()
return {**base_config, **config}
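

# Illustrative usage (a sketch, not part of the original file): Snake as a
# drop-in Keras layer. The snake activation computes
# x + sin(frequency * x)**2 / frequency (https://arxiv.org/abs/2006.08195).
if __name__ == "__main__":
    layer = Snake(frequency_initializer="ones")
    x = tf.random.uniform((2, 5))
    y = layer(x)  # same shape as x; `frequency` is learned during training
    print(y.shape)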
| [
"tensorflow.keras.initializers.serialize",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.keras.initializers.get"
] | tensorflow_addons/layers/snake.py | [(25, 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""Addons"""'}), True, 'import tensorflow as tf\n'), (38, 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', (['frequency_initializer'], {}), True, 'import tensorflow as tf\n'), (44, 'tensorflow_addons.activations.snake.snake', 'snake', (['inputs', 'self.frequency'], {}), False, 'from tensorflow_addons.activations.snake import snake\n'), (48, 'tensorflow.keras.initializers.serialize', 'tf.keras.initializers.serialize', (['self.frequency_initializer'], {}), True, 'import tensorflow as tf\n')] |
ganik/DeepSpeedExamples | 174ae3bc8dbb688cfaccb4afa15d6e2cdbe19ce5 |
# coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 MobileBERT model. """
import warnings
from dataclasses import dataclass
from typing import Dict, Optional, Tuple
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
ModelOutput,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
replace_return_docstrings,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPooling,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
TFNextSentencePredictorOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFMultipleChoiceLoss,
TFNextSentencePredictionLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFTokenClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_mobilebert import MobileBertConfig
logger = logging.get_logger(__name__)
_CONFIG_FOR_DOC = "MobileBertConfig"
_TOKENIZER_FOR_DOC = "MobileBertTokenizer"
TF_MOBILEBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"google/mobilebert-uncased",
# See all MobileBERT models at https://huggingface.co/models?filter=mobilebert
]
class TFMobileBertIntermediate(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(config.intermediate_size, name="dense")
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
else:
self.intermediate_act_fn = config.hidden_act
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class TFLayerNorm(tf.keras.layers.LayerNormalization):
def __init__(self, feat_size, *args, **kwargs):
super().__init__(*args, **kwargs)
class TFNoNorm(tf.keras.layers.Layer):
def __init__(self, feat_size, epsilon=None, **kwargs):
super().__init__(**kwargs)
self.feat_size = feat_size
    def build(self, input_shape):
        self.bias = self.add_weight("bias", shape=[self.feat_size], initializer="zeros")
        self.weight = self.add_weight("weight", shape=[self.feat_size], initializer="ones")
        super().build(input_shape)
def call(self, inputs: tf.Tensor):
return inputs * self.weight + self.bias
NORM2FN = {"layer_norm": TFLayerNorm, "no_norm": TFNoNorm}
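# `no_norm` replaces true layer normalization with a plain per-feature affine
# transform (weight * x + bias, with no mean/variance statistics), which is
# cheaper at inference time; MobileBERT configs use it by default.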
class TFMobileBertEmbeddings(tf.keras.layers.Layer):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.trigram_input = config.trigram_input
self.embedding_size = config.embedding_size
self.vocab_size = config.vocab_size
self.hidden_size = config.hidden_size
self.type_vocab_size = config.type_vocab_size
self.max_position_embeddings = config.max_position_embeddings
self.initializer_range = config.initializer_range
self.embeddings_sum = tf.keras.layers.Add()
self.embedding_transformation = tf.keras.layers.Dense(config.hidden_size, name="embedding_transformation")
# self.LayerNorm is not snake-cased to stick with TensorFlow model variable name and be able to load
# any TensorFlow checkpoint file
self.LayerNorm = NORM2FN[config.normalization_type](
config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
)
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def build(self, input_shape):
with tf.name_scope("word_embeddings"):
self.weight = self.add_weight(
name="weight",
shape=[self.vocab_size, self.embedding_size],
initializer=get_initializer(initializer_range=self.initializer_range),
)
with tf.name_scope("token_type_embeddings"):
self.token_type_embeddings = self.add_weight(
name="embeddings",
shape=[self.type_vocab_size, self.hidden_size],
initializer=get_initializer(initializer_range=self.initializer_range),
)
with tf.name_scope("position_embeddings"):
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.max_position_embeddings, self.hidden_size],
initializer=get_initializer(initializer_range=self.initializer_range),
)
super().build(input_shape)
def call(self, input_ids=None, position_ids=None, token_type_ids=None, inputs_embeds=None, training=False):
"""
        Applies the word, position and token-type embeddings to the input tensors.
Returns:
final_embeddings (:obj:`tf.Tensor`): output embedding tensor.
"""
assert not (input_ids is None and inputs_embeds is None)
if input_ids is not None:
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
if self.trigram_input:
# From the paper MobileBERT: a Compact Task-Agnostic BERT for Resource-Limited
# Devices (https://arxiv.org/abs/2004.02984)
#
# The embedding table in BERT models accounts for a substantial proportion of model size. To compress
# the embedding layer, we reduce the embedding dimension to 128 in MobileBERT.
# Then, we apply a 1D convolution with kernel size 3 on the raw token embedding to produce a 512
# dimensional output.
inputs_embeds = tf.concat(
[
tf.pad(inputs_embeds[:, 1:], ((0, 0), (0, 1), (0, 0))),
inputs_embeds,
tf.pad(inputs_embeds[:, :-1], ((0, 0), (1, 0), (0, 0))),
],
axis=2,
)
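            # Shape sketch: with `inputs_embeds` of shape
            # [batch, seq, embedding_size] (128 for MobileBERT), concatenating
            # the next-token, current and previous-token embeddings along the
            # last axis yields [batch, seq, 3 * embedding_size] = [batch, seq, 384].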
if self.trigram_input or self.embedding_size != self.hidden_size:
inputs_embeds = self.embedding_transformation(inputs_embeds)
if position_ids is None:
position_ids = tf.expand_dims(tf.range(start=0, limit=input_shape[-1]), axis=0)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
position_embeds = tf.tile(input=position_embeds, multiples=(input_shape[0], 1, 1))
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = self.embeddings_sum(inputs=[inputs_embeds, position_embeds, token_type_embeds])
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
class TFMobileBertSelfAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
"The hidden size (%d) is not a multiple of the number of attention "
"heads (%d)" % (config.hidden_size, config.num_attention_heads)
)
self.num_attention_heads = config.num_attention_heads
self.output_attentions = config.output_attentions
assert config.hidden_size % config.num_attention_heads == 0
self.attention_head_size = int(config.true_hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
)
self.key = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
)
self.value = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
)
self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x, batch_size):
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
return tf.transpose(x, perm=[0, 2, 1, 3])
def call(
self, query_tensor, key_tensor, value_tensor, attention_mask, head_mask, output_attentions, training=False
):
batch_size = shape_list(attention_mask)[0]
mixed_query_layer = self.query(query_tensor)
mixed_key_layer = self.key(key_tensor)
mixed_value_layer = self.value(value_tensor)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = tf.matmul(
query_layer, key_layer, transpose_b=True
) # (batch size, num_heads, seq_len_q, seq_len_k)
dk = tf.cast(shape_list(key_layer)[-1], dtype=attention_scores.dtype) # scale attention_scores
attention_scores = attention_scores / tf.math.sqrt(dk)
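        # This implements scaled dot-product attention from "Attention Is All
        # You Need": softmax(Q K^T / sqrt(d_k)) V, with d_k the per-head size.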
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the
            # TFMobileBertModel call() function).
attention_mask = tf.cast(attention_mask, dtype=attention_scores.dtype)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = tf.nn.softmax(attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs, training=training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
context_layer = tf.reshape(
context_layer, (batch_size, -1, self.all_head_size)
) # (batch_size, seq_len_q, all_head_size)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class TFMobileBertSelfOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.use_bottleneck = config.use_bottleneck
self.dense = tf.keras.layers.Dense(
config.true_hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = NORM2FN[config.normalization_type](
config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
)
if not self.use_bottleneck:
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, residual_tensor, training=False):
hidden_states = self.dense(hidden_states)
if not self.use_bottleneck:
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + residual_tensor)
return hidden_states
class TFMobileBertAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.self = TFMobileBertSelfAttention(config, name="self")
self.mobilebert_output = TFMobileBertSelfOutput(config, name="output")
def prune_heads(self, heads):
raise NotImplementedError
def call(
self,
query_tensor,
key_tensor,
value_tensor,
layer_input,
attention_mask,
head_mask,
output_attentions,
training=False,
):
self_outputs = self.self(
query_tensor, key_tensor, value_tensor, attention_mask, head_mask, output_attentions, training=training
)
attention_output = self.mobilebert_output(self_outputs[0], layer_input, training=training)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class TFOutputBottleneck(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(config.hidden_size, name="dense")
self.LayerNorm = NORM2FN[config.normalization_type](
config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
)
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, residual_tensor, training=False):
layer_outputs = self.dense(hidden_states)
layer_outputs = self.dropout(layer_outputs, training=training)
layer_outputs = self.LayerNorm(layer_outputs + residual_tensor)
return layer_outputs
class TFMobileBertOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.use_bottleneck = config.use_bottleneck
self.dense = tf.keras.layers.Dense(
config.true_hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = NORM2FN[config.normalization_type](
config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
)
if not self.use_bottleneck:
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
else:
self.bottleneck = TFOutputBottleneck(config, name="bottleneck")
def call(self, hidden_states, residual_tensor_1, residual_tensor_2, training=False):
hidden_states = self.dense(hidden_states)
if not self.use_bottleneck:
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + residual_tensor_1)
else:
hidden_states = self.LayerNorm(hidden_states + residual_tensor_1)
hidden_states = self.bottleneck(hidden_states, residual_tensor_2)
return hidden_states
class TFBottleneckLayer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(config.intra_bottleneck_size, name="dense")
self.LayerNorm = NORM2FN[config.normalization_type](
config.intra_bottleneck_size, epsilon=config.layer_norm_eps, name="LayerNorm"
)
def call(self, inputs):
hidden_states = self.dense(inputs)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class TFBottleneck(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.key_query_shared_bottleneck = config.key_query_shared_bottleneck
self.use_bottleneck_attention = config.use_bottleneck_attention
self.bottleneck_input = TFBottleneckLayer(config, name="input")
if self.key_query_shared_bottleneck:
self.attention = TFBottleneckLayer(config, name="attention")
def call(self, hidden_states):
        # This method can return three different tuples of values. These different values make use of bottlenecks,
        # which are linear layers used to project the hidden states to a lower-dimensional vector, reducing memory
        # usage. These linear layers have weights that are learned during training.
#
# If `config.use_bottleneck_attention`, it will return the result of the bottleneck layer four times for the
# key, query, value, and "layer input" to be used by the attention layer.
        # The last of these values, the "layer input", will be used as a residual tensor
        # in the attention self output, after the attention scores have been computed.
#
# If not `config.use_bottleneck_attention` and `config.key_query_shared_bottleneck`, this will return
# four values, three of which have been passed through a bottleneck: the query and key, passed through the same
# bottleneck, and the residual layer to be applied in the attention self output, through another bottleneck.
#
# Finally, in the last case, the values for the query, key and values are the hidden states without bottleneck,
# and the residual layer will be this value passed through a bottleneck.
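        # Summary of the returned (query, key, value, layer_input) tuples,
        # with h = hidden_states, b = bottleneck_input(h), s = attention(h):
        #   use_bottleneck_attention:    (b, b, b, b)
        #   key_query_shared_bottleneck: (s, s, h, b)
        #   otherwise:                   (h, h, h, b)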
bottlenecked_hidden_states = self.bottleneck_input(hidden_states)
if self.use_bottleneck_attention:
return (bottlenecked_hidden_states,) * 4
elif self.key_query_shared_bottleneck:
shared_attention_input = self.attention(hidden_states)
return (shared_attention_input, shared_attention_input, hidden_states, bottlenecked_hidden_states)
else:
return (hidden_states, hidden_states, hidden_states, bottlenecked_hidden_states)
class TFFFNOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(config.true_hidden_size, name="dense")
self.LayerNorm = NORM2FN[config.normalization_type](
config.true_hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm"
)
def call(self, hidden_states, residual_tensor):
hidden_states = self.dense(hidden_states)
hidden_states = self.LayerNorm(hidden_states + residual_tensor)
return hidden_states
class TFFFNLayer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.intermediate = TFMobileBertIntermediate(config, name="intermediate")
self.mobilebert_output = TFFFNOutput(config, name="output")
def call(self, hidden_states):
intermediate_output = self.intermediate(hidden_states)
layer_outputs = self.mobilebert_output(intermediate_output, hidden_states)
return layer_outputs
class TFMobileBertLayer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.use_bottleneck = config.use_bottleneck
self.num_feedforward_networks = config.num_feedforward_networks
self.attention = TFMobileBertAttention(config, name="attention")
self.intermediate = TFMobileBertIntermediate(config, name="intermediate")
self.mobilebert_output = TFMobileBertOutput(config, name="output")
if self.use_bottleneck:
self.bottleneck = TFBottleneck(config, name="bottleneck")
if config.num_feedforward_networks > 1:
self.ffn = [
TFFFNLayer(config, name="ffn.{}".format(i)) for i in range(config.num_feedforward_networks - 1)
]
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
if self.use_bottleneck:
query_tensor, key_tensor, value_tensor, layer_input = self.bottleneck(hidden_states)
else:
query_tensor, key_tensor, value_tensor, layer_input = [hidden_states] * 4
attention_outputs = self.attention(
query_tensor,
key_tensor,
value_tensor,
layer_input,
attention_mask,
head_mask,
output_attentions,
training=training,
)
attention_output = attention_outputs[0]
s = (attention_output,)
if self.num_feedforward_networks != 1:
for i, ffn_module in enumerate(self.ffn):
attention_output = ffn_module(attention_output)
s += (attention_output,)
intermediate_output = self.intermediate(attention_output)
layer_output = self.mobilebert_output(intermediate_output, attention_output, hidden_states, training=training)
outputs = (
(layer_output,)
+ attention_outputs[1:]
+ (
tf.constant(0),
query_tensor,
key_tensor,
value_tensor,
layer_input,
attention_output,
intermediate_output,
)
+ s
) # add attentions if we output them
return outputs
class TFMobileBertEncoder(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.layer = [TFMobileBertLayer(config, name="layer_._{}".format(i)) for i in range(config.num_hidden_layers)]
def call(
self,
hidden_states,
attention_mask,
head_mask,
output_attentions,
output_hidden_states,
return_dict,
training=False,
):
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i], output_attentions, training=training
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class TFMobileBertPooler(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.do_activate = config.classifier_activation
if self.do_activate:
self.dense = tf.keras.layers.Dense(
config.hidden_size,
kernel_initializer=get_initializer(config.initializer_range),
activation="tanh",
name="dense",
)
def call(self, hidden_states):
# We "pool" the model by simply taking the hidden state corresponding
# to the first token.
first_token_tensor = hidden_states[:, 0]
if not self.do_activate:
return first_token_tensor
else:
pooled_output = self.dense(first_token_tensor)
return pooled_output
class TFMobileBertPredictionHeadTransform(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if isinstance(config.hidden_act, str):
self.transform_act_fn = get_tf_activation(config.hidden_act)
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = NORM2FN["layer_norm"](config.hidden_size, epsilon=config.layer_norm_eps, name="LayerNorm")
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
class TFMobileBertLMPredictionHead(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.transform = TFMobileBertPredictionHeadTransform(config, name="transform")
self.vocab_size = config.vocab_size
self.config = config
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
self.dense = self.add_weight(
shape=(self.config.hidden_size - self.config.embedding_size, self.vocab_size),
initializer="zeros",
trainable=True,
name="dense/weight",
)
self.decoder = self.add_weight(
shape=(self.config.vocab_size, self.config.embedding_size),
initializer="zeros",
trainable=True,
name="decoder/weight",
)
super().build(input_shape)
def get_output_embeddings(self):
return self
def set_output_embeddings(self, value):
self.decoder = value
self.vocab_size = shape_list(value)[0]
def get_bias(self):
return {"bias": self.bias}
def set_bias(self, value):
self.bias = value["bias"]
self.vocab_size = shape_list(value["bias"])[0]
def call(self, hidden_states):
hidden_states = self.transform(hidden_states)
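        # Project from hidden_size back to the vocabulary: stacking the
        # transposed decoder ([embedding_size, vocab_size]) on top of `dense`
        # ([hidden_size - embedding_size, vocab_size]) gives a
        # [hidden_size, vocab_size] matrix, so the logits below have shape
        # [batch, seq, vocab_size].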
hidden_states = tf.matmul(hidden_states, tf.concat([tf.transpose(self.decoder), self.dense], axis=0))
hidden_states = hidden_states + self.bias
return hidden_states
class TFMobileBertMLMHead(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.predictions = TFMobileBertLMPredictionHead(config, name="predictions")
def call(self, sequence_output):
prediction_scores = self.predictions(sequence_output)
return prediction_scores
@keras_serializable
class TFMobileBertMainLayer(tf.keras.layers.Layer):
config_class = MobileBertConfig
def __init__(self, config, add_pooling_layer=True, **kwargs):
super().__init__(**kwargs)
self.config = config
self.num_hidden_layers = config.num_hidden_layers
self.output_attentions = config.output_attentions
self.output_hidden_states = config.output_hidden_states
self.return_dict = config.use_return_dict
self.embeddings = TFMobileBertEmbeddings(config, name="embeddings")
self.encoder = TFMobileBertEncoder(config, name="encoder")
self.pooler = TFMobileBertPooler(config, name="pooler") if add_pooling_layer else None
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, value):
self.embeddings.weight = value
self.embeddings.vocab_size = shape_list(value)[0]
def _prune_heads(self, heads_to_prune):
"""
        Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer}. See
        base class PreTrainedModel.
"""
raise NotImplementedError
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["attention_mask"] is None:
inputs["attention_mask"] = tf.fill(input_shape, 1)
if inputs["token_type_ids"] is None:
inputs["token_type_ids"] = tf.fill(input_shape, 0)
embedding_output = self.embeddings(
inputs["input_ids"],
inputs["position_ids"],
inputs["token_type_ids"],
inputs["inputs_embeds"],
training=inputs["training"],
)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = tf.reshape(inputs["attention_mask"], (input_shape[0], 1, 1, input_shape[1]))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
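        # For example, an attention_mask row [1, 1, 0] becomes
        # [0.0, 0.0, -10000.0] here, driving the softmax weight of the padded
        # position to ~0.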
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if inputs["head_mask"] is not None:
raise NotImplementedError
else:
inputs["head_mask"] = [None] * self.num_hidden_layers
encoder_outputs = self.encoder(
embedding_output,
extended_attention_mask,
inputs["head_mask"],
inputs["output_attentions"],
inputs["output_hidden_states"],
inputs["return_dict"],
training=inputs["training"],
)
sequence_output = encoder_outputs[0]
pooled_output = self.pooler(sequence_output) if self.pooler is not None else None
if not inputs["return_dict"]:
return (
sequence_output,
pooled_output,
) + encoder_outputs[1:]
return TFBaseModelOutputWithPooling(
last_hidden_state=sequence_output,
pooler_output=pooled_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
class TFMobileBertPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = MobileBertConfig
base_model_prefix = "mobilebert"
@dataclass
class TFMobileBertForPreTrainingOutput(ModelOutput):
"""
Output type of :class:`~transformers.TFMobileBertForPreTraining`.
Args:
prediction_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length, config.vocab_size)`):
Prediction scores of the language modeling head (scores for each vocabulary token before SoftMax).
seq_relationship_logits (:obj:`tf.Tensor` of shape :obj:`(batch_size, 2)`):
Prediction scores of the next sequence prediction (classification) head (scores of True/False continuation
before SoftMax).
hidden_states (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_hidden_states=True`` is passed or when ``config.output_hidden_states=True``):
Tuple of :obj:`tf.Tensor` (one for the output of the embeddings + one for the output of each layer) of
shape :obj:`(batch_size, sequence_length, hidden_size)`.
Hidden-states of the model at the output of each layer plus the initial embedding outputs.
attentions (:obj:`tuple(tf.Tensor)`, `optional`, returned when ``output_attentions=True`` is passed or when ``config.output_attentions=True``):
Tuple of :obj:`tf.Tensor` (one for each layer) of shape :obj:`(batch_size, num_heads, sequence_length,
sequence_length)`.
Attentions weights after the attention softmax, used to compute the weighted average in the self-attention
heads.
"""
loss: Optional[tf.Tensor] = None
prediction_logits: tf.Tensor = None
seq_relationship_logits: tf.Tensor = None
hidden_states: Optional[Tuple[tf.Tensor]] = None
attentions: Optional[Tuple[tf.Tensor]] = None
MOBILEBERT_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the
generic methods the library implements for all its model (such as downloading or saving, resizing the input
embeddings, pruning heads etc.)
This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage
and behavior.
.. note::
    TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
- having all inputs as a list, tuple or dict in the first positional arguments.
    This second option is useful when using the :meth:`tf.keras.Model.fit` method, which currently requires having
    all the tensors in the first argument of the model call function: :obj:`model(inputs)`.
    If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
    the first positional argument:
    - a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(input_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Parameters:
config (:class:`~transformers.MobileBertConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
MOBILEBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.MobileBertTokenizer`. See
:func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`__
position_ids (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range ``[0,
config.max_position_embeddings - 1]``.
`What are position IDs? <../glossary.html#position-ids>`__
head_mask (:obj:`Numpy array` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This
argument can be used in eager mode, in graph mode the value will always be set to True.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare MobileBert Model transformer outputting raw hidden-states without any specific head on top.",
MOBILEBERT_START_DOCSTRING,
)
class TFMobileBertModel(TFMobileBertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
@add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="google/mobilebert-uncased",
output_type=TFBaseModelOutputWithPooling,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.mobilebert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
# Copied from transformers.models.bert.modeling_tf_bert.TFBertModel.serving_output
def serving_output(self, output: TFBaseModelOutputWithPooling) -> TFBaseModelOutputWithPooling:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutputWithPooling(
last_hidden_state=output.last_hidden_state,
pooler_output=output.pooler_output,
hidden_states=hs,
attentions=attns,
)
@add_start_docstrings(
"""
MobileBert Model with two heads on top as done during the pretraining: a `masked language modeling` head and a
`next sentence prediction (classification)` head.
""",
MOBILEBERT_START_DOCSTRING,
)
class TFMobileBertForPreTraining(TFMobileBertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
self.predictions = TFMobileBertMLMHead(config, name="predictions___cls")
self.seq_relationship = TFMobileBertOnlyNSPHead(2, name="seq_relationship___cls")
def get_lm_head(self):
return self.predictions.predictions
def get_prefix_bias_name(self):
warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
return self.name + "/" + self.predictions.name + "/" + self.predictions.predictions.name
@add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=TFMobileBertForPreTrainingOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
r"""
Return:
Examples::
>>> import tensorflow as tf
>>> from transformers import MobileBertTokenizer, TFMobileBertForPreTraining
>>> tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased')
>>> model = TFMobileBertForPreTraining.from_pretrained('google/mobilebert-uncased')
>>> input_ids = tf.constant(tokenizer.encode("Hello, my dog is cute"))[None, :] # Batch size 1
>>> outputs = model(input_ids)
>>> prediction_scores, seq_relationship_scores = outputs[:2]
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.mobilebert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output, pooled_output = outputs[:2]
prediction_scores = self.predictions(sequence_output)
seq_relationship_score = self.seq_relationship(pooled_output)
if not inputs["return_dict"]:
return (prediction_scores, seq_relationship_score) + outputs[2:]
return TFMobileBertForPreTrainingOutput(
prediction_logits=prediction_scores,
seq_relationship_logits=seq_relationship_score,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMobileBertForPreTrainingOutput(
prediction_logits=output.prediction_logits,
seq_relationship_logits=output.seq_relationship_logits,
hidden_states=hs,
attentions=attns,
)
@add_start_docstrings("""MobileBert Model with a `language modeling` head on top. """, MOBILEBERT_START_DOCSTRING)
class TFMobileBertForMaskedLM(TFMobileBertPreTrainedModel, TFMaskedLanguageModelingLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [
r"pooler",
r"seq_relationship___cls",
r"cls.seq_relationship",
]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert")
self.predictions = TFMobileBertMLMHead(config, name="predictions___cls")
def get_lm_head(self):
return self.predictions.predictions
def get_prefix_bias_name(self):
warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
        return self.name + "/" + self.predictions.name + "/" + self.predictions.predictions.name
@add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="google/mobilebert-uncased",
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
            Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
            config.vocab_size]`` (see ``input_ids`` docstring). Tokens with indices set to ``-100`` are ignored
            (masked); the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.mobilebert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
prediction_scores = self.predictions(sequence_output, training=inputs["training"])
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], prediction_scores)
if not inputs["return_dict"]:
output = (prediction_scores,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
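
# A minimal sketch of the `labels` convention documented above (-100 marks
# positions the loss ignores); `mask_index` is a hypothetical position chosen
# for illustration:
#
#   import numpy as np
#   import tensorflow as tf
#   from transformers import MobileBertTokenizer, TFMobileBertForMaskedLM
#   tokenizer = MobileBertTokenizer.from_pretrained("google/mobilebert-uncased")
#   model = TFMobileBertForMaskedLM.from_pretrained("google/mobilebert-uncased")
#   ids = tokenizer("The capital of France is Paris.", return_tensors="np")["input_ids"]
#   masked, labels = ids.copy(), np.full_like(ids, -100)
#   mask_index = 6
#   labels[0, mask_index] = ids[0, mask_index]          # supervise only this token
#   masked[0, mask_index] = tokenizer.mask_token_id     # hide it from the model
#   out = model(input_ids=tf.constant(masked), labels=tf.constant(labels))
#   out.loss, out.logits
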
class TFMobileBertOnlyNSPHead(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.seq_relationship = tf.keras.layers.Dense(2, name="seq_relationship")
def call(self, pooled_output):
seq_relationship_score = self.seq_relationship(pooled_output)
return seq_relationship_score
@add_start_docstrings(
"""MobileBert Model with a `next sentence prediction (classification)` head on top. """,
MOBILEBERT_START_DOCSTRING,
)
class TFMobileBertForNextSentencePrediction(TFMobileBertPreTrainedModel, TFNextSentencePredictionLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [r"predictions___cls", r"cls.predictions"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
self.cls = TFMobileBertOnlyNSPHead(config, name="seq_relationship___cls")
@add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@replace_return_docstrings(output_type=TFNextSentencePredictorOutput, config_class=_CONFIG_FOR_DOC)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
next_sentence_label=None,
training=False,
**kwargs,
):
r"""
Return:
Examples::
>>> import tensorflow as tf
>>> from transformers import MobileBertTokenizer, TFMobileBertForNextSentencePrediction
>>> tokenizer = MobileBertTokenizer.from_pretrained('google/mobilebert-uncased')
>>> model = TFMobileBertForNextSentencePrediction.from_pretrained('google/mobilebert-uncased')
>>> prompt = "In Italy, pizza served in formal settings, such as at a restaurant, is presented unsliced."
>>> next_sentence = "The sky is blue due to the shorter wavelength of blue light."
>>> encoding = tokenizer(prompt, next_sentence, return_tensors='tf')
>>> logits = model(encoding['input_ids'], token_type_ids=encoding['token_type_ids'])[0]
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
next_sentence_label=next_sentence_label,
training=training,
kwargs_call=kwargs,
)
outputs = self.mobilebert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
pooled_output = outputs[1]
seq_relationship_scores = self.cls(pooled_output)
next_sentence_loss = (
None
if inputs["next_sentence_label"] is None
else self.compute_loss(labels=inputs["next_sentence_label"], logits=seq_relationship_scores)
)
if not inputs["return_dict"]:
output = (seq_relationship_scores,) + outputs[2:]
return ((next_sentence_loss,) + output) if next_sentence_loss is not None else output
return TFNextSentencePredictorOutput(
loss=next_sentence_loss,
logits=seq_relationship_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForNextSentencePrediction.serving_output
def serving_output(self, output: TFNextSentencePredictorOutput) -> TFNextSentencePredictorOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFNextSentencePredictorOutput(logits=output.logits, hidden_states=hs, attentions=attns)
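
# For the docstring example above: by BERT's NSP convention, logit index 0
# scores "next_sentence continues prompt" and index 1 scores "next_sentence is
# random", so with those two unrelated sentences one expects
#
#   assert logits[0][0] < logits[0][1]   # the next sentence was random
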
@add_start_docstrings(
"""
MobileBert Model transformer with a sequence classification/regression head on top (a linear layer on top of the
pooled output) e.g. for GLUE tasks.
""",
MOBILEBERT_START_DOCSTRING,
)
class TFMobileBertForSequenceClassification(TFMobileBertPreTrainedModel, TFSequenceClassificationLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [
r"predictions___cls",
r"seq_relationship___cls",
r"cls.predictions",
r"cls.seq_relationship",
]
_keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="google/mobilebert-uncased",
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
            config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss);
            if :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.mobilebert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output, training=inputs["training"])
logits = self.classifier(pooled_output)
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForSequenceClassification.serving_output
def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
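
# A minimal sketch of the `num_labels` switch documented above (a hypothetical
# fine-tuning setup; the classification head itself is randomly initialized):
#
#   from transformers import MobileBertConfig
#   config = MobileBertConfig.from_pretrained("google/mobilebert-uncased", num_labels=1)
#   model = TFMobileBertForSequenceClassification.from_pretrained(
#       "google/mobilebert-uncased", config=config)   # num_labels == 1 -> MSE (regression)
#   # num_labels > 1 (the default is 2) switches the loss to cross-entropy.
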
@add_start_docstrings(
"""
    MobileBert Model with a span classification head on top for extractive question-answering tasks like SQuAD (a
    linear layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
MOBILEBERT_START_DOCSTRING,
)
class TFMobileBertForQuestionAnswering(TFMobileBertPreTrainedModel, TFQuestionAnsweringLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [
r"pooler",
r"predictions___cls",
r"seq_relationship___cls",
r"cls.predictions",
r"cls.seq_relationship",
]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert")
self.qa_outputs = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
@add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="google/mobilebert-uncased",
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
start_positions=None,
end_positions=None,
training=False,
**kwargs,
):
r"""
start_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
end_positions (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
start_positions=start_positions,
end_positions=end_positions,
training=training,
kwargs_call=kwargs,
)
outputs = self.mobilebert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)
end_logits = tf.squeeze(end_logits, axis=-1)
loss = None
if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
labels = {"start_position": inputs["start_positions"]}
labels["end_position"] = inputs["end_positions"]
loss = self.compute_loss(labels, (start_logits, end_logits))
if not inputs["return_dict"]:
output = (start_logits, end_logits) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForQuestionAnswering.serving_output
def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFQuestionAnsweringModelOutput(
start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
)
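
# A minimal sketch of decoding an answer from the two logit vectors (greedy
# argmax; ignores the start <= end constraint and special tokens; `inputs` and
# `outputs` stand for a tokenizer call and a forward pass):
#
#   start = int(tf.argmax(outputs.start_logits, axis=-1)[0])
#   end = int(tf.argmax(outputs.end_logits, axis=-1)[0])
#   answer = tokenizer.decode(inputs["input_ids"][0, start : end + 1])
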
@add_start_docstrings(
"""
MobileBert Model with a multiple choice classification head on top (a linear layer on top of the pooled output and
a softmax) e.g. for RocStories/SWAG tasks.
""",
MOBILEBERT_START_DOCSTRING,
)
class TFMobileBertForMultipleChoice(TFMobileBertPreTrainedModel, TFMultipleChoiceLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [
r"predictions___cls",
r"seq_relationship___cls",
r"cls.predictions",
r"cls.seq_relationship",
]
_keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.mobilebert = TFMobileBertMainLayer(config, name="mobilebert")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@property
def dummy_inputs(self):
"""
Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs
"""
return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)}
@add_start_docstrings_to_model_forward(
MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="google/mobilebert-uncased",
output_type=TFMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
num_choices = shape_list(inputs["inputs_embeds"])[1]
seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
flat_attention_mask = (
tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
)
flat_token_type_ids = (
tf.reshape(inputs["token_type_ids"], (-1, seq_length)) if inputs["token_type_ids"] is not None else None
)
flat_position_ids = (
tf.reshape(inputs["position_ids"], (-1, seq_length)) if inputs["position_ids"] is not None else None
)
flat_inputs_embeds = (
tf.reshape(inputs["inputs_embeds"], (-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
if inputs["inputs_embeds"] is not None
else None
)
outputs = self.mobilebert(
flat_input_ids,
flat_attention_mask,
flat_token_type_ids,
flat_position_ids,
inputs["head_mask"],
flat_inputs_embeds,
inputs["output_attentions"],
inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
pooled_output = outputs[1]
pooled_output = self.dropout(pooled_output, training=inputs["training"])
logits = self.classifier(pooled_output)
reshaped_logits = tf.reshape(logits, (-1, num_choices))
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], reshaped_logits)
if not inputs["return_dict"]:
output = (reshaped_logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
"token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"),
}
]
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving
def serving(self, inputs: Dict[str, tf.Tensor]):
output = self.call(input_ids=inputs)
return self.serving_output(output)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForMultipleChoice.serving_output
def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
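
# A minimal sketch of shaping inputs to the (batch_size, num_choices,
# sequence_length) layout this head expects (hypothetical prompt/choices):
#
#   prompt = "The weather today is"
#   choices = ["sunny.", "a keyboard."]
#   enc = tokenizer([prompt] * len(choices), choices, return_tensors="tf", padding=True)
#   inputs = {k: tf.expand_dims(v, 0) for k, v in enc.items()}   # add the batch dim
#   logits = model(inputs).logits                                # (1, num_choices)
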
@add_start_docstrings(
"""
MobileBert Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g.
for Named-Entity-Recognition (NER) tasks.
""",
MOBILEBERT_START_DOCSTRING,
)
class TFMobileBertForTokenClassification(TFMobileBertPreTrainedModel, TFTokenClassificationLoss):
    # names with a '.' represent the authorized unexpected/missing layers when a TF model is loaded from a PT model
_keys_to_ignore_on_load_unexpected = [
r"pooler",
r"predictions___cls",
r"seq_relationship___cls",
r"cls.predictions",
r"cls.seq_relationship",
]
_keys_to_ignore_on_load_missing = [r"dropout"]
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.mobilebert = TFMobileBertMainLayer(config, add_pooling_layer=False, name="mobilebert")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(MOBILEBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
tokenizer_class=_TOKENIZER_FOR_DOC,
checkpoint="google/mobilebert-uncased",
output_type=TFTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (:obj:`tf.Tensor` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.mobilebert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
            return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output, training=inputs["training"])
logits = self.classifier(sequence_output)
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForTokenClassification.serving_output
def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
# (end of MoQ/huggingface-transformers/src/transformers/models/mobilebert/modeling_tf_mobilebert.py)

# (phunc20/rlcomp2020)
########################################
# Changes compared to 30_11_dDQN_light_tweak71.py
# 01.
# lr_optimizer = 7.3e-4
########################################
import sys
import numpy as np
#import pandas as pd
import datetime
import json
from array import *
import os
import math
from random import randrange
import random
from tensorflow.keras.models import Sequential
from tensorflow.keras.models import model_from_json
from tensorflow.keras.layers import Dense, Activation
from tensorflow.keras import optimizers
import tensorflow.keras as keras
#import tensorflow.compat.v1 as tf
#from tensorflow.compat.v1.keras import backend as K
#tf.disable_v2_behavior()
import tensorflow as tf
from tensorflow.keras import backend as K
import constants
import non_RL_agent
import non_RL_agent02
import non_RL_agent03
import non_RL_agent04
import non_RL_agent05
import non_RL_agent06
n_episodes = 500_000
#n_epsilon_decay = int(n_episodes*.6)
#n_epsilon_decay = int(n_episodes*.805)
#n_epsilon_decay = 10**6 / 0.99
n_epsilon_decay = int(n_episodes // 50)
n_episodes_buf_fill = 10_000
batch_size = 32
discount_rate = 0.95
#lr_optimizer = 2.5e-4
lr_optimizer = 7.3e-4
#loss_fn = keras.losses.mean_squared_error
loss_fn = keras.losses.Huber()
max_replay_len = 50_000
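
# A minimal sketch (hypothetical variable names, not this script's actual
# training loop) of the double-DQN target that `discount_rate` and `loss_fn`
# feed into: the online net picks the next action, the target net evaluates it.
#
#   next_Q = model.predict(next_states)
#   best_a = np.argmax(next_Q, axis=1)
#   eval_Q = target_model.predict(next_states)
#   td_target = rewards + (1.0 - dones) * discount_rate * \
#               eval_Q[np.arange(batch_size), best_a]
#   with tf.GradientTape() as tape:
#       q_sa = tf.reduce_sum(model(states) * tf.one_hot(actions, 6), axis=1)
#       loss = tf.reduce_mean(loss_fn(td_target, q_sa))
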
#Classes in GAME_SOCKET_DUMMY.py
class ObstacleInfo:
# initial energy for obstacles: Land (key = 0): -1, Forest(key = -1): 0 (random), Trap(key = -2): -10, Swamp (key = -3): -5
types = {0: -1, -1: 0, -2: -10, -3: -5}
def __init__(self):
self.type = 0
self.posx = 0
self.posy = 0
self.value = 0
class GoldInfo:
def __init__(self):
self.posx = 0
self.posy = 0
self.amount = 0
def loads(self, data):
golds = []
for gd in data:
g = GoldInfo()
g.posx = gd["posx"]
g.posy = gd["posy"]
g.amount = gd["amount"]
golds.append(g)
return golds
class PlayerInfo:
STATUS_PLAYING = 0
STATUS_ELIMINATED_WENT_OUT_MAP = 1
STATUS_ELIMINATED_OUT_OF_ENERGY = 2
STATUS_ELIMINATED_INVALID_ACTION = 3
STATUS_STOP_EMPTY_GOLD = 4
STATUS_STOP_END_STEP = 5
def __init__(self, id):
self.playerId = id
self.score = 0
self.energy = 0
self.posx = 0
self.posy = 0
self.lastAction = -1
self.status = PlayerInfo.STATUS_PLAYING
self.freeCount = 0
class GameInfo:
def __init__(self):
self.numberOfPlayers = 1
self.width = 0
self.height = 0
self.steps = 100
self.golds = []
self.obstacles = []
def loads(self, data):
m = GameInfo()
m.width = data["width"]
m.height = data["height"]
m.golds = GoldInfo().loads(data["golds"])
m.obstacles = data["obstacles"]
m.numberOfPlayers = data["numberOfPlayers"]
m.steps = data["steps"]
return m
class UserMatch:
def __init__(self):
self.playerId = 1
self.posx = 0
self.posy = 0
self.energy = 50
self.gameinfo = GameInfo()
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
class StepState:
def __init__(self):
self.players = []
self.golds = []
self.changedObstacles = []
def to_json(self):
return json.dumps(self, default=lambda o: o.__dict__, sort_keys=True, indent=4)
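
# A quick illustration of the `default=lambda o: o.__dict__` trick used by both
# to_json methods above: it lets json.dumps serialize nested plain objects, e.g.
#
#   json.dumps(GoldInfo(), default=lambda o: o.__dict__, sort_keys=True)
#   # -> '{"amount": 0, "posx": 0, "posy": 0}'
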
#Main class in GAME_SOCKET_DUMMY.py
class GameSocket:
bog_energy_chain = {-5: -20, -20: -40, -40: -100, -100: -100}
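    # A swamp cell deepens each time it is stepped on: its energy penalty
    # escalates -5 -> -20 -> -40 -> -100 and then stays at -100
    # (applied in go_to_pos via add_changed_obstacle below).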
def __init__(self):
self.stepCount = 0
self.maxStep = 0
self.mapdir = "Maps" # where to load all pre-defined maps
        self.mapId = ""
self.userMatch = UserMatch()
self.user = PlayerInfo(1)
self.stepState = StepState()
self.maps = {} # key: map file name, value: file content
self.map = [] # running map info: 0->Land, -1->Forest, -2->Trap, -3:Swamp, >0:Gold
self.energyOnMap = [] # self.energyOnMap[x][y]: <0, amount of energy which player will consume if it move into (x,y)
self.E = 50
self.resetFlag = True
self.craftUsers = [] # players that craft at current step - for calculating amount of gold
self.bots = []
self.craftMap = {} # cells that players craft at current step, key: x_y, value: number of players that craft at (x,y)
def init_bots(self):
self.bots = [Bot1(2), Bot2(3), Bot3(4)] # use bot1(id=2), bot2(id=3), bot3(id=4)
#for (bot) in self.bots: # at the beginning, all bots will have same position, energy as player
for bot in self.bots: # at the beginning, all bots will have same position, energy as player
bot.info.posx = self.user.posx
bot.info.posy = self.user.posy
bot.info.energy = self.user.energy
bot.info.lastAction = -1
bot.info.status = PlayerInfo.STATUS_PLAYING
bot.info.score = 0
self.stepState.players.append(bot.info)
self.userMatch.gameinfo.numberOfPlayers = len(self.stepState.players)
#print("numberOfPlayers: ", self.userMatch.gameinfo.numberOfPlayers)
def reset(self, requests): # load new game by given request: [map id (filename), posx, posy, initial energy]
# load new map
self.reset_map(requests[0])
self.userMatch.posx = int(requests[1])
self.userMatch.posy = int(requests[2])
self.userMatch.energy = int(requests[3])
self.userMatch.gameinfo.steps = int(requests[4])
self.maxStep = self.userMatch.gameinfo.steps
# init data for players
        self.user.posx = self.userMatch.posx
self.user.posy = self.userMatch.posy
self.user.energy = self.userMatch.energy
self.user.status = PlayerInfo.STATUS_PLAYING
self.user.score = 0
self.stepState.players = [self.user]
self.E = self.userMatch.energy
self.resetFlag = True
self.init_bots()
self.stepCount = 0
def reset_map(self, id): # load map info
self.mapId = id
self.map = json.loads(self.maps[self.mapId])
self.userMatch = self.map_info(self.map)
self.stepState.golds = self.userMatch.gameinfo.golds
self.map = json.loads(self.maps[self.mapId])
self.energyOnMap = json.loads(self.maps[self.mapId])
for x in range(len(self.map)):
for y in range(len(self.map[x])):
if self.map[x][y] > 0: # gold
self.energyOnMap[x][y] = -4
else: # obstacles
self.energyOnMap[x][y] = ObstacleInfo.types[self.map[x][y]]
def connect(self): # simulate player's connect request
print("Connected to server.")
for mapid in range(len(Maps)):
filename = "map" + str(mapid)
print("Found: " + filename)
self.maps[filename] = str(Maps[mapid])
def map_info(self, map): # get map info
# print(map)
userMatch = UserMatch()
userMatch.gameinfo.height = len(map)
userMatch.gameinfo.width = len(map[0])
i = 0
while i < len(map):
j = 0
while j < len(map[i]):
if map[i][j] > 0: # gold
g = GoldInfo()
g.posx = j
g.posy = i
g.amount = map[i][j]
userMatch.gameinfo.golds.append(g)
else: # obstacles
o = ObstacleInfo()
o.posx = j
o.posy = i
o.type = -map[i][j]
o.value = ObstacleInfo.types[map[i][j]]
userMatch.gameinfo.obstacles.append(o)
j += 1
i += 1
return userMatch
def receive(self): # send data to player (simulate player's receive request)
if self.resetFlag: # for the first time -> send game info
self.resetFlag = False
data = self.userMatch.to_json()
            for bot in self.bots:
bot.new_game(data)
# print(data)
return data
else: # send step state
self.stepCount = self.stepCount + 1
if self.stepCount >= self.maxStep:
for player in self.stepState.players:
player.status = PlayerInfo.STATUS_STOP_END_STEP
data = self.stepState.to_json()
#for (bot) in self.bots: # update bots' state
for bot in self.bots: # update bots' state
bot.new_state(data)
# print(data)
return data
def send(self, message): # receive message from player (simulate send request from player)
if message.isnumeric(): # player send action
self.resetFlag = False
self.stepState.changedObstacles = []
action = int(message)
# print("Action = ", action)
self.user.lastAction = action
self.craftUsers = []
self.step_action(self.user, action)
for bot in self.bots:
if bot.info.status == PlayerInfo.STATUS_PLAYING:
action = bot.next_action()
bot.info.lastAction = action
# print("Bot Action: ", action)
self.step_action(bot.info, action)
self.action_5_craft()
for c in self.stepState.changedObstacles:
self.map[c["posy"]][c["posx"]] = -c["type"]
self.energyOnMap[c["posy"]][c["posx"]] = c["value"]
else: # reset game
requests = message.split(",")
#print("Reset game: ", requests[:3], end='')
self.reset(requests)
def step_action(self, user, action):
switcher = {
0: self.action_0_left,
1: self.action_1_right,
2: self.action_2_up,
3: self.action_3_down,
4: self.action_4_free,
5: self.action_5_craft_pre
}
func = switcher.get(action, self.invalidAction)
func(user)
def action_5_craft_pre(self, user): # collect players who craft at current step
user.freeCount = 0
if self.map[user.posy][user.posx] <= 0: # craft at the non-gold cell
user.energy -= 10
if user.energy <= 0:
user.status = PlayerInfo.STATUS_ELIMINATED_OUT_OF_ENERGY
user.lastAction = 6 #eliminated
else:
user.energy -= 5
if user.energy > 0:
self.craftUsers.append(user)
key = str(user.posx) + "_" + str(user.posy)
if key in self.craftMap:
count = self.craftMap[key]
self.craftMap[key] = count + 1
else:
self.craftMap[key] = 1
else:
user.status = PlayerInfo.STATUS_ELIMINATED_OUT_OF_ENERGY
user.lastAction = 6 #eliminated
def action_0_left(self, user): # user go left
user.freeCount = 0
user.posx = user.posx - 1
if user.posx < 0:
user.status = PlayerInfo.STATUS_ELIMINATED_WENT_OUT_MAP
user.lastAction = 6 #eliminated
else:
self.go_to_pos(user)
def action_1_right(self, user): # user go right
user.freeCount = 0
user.posx = user.posx + 1
if user.posx >= self.userMatch.gameinfo.width:
user.status = PlayerInfo.STATUS_ELIMINATED_WENT_OUT_MAP
user.lastAction = 6 #eliminated
else:
self.go_to_pos(user)
def action_2_up(self, user): # user go up
user.freeCount = 0
user.posy = user.posy - 1
if user.posy < 0:
user.status = PlayerInfo.STATUS_ELIMINATED_WENT_OUT_MAP
user.lastAction = 6 #eliminated
else:
self.go_to_pos(user)
    def action_3_down(self, user):  # user go down
user.freeCount = 0
user.posy = user.posy + 1
if user.posy >= self.userMatch.gameinfo.height:
user.status = PlayerInfo.STATUS_ELIMINATED_WENT_OUT_MAP
user.lastAction = 6 #eliminated
else:
self.go_to_pos(user)
def action_4_free(self, user): # user free
user.freeCount += 1
if user.freeCount == 1:
user.energy += int(self.E / 4)
elif user.freeCount == 2:
user.energy += int(self.E / 3)
elif user.freeCount == 3:
user.energy += int(self.E / 2)
else:
user.energy = self.E
if user.energy > self.E:
user.energy = self.E
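        # Worked example with E = 50: resting for 1, 2, 3, 4+ consecutive turns
        # recovers +12 (int(50/4)), +16 (int(50/3)), +25 (int(50/2)), then a
        # full refill to 50; energy is always capped at E.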
def action_5_craft(self):
craftCount = len(self.craftUsers)
# print ("craftCount",craftCount)
if (craftCount > 0):
for user in self.craftUsers:
x = user.posx
y = user.posy
key = str(user.posx) + "_" + str(user.posy)
c = self.craftMap[key]
m = min(math.ceil(self.map[y][x] / c), 50)
user.score += m
# print ("user", user.playerId, m)
for user in self.craftUsers:
x = user.posx
y = user.posy
key = str(user.posx) + "_" + str(user.posy)
if key in self.craftMap:
c = self.craftMap[key]
del self.craftMap[key]
m = min(math.ceil(self.map[y][x] / c), 50)
self.map[y][x] -= m * c
if self.map[y][x] < 0:
self.map[y][x] = 0
self.energyOnMap[y][x] = ObstacleInfo.types[0]
for g in self.stepState.golds:
if g.posx == x and g.posy == y:
g.amount = self.map[y][x]
if g.amount == 0:
self.stepState.golds.remove(g)
self.add_changed_obstacle(x, y, 0, ObstacleInfo.types[0])
if len(self.stepState.golds) == 0:
for player in self.stepState.players:
player.status = PlayerInfo.STATUS_STOP_EMPTY_GOLD
                            break
self.craftMap = {}
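        # Worked example: 3 players craft on one cell holding 120 gold. Each
        # scores min(ceil(120 / 3), 50) = 40 and the cell loses 40 * 3 = 120,
        # so it empties and reverts to land (type 0, energy -1).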
def invalidAction(self, user):
user.status = PlayerInfo.STATUS_ELIMINATED_INVALID_ACTION
user.lastAction = 6 #eliminated
def go_to_pos(self, user): # player move to cell(x,y)
if self.map[user.posy][user.posx] == -1:
user.energy -= randrange(16) + 5
elif self.map[user.posy][user.posx] == 0:
user.energy += self.energyOnMap[user.posy][user.posx]
elif self.map[user.posy][user.posx] == -2:
user.energy += self.energyOnMap[user.posy][user.posx]
self.add_changed_obstacle(user.posx, user.posy, 0, ObstacleInfo.types[0])
elif self.map[user.posy][user.posx] == -3:
user.energy += self.energyOnMap[user.posy][user.posx]
self.add_changed_obstacle(user.posx, user.posy, 3,
self.bog_energy_chain[self.energyOnMap[user.posy][user.posx]])
else:
user.energy -= 4
if user.energy <= 0:
user.status = PlayerInfo.STATUS_ELIMINATED_OUT_OF_ENERGY
user.lastAction = 6 #eliminated
def add_changed_obstacle(self, x, y, t, v):
added = False
for o in self.stepState.changedObstacles:
if o["posx"] == x and o["posy"] == y:
added = True
break
if added == False:
o = {}
o["posx"] = x
o["posy"] = y
o["type"] = t
o["value"] = v
self.stepState.changedObstacles.append(o)
def close(self):
print("Close socket.")
class Bot1:
ACTION_GO_LEFT = 0
ACTION_GO_RIGHT = 1
ACTION_GO_UP = 2
ACTION_GO_DOWN = 3
ACTION_FREE = 4
ACTION_CRAFT = 5
def __init__(self, id):
self.state = State()
self.info = PlayerInfo(id)
def get_state(self):
view = np.zeros([self.state.mapInfo.max_y + 1, self.state.mapInfo.max_x + 1], dtype=int)
for x in range(self.state.mapInfo.max_x + 1):
for y in range(self.state.mapInfo.max_y + 1):
if self.state.mapInfo.get_obstacle(x, y) == TreeID: # Tree
view[y, x] = -TreeID
if self.state.mapInfo.get_obstacle(x, y) == TrapID: # Trap
view[y, x] = -TrapID
if self.state.mapInfo.get_obstacle(x, y) == SwampID: # Swamp
view[y, x] = -SwampID
if self.state.mapInfo.gold_amount(x, y) > 0:
view[y, x] = self.state.mapInfo.gold_amount(x, y)
DQNState = view.flatten().tolist() #Flattening the map matrix to a vector
#DQNState.append(self.state.x)
#DQNState.append(self.state.y)
#DQNState.append(self.state.energy)
DQNState.append(self.info.posx)
DQNState.append(self.info.posy)
DQNState.append(self.info.energy)
for player in self.state.players:
# self.info.playerId is the id of the current bot
if player["playerId"] != self.info.playerId:
DQNState.append(player["posx"])
DQNState.append(player["posy"])
DQNState = np.array(DQNState)
return DQNState
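        # Layout of the returned vector: height*width flattened map cells
        # (negative = obstacle type, positive = gold amount), then this bot's
        # (posx, posy, energy), then (posx, posy) for each other player; e.g.
        # the 21x9 competition maps with 4 players give 189 + 3 + 6 = 198 entries.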
def next_action(self):
s = self.get_state()
#return int(greedy_policy(s))
return int(non_RL_agent.greedy_policy(s))
def get_score(self):
return [player["score"] for player in minerEnv.socket.bots[1].state.players if player["playerId"] == self.info.playerId][0]
def new_game(self, data):
try:
self.state.init_state(data)
except Exception as e:
import traceback
traceback.print_exc()
def new_state(self, data):
# action = self.next_action();
# self.socket.send(action)
try:
self.state.update_state(data)
except Exception as e:
import traceback
traceback.print_exc()
class Bot2:
ACTION_GO_LEFT = 0
ACTION_GO_RIGHT = 1
ACTION_GO_UP = 2
ACTION_GO_DOWN = 3
ACTION_FREE = 4
ACTION_CRAFT = 5
def __init__(self, id):
self.state = State()
self.info = PlayerInfo(id)
def get_state(self):
view = np.zeros([self.state.mapInfo.max_y + 1, self.state.mapInfo.max_x + 1], dtype=int)
for x in range(self.state.mapInfo.max_x + 1):
for y in range(self.state.mapInfo.max_y + 1):
if self.state.mapInfo.get_obstacle(x, y) == TreeID: # Tree
view[y, x] = -TreeID
if self.state.mapInfo.get_obstacle(x, y) == TrapID: # Trap
view[y, x] = -TrapID
if self.state.mapInfo.get_obstacle(x, y) == SwampID: # Swamp
view[y, x] = -SwampID
if self.state.mapInfo.gold_amount(x, y) > 0:
view[y, x] = self.state.mapInfo.gold_amount(x, y)
DQNState = view.flatten().tolist() #Flattening the map matrix to a vector
#DQNState.append(self.state.x)
#DQNState.append(self.state.y)
#DQNState.append(self.state.energy)
DQNState.append(self.info.posx)
DQNState.append(self.info.posy)
DQNState.append(self.info.energy)
for player in self.state.players:
# self.info.playerId is the id of the current bot
if player["playerId"] != self.info.playerId:
DQNState.append(player["posx"])
DQNState.append(player["posy"])
DQNState = np.array(DQNState)
return DQNState
def next_action(self):
s = self.get_state()
#return int(non_RL_agent03.greedy_policy(s))
return int(non_RL_agent.greedy_policy(s, how_gold=non_RL_agent.find_worthiest_gold))
#if self.state.mapInfo.gold_amount(self.info.posx, self.info.posy) > 0:
# if self.info.energy >= 6:
# return self.ACTION_CRAFT
# else:
# return self.ACTION_FREE
#if self.info.energy < 5:
# return self.ACTION_FREE
#else:
# action = np.random.randint(0, 4)
# return action
def new_game(self, data):
try:
self.state.init_state(data)
except Exception as e:
import traceback
traceback.print_exc()
def new_state(self, data):
# action = self.next_action();
# self.socket.send(action)
try:
self.state.update_state(data)
except Exception as e:
import traceback
traceback.print_exc()
def get_score(self):
return [player["score"] for player in minerEnv.socket.bots[1].state.players if player["playerId"] == self.info.playerId][0]
class Bot3:
ACTION_GO_LEFT = 0
ACTION_GO_RIGHT = 1
ACTION_GO_UP = 2
ACTION_GO_DOWN = 3
ACTION_FREE = 4
ACTION_CRAFT = 5
def __init__(self, id):
self.state = State()
self.info = PlayerInfo(id)
def get_state(self):
view = np.zeros([self.state.mapInfo.max_y + 1, self.state.mapInfo.max_x + 1], dtype=int)
for x in range(self.state.mapInfo.max_x + 1):
for y in range(self.state.mapInfo.max_y + 1):
if self.state.mapInfo.get_obstacle(x, y) == TreeID: # Tree
view[y, x] = -TreeID
if self.state.mapInfo.get_obstacle(x, y) == TrapID: # Trap
view[y, x] = -TrapID
if self.state.mapInfo.get_obstacle(x, y) == SwampID: # Swamp
view[y, x] = -SwampID
if self.state.mapInfo.gold_amount(x, y) > 0:
view[y, x] = self.state.mapInfo.gold_amount(x, y)
DQNState = view.flatten().tolist() #Flattening the map matrix to a vector
#DQNState.append(self.state.x)
#DQNState.append(self.state.y)
#DQNState.append(self.state.energy)
DQNState.append(self.info.posx)
DQNState.append(self.info.posy)
DQNState.append(self.info.energy)
for player in self.state.players:
# self.info.playerId is the id of the current bot
if player["playerId"] != self.info.playerId:
DQNState.append(player["posx"])
DQNState.append(player["posy"])
DQNState = np.array(DQNState)
return DQNState
def next_action(self):
s = self.get_state()
return int(non_RL_agent02.greedy_policy(s))
#if self.state.mapInfo.gold_amount(self.info.posx, self.info.posy) > 0:
# if self.info.energy >= 6:
# return self.ACTION_CRAFT
# else:
# return self.ACTION_FREE
#if self.info.energy < 5:
# return self.ACTION_FREE
#else:
# action = self.ACTION_GO_LEFT
# if self.info.posx % 2 == 0:
# if self.info.posy < self.state.mapInfo.max_y:
# action = self.ACTION_GO_DOWN
# else:
# if self.info.posy > 0:
# action = self.ACTION_GO_UP
# else:
# action = self.ACTION_GO_RIGHT
# return action
def new_game(self, data):
try:
self.state.init_state(data)
except Exception as e:
import traceback
traceback.print_exc()
def new_state(self, data):
# action = self.next_action();
# self.socket.send(action)
try:
self.state.update_state(data)
except Exception as e:
import traceback
traceback.print_exc()
def get_score(self):
return [player["score"] for player in minerEnv.socket.bots[1].state.players if player["playerId"] == self.info.playerId][0]
#MinerState.py
def str_2_json(s):
    return json.loads(s)  # the `encoding` kwarg of json.loads was removed in Python 3.9
class MapInfo:
def __init__(self):
self.max_x = 0 #Width of the map
self.max_y = 0 #Height of the map
self.golds = [] #List of the golds in the map
self.obstacles = []
self.numberOfPlayers = 0
self.maxStep = 0 #The maximum number of step is set for this map
def init_map(self, gameInfo):
#Initialize the map at the begining of each episode
self.max_x = gameInfo["width"] - 1
self.max_y = gameInfo["height"] - 1
self.golds = gameInfo["golds"]
self.obstacles = gameInfo["obstacles"]
self.maxStep = gameInfo["steps"]
self.numberOfPlayers = gameInfo["numberOfPlayers"]
def update(self, golds, changedObstacles):
#Update the map after every step
self.golds = golds
for cob in changedObstacles:
newOb = True
for ob in self.obstacles:
if cob["posx"] == ob["posx"] and cob["posy"] == ob["posy"]:
newOb = False
#print("cell(", cob["posx"], ",", cob["posy"], ") change type from: ", ob["type"], " -> ",
# cob["type"], " / value: ", ob["value"], " -> ", cob["value"])
ob["type"] = cob["type"]
ob["value"] = cob["value"]
break
if newOb:
self.obstacles.append(cob)
#print("new obstacle: ", cob["posx"], ",", cob["posy"], ", type = ", cob["type"], ", value = ",
# cob["value"])
def get_min_x(self):
return min([cell["posx"] for cell in self.golds])
def get_max_x(self):
return max([cell["posx"] for cell in self.golds])
def get_min_y(self):
return min([cell["posy"] for cell in self.golds])
def get_max_y(self):
return max([cell["posy"] for cell in self.golds])
def is_row_has_gold(self, y):
return y in [cell["posy"] for cell in self.golds]
def is_column_has_gold(self, x):
return x in [cell["posx"] for cell in self.golds]
def gold_amount(self, x, y): #Get the amount of golds at cell (x,y)
for cell in self.golds:
if x == cell["posx"] and y == cell["posy"]:
return cell["amount"]
return 0
def get_obstacle(self, x, y): # Get the kind of the obstacle at cell(x,y)
for cell in self.obstacles:
if x == cell["posx"] and y == cell["posy"]:
return cell["type"]
return -1 # No obstacle at the cell (x,y)
class State:
STATUS_PLAYING = 0
STATUS_ELIMINATED_WENT_OUT_MAP = 1
STATUS_ELIMINATED_OUT_OF_ENERGY = 2
STATUS_ELIMINATED_INVALID_ACTION = 3
STATUS_STOP_EMPTY_GOLD = 4
STATUS_STOP_END_STEP = 5
def __init__(self):
self.end = False
self.score = 0
self.lastAction = None
self.id = 0
self.x = 0
self.y = 0
self.energy = 0
self.energy_pre = 0
self.mapInfo = MapInfo()
self.players = []
self.stepCount = 0
self.status = State.STATUS_PLAYING
def init_state(self, data): #parse data from server into object
game_info = str_2_json(data)
self.end = False
self.score = 0
self.lastAction = None
self.id = game_info["playerId"]
self.x = game_info["posx"]
self.y = game_info["posy"]
self.energy = game_info["energy"]
self.mapInfo.init_map(game_info["gameinfo"])
self.stepCount = 0
self.status = State.STATUS_PLAYING
self.players = [{"playerId": 2, "posx": self.x, "posy": self.y},
{"playerId": 3, "posx": self.x, "posy": self.y},
{"playerId": 4, "posx": self.x, "posy": self.y}]
def update_state(self, data):
new_state = str_2_json(data)
for player in new_state["players"]:
if player["playerId"] == self.id:
self.x = player["posx"]
self.y = player["posy"]
self.energy_pre = self.energy
self.energy = player["energy"]
self.score = player["score"]
self.lastAction = player["lastAction"]
self.status = player["status"]
self.mapInfo.update(new_state["golds"], new_state["changedObstacles"])
self.players = new_state["players"]
for i in range(len(self.players), 4, 1):
self.players.append({"playerId": i, "posx": self.x, "posy": self.y})
self.stepCount = self.stepCount + 1
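# Illustration (hypothetical payload, assuming `json` is imported near the top
# of the script, as str_2_json above already requires): parsing a
# start-of-game message into a State object.
_start_msg = json.dumps({"playerId": 1, "posx": 0, "posy": 0, "energy": 50,
                         "gameinfo": {"width": 3, "height": 3, "golds": [],
                                      "obstacles": [], "steps": 100,
                                      "numberOfPlayers": 4}})
_demo_state = State()
_demo_state.init_state(_start_msg)
assert _demo_state.energy == 50 and _demo_state.status == State.STATUS_PLAYING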
#MinerEnv.py
TreeID = 1
TrapID = 2
SwampID = 3
class MinerEnv:
def __init__(self):
self.socket = GameSocket()
self.state = State()
self.score_pre = self.state.score
def start(self): #connect to server
self.socket.connect()
def end(self): #disconnect server
self.socket.close()
def send_map_info(self, request):#tell server which map to run
self.socket.send(request)
def reset(self): #start new game
try:
message = self.socket.receive() #receive game info from server
self.state.init_state(message) #init state
except Exception as e:
import traceback
traceback.print_exc()
def step(self, action): #step process
self.socket.send(action) #send action to server
try:
message = self.socket.receive() #receive new state from server
self.state.update_state(message) #update to local state
except Exception as e:
import traceback
traceback.print_exc()
def get_state(self):
"""
Fuse `view` and `energyOnMap` into a single matrix to have a simple and concise state/observation.
We want a matrix showing the following:
`gold`: The amount of gold
`all the others`: The energy that each type of terrain is going to take if being stepped into, e.g.
`land` => -1, `trap` => -10, etc.
"""
view = np.zeros([self.state.mapInfo.max_y + 1, self.state.mapInfo.max_x + 1], dtype=int)
for x in range(self.state.mapInfo.max_x + 1):
for y in range(self.state.mapInfo.max_y + 1):
if self.state.mapInfo.get_obstacle(x, y) == TreeID: # Tree
view[y, x] = -TreeID
if self.state.mapInfo.get_obstacle(x, y) == TrapID: # Trap
view[y, x] = -TrapID
if self.state.mapInfo.get_obstacle(x, y) == SwampID: # Swamp
view[y, x] = -SwampID
if self.state.mapInfo.gold_amount(x, y) > 0:
view[y, x] = self.state.mapInfo.gold_amount(x, y)
energyOnMap = np.array(self.socket.energyOnMap)
        # `view` contributes only the `gold` terrain type
        view[view <= 0] = -9999  # Dummy large negative number, overridden just below
        # `energyOnMap` contributes the `land`, `trap`, `forest` and `swamp` terrain types.
        # Recall: `forest` is encoded as 0 by the organizers (BTC), meaning a random integer in [5..20].
        energyOnMap[energyOnMap == 0] = -constants.forest_energy
channel0 = np.maximum(view, energyOnMap)
# Finish channel 0
# Channel 1 will contain the position of the agent
channel1 = np.zeros_like(channel0)
x_agent_out_of_map = self.state.x < 0 or self.state.x >= constants.width
y_agent_out_of_map = self.state.y < 0 or self.state.y >= constants.height
        if not (x_agent_out_of_map or y_agent_out_of_map):
            channel1[self.state.y, self.state.x] = self.state.energy
state = np.stack((channel0, channel1), axis=-1)
return state
def get_reward(self):
# Initialize reward
reward = 0
if self.state.status == constants.agent_state_str2id["out_of_MAP"]:
#if self.state.stepCount < 50:
# reward += -5*(50 - self.state.stepCount)
reward -= 1000
#elif self.state.status == constants.agent_state_str2id["no_more_STEP"]:
# #reward += (self.state.score/total_gold) * 100
# pass
elif self.state.status == constants.agent_state_str2id["no_more_ENERGY"]:
reward -= 300
#elif self.state.status == constants.agent_state_str2id["no_more_GOLD"]:
# pass
#elif self.state.status == constants.agent_state_str2id["INVALID_action"]:
# pass
        else: # From here on, the agent is almost surely still on the map
s = self.get_state()
try:
terrain_now = s[self.state.y, self.state.x, 0]
except Exception as e:
print(f"{e}")
print(f"self.state.x, self.state.y = {self.state.x}, {self.state.y} ")
pos_now = np.array([self.state.x, self.state.y])
reverse_mv = constants.action_id2ndarray[constants.reverse_action_id[self.state.lastAction]]
pos_pre = pos_now + reverse_mv
x_pre, y_pre = pos_pre
terrain_pre = s[y_pre, x_pre, 0]
# Punish `dig on obstacle`
if self.state.lastAction == constants.dig:
if terrain_now < 0:
reward -= 100
elif terrain_now > 0:
score_action = self.state.score - self.score_pre
reward += score_action
self.score_pre = self.state.score
if self.state.lastAction in (constants.up, constants.down, constants.left, constants.right,): # i.e. if agent moved
if terrain_pre > 100: # punish leaving gold
reward -= terrain_pre
if terrain_now > 0: # entering gold
if self.state.energy > constants.punishments["gold"]:
reward += 50
else:
reward -= 100
if terrain_now < 0: # punish according to terrain_now
reward += terrain_now
if terrain_now == -100: # i.e. fatal swamp
reward -= 500
if self.state.lastAction == constants.rest:
if self.state.energy_pre >= 40:
reward -= 200
if self.state.energy_pre <= 5:
reward += 20
if self.state.status == constants.agent_state_str2id["PLAYing"]:
reward += 1
return reward
    def check_terminate(self):
        # Check the game status: True once the game has ended, False while still playing.
        return self.state.status != State.STATUS_PLAYING
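# Sketch of the reverse-move arithmetic that get_reward() relies on:
# constants.reverse_action_id maps a move to its opposite, and
# constants.action_id2ndarray maps an action id to an (x, y) displacement,
# so pos_now + reverse displacement recovers the cell the agent just left.
# The displacement below is a made-up value; the real tables live in
# constants.py, and np is NumPy, imported at the top of the original script.
_pos_now = np.array([4, 2])
_move_right = np.array([1, 0])          # hypothetical displacement for "right"
_reverse_of_right = -_move_right        # its opposite, i.e. "left"
assert (_pos_now + _reverse_of_right == np.array([3, 2])).all()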
Maps = [constants.maps[i] for i in range(1, 6)]
env = MinerEnv() # Creating a communication environment between the DQN model and the game environment
env.start() # Connect to the game
#eliminated = []
#def pictorial_state(obs):
# pictorial = np.zeros((constants.height, constants.width, 1+4), dtype=np.float32)
# # 1+4 is +1 for map and +1 for each of the players = 5 channels
# # dtype=np.float32 because pictorial will later be carried into tensorflow CNN
# pictorial[..., 0] = obs[:constants.n_px].reshape((constants.height, constants.width))
# # position of agent: we put the energy value at the coordinate where stands the agent, the whole in channel 1, the channel for the agent.
# x_agent, y_agent = obs[constants.n_px], obs[constants.n_px+1]
# if x_agent >= constants.width or y_agent >= constants.height:
# pass
# else:
# pictorial[y_agent, x_agent, 1] = obs[constants.n_px+2]
# # position of bots: we put -1 on the coord of the bots
# for i in range(1, 3+1):
# if i in eliminated:
# continue
# y = obs[constants.n_px+(2*i+2)]
# x = obs[constants.n_px+(2*i+1)]
# if x >= constants.width or y >= constants.height:
# eliminated.append(i)
# continue
# pictorial[y, x, i+1] = -1
# return pictorial
from tensorflow.keras.layers import Dense, Conv2D, MaxPooling2D, Dropout, Flatten
tf.random.set_seed(42)
np.random.seed(42)
#input_shape = [constants.height, constants.width, 1+4]
input_shape = [constants.height, constants.width, 1+1]
n_outputs = 6
model = keras.models.Sequential([
Conv2D(4, 3, activation="relu", padding="same", input_shape=input_shape),
#MaxPooling2D(2),
Conv2D(8, 3, activation="relu", padding="same"),
#Conv2D(128, 3, activation="relu", padding="same"),
#MaxPooling2D(2),
Flatten(),
#Dense(128, activation="elu"),
Dense(128, activation="elu"),
Dense(64, activation="elu"),
Dense(32, activation="elu"),
Dense(n_outputs)
])
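# Quick shape check (sketch): the two "same"-padded convs preserve the
# height x width grid, so Flatten feeds height*width*8 features into the
# dense head, and the model ends with one Q-value per action.
assert model.output_shape == (None, n_outputs)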
#h5 = "models/30_11_dDQN_light_tweak14/avg-1785.00-episode-11155-30_11_dDQN_light_tweak14-gold-1800-step-100-20200827-0903.h5"
#model = keras.models.load_model(h5)
target = keras.models.clone_model(model)
target.set_weights(model.get_weights())
from collections import deque
replay_memory = deque(maxlen=max_replay_len)
def sample_experiences(batch_size):
indices = np.random.randint(len(replay_memory), size=batch_size)
batch = [replay_memory[index] for index in indices]
states, actions, rewards, next_states, dones = [
np.array([experience[field_index] for experience in batch])
for field_index in range(5)]
return states, actions, rewards, next_states, dones
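# Standalone illustration (dummy data) of the transposition performed by
# sample_experiences(): a batch of 5-tuples becomes five aligned arrays.
_batch = [(np.zeros(2), 1, -1.0, np.ones(2), False),
          (np.ones(2), 3, 50.0, np.zeros(2), True)]
_states, _actions, _rewards, _next_states, _dones = [
    np.array([t[i] for t in _batch]) for i in range(5)]
assert _states.shape == (2, 2) and _dones.tolist() == [False, True]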
def epsilon_greedy_policy(state, epsilon=0, n_actions=6):
if np.random.rand() < epsilon:
return np.random.randint(n_actions)
else:
#pictorial = pictorial_state(state)
#Q_values = model.predict(pictorial[np.newaxis])
Q_values = model.predict(state[np.newaxis])
return np.argmax(Q_values[0])
def play_one_step(env, state, epsilon):
action = epsilon_greedy_policy(state, epsilon)
#next_state, reward, done, info = env.step(action)
env.step(str(action))
next_state = env.get_state()
reward = env.get_reward()
done = env.check_terminate()
replay_memory.append((state, action, reward, next_state, done))
return next_state, reward, done
#optimizer = keras.optimizers.Adam(lr=1e-3)
#optimizer = keras.optimizers.Adam(lr=2.5e-4)
optimizer = keras.optimizers.Adam(lr=lr_optimizer)
def training_step(batch_size):
experiences = sample_experiences(batch_size)
states, actions, rewards, next_states, dones = experiences
#pictorials = np.array([pictorial_state(s) for s in states])
#next_pictorials = np.array([pictorial_state(next_s) for next_s in next_states])
#next_Q_values = model.predict(next_pictorials)
next_Q_values = model.predict(next_states)
#max_next_Q_values = np.max(next_Q_values, axis=1)
best_next_actions = np.argmax(next_Q_values, axis=1)
next_mask = tf.one_hot(best_next_actions, n_outputs).numpy()
next_best_Q_values = (target.predict(next_states) * next_mask).sum(axis=1)
#target_Q_values = rewards + (1 - dones) * discount_rate * max_next_Q_values
target_Q_values = rewards + (1 - dones) * discount_rate * next_best_Q_values
target_Q_values = target_Q_values.reshape(-1, 1)
mask = tf.one_hot(actions, n_outputs)
with tf.GradientTape() as tape:
#all_Q_values = model(pictorials)
all_Q_values = model(states)
Q_values = tf.reduce_sum(all_Q_values * mask, axis=1, keepdims=True)
loss = tf.reduce_mean(loss_fn(target_Q_values, Q_values))
grads = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(zip(grads, model.trainable_variables))
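# Numeric sketch of the double-DQN target computed above: the online net
# *chooses* the next action, the target net *evaluates* it, which damps the
# overestimation of vanilla DQN's max operator. Values are made up.
_q_online = np.array([[1.0, 3.0, 2.0, 0.0, 0.0, 0.0]])  # online net at s'
_q_target = np.array([[0.5, 2.5, 9.0, 0.0, 0.0, 0.0]])  # target net at s'
_a_star = np.argmax(_q_online, axis=1)                  # -> action 1
_q_eval = (_q_target * np.eye(6)[_a_star]).sum(axis=1)  # -> 2.5, not 9.0
assert _q_eval[0] == 2.5
# target_Q = reward + (1 - done) * discount_rate * 2.5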
np.random.seed(42)
tf.random.set_seed(42)
from constants import n_allowed_steps
now = datetime.datetime.now()
now_str = now.strftime("%Y%m%d-%H%M")
script_name = __file__.split('.')[0]
save_path = os.path.join("models", script_name)
os.makedirs(save_path, exist_ok=True)
scores = []
scores_avg = []
best_score = 0
k = 10
scores_k_most_recent = deque([0]*k, maxlen=k)
best_score_avg = 1400
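# With maxlen=k the deque above is a sliding window: appending the (k+1)-th
# score silently drops the oldest, so np.mean(scores_k_most_recent) below is
# a k-episode moving average (seeded with zeros).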
with open(os.path.join(save_path, f"log-{now_str}.txt"), 'w') as log:
for episode in range(n_episodes):
eliminated = []
mapID = np.random.randint(0, 5)
posID_x = np.random.randint(constants.width)
posID_y = np.random.randint(constants.height)
request = "map{},{},{},50,100".format(mapID, posID_x, posID_y)
env.send_map_info(request)
env.reset()
obs = env.get_state()
undiscounted_return = 0
for step in range(n_allowed_steps):
epsilon = max(1 - episode / n_epsilon_decay, 0.01)
obs, reward, done = play_one_step(env, obs, epsilon)
undiscounted_return += reward
if done:
break
score = env.state.score
scores.append(score)
scores_k_most_recent.append(score)
#score_avg = np.mean(scores_k_most_recent)
score_avg = round(np.mean(scores_k_most_recent), 1)
scores_avg.append(score_avg)
#if score > best_score:
if score_avg > best_score_avg:
#best_weights = model.get_weights()
best_score_avg = score_avg
#best_score = score
#model.save(os.path.join(save_path, f"episode-{episode+1}-gold-{env.state.score}-avg-{score_avg:4.2f}-step-{step+1}-{now_str}.h5"))
model.save(os.path.join(save_path, f"avg-{score_avg:07.2f}-episode-{episode+1}-{__file__.split('.')[0]}-gold-{env.state.score}-step-{step+1}-{now_str}.h5"))
#message = "(Episode {: 5d}/{}) Gold {: 4d} avg {: 8.2f} undisc_return {: 6d} step {: 3d} eps {:.2f} ({})\n".format(episode+1, n_episodes, env.state.score, score_avg, undiscounted_return, step + 1, epsilon, constants.agent_state_id2str[env.state.status])
message = "(Episode {: 5d}/{}) Gold {: 4d} avg {: 8.1f} undisc_return {: 6d} step {: 3d} eps: {:.2f} ({})\n".format(episode+1, n_episodes, env.state.score, score_avg, undiscounted_return, step + 1, epsilon, constants.agent_state_id2str[env.state.status])
##############################################
#score = env.state.score*(n_allowed_steps - step)
#score = env.state.score
#scores.append(score)
#if score > best_score:
# #best_weights = model.get_weights()
# best_score = score
# model.save(os.path.join(save_path, f"episode-{episode+1}-gold-{env.state.score}-step-{step+1}-{now_str}.h5"))
#message = "(Episode {: 5d}/{}) Gold: {: 4d} undiscounted_return: {: 6d} Steps: {: 3d} eps: {:.3f} ({})\n".format(episode+1, n_episodes, env.state.score, undiscounted_return, step + 1, epsilon, constants.agent_state_id2str[env.state.status])
print(message, end='')
log.write(message)
#if episode > 500:
if episode > n_episodes_buf_fill:
training_step(batch_size)
if episode % n_episodes_buf_fill == 0:
target.set_weights(model.get_weights())
#np.save(f"scores-{now_str}", np.array(scores))
#np.save(f"scores-N-scores_avg-{now_str}", np.array([scores, scores_avg]))
np.save(f"scores-N-scores_avg-{__file__.split('.')[0]}-{now_str}", np.array([scores, scores_avg]))
#tensorflow/python/training/tracking/data_structures_test.py
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import copy
import os
import numpy
import six
from tensorflow.python.eager import context
from tensorflow.python.eager import test
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import test_util
from tensorflow.python.keras.engine import training
from tensorflow.python.keras.layers import core
from tensorflow.python.keras.layers import normalization
from tensorflow.python.layers import core as non_keras_core
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import resource_variable_ops
from tensorflow.python.ops import variables
from tensorflow.python.training.tracking import data_structures
from tensorflow.python.training.tracking import tracking
from tensorflow.python.training.tracking import util
class HasList(training.Model):
def __init__(self):
super(HasList, self).__init__()
self.layer_list = data_structures.List([core.Dense(3)])
self.layer_list.append(core.Dense(4))
self.layer_list.extend(
[core.Dense(5),
core.Dense(6, kernel_regularizer=math_ops.reduce_sum)])
self.layer_list += [
core.Dense(7, bias_regularizer=math_ops.reduce_sum),
core.Dense(8)
]
self.layer_list += (
data_structures.List([core.Dense(9)]) + data_structures.List(
[core.Dense(10)]))
self.layer_list.extend(
data_structures.List(
list([core.Dense(11)]) + [core.Dense(12)]))
self.layers_with_updates = data_structures.List(
(normalization.BatchNormalization(),))
def call(self, x):
aggregation = 0.
for l in self.layer_list:
x = l(x)
aggregation += math_ops.reduce_sum(x)
bn, = self.layers_with_updates
return bn(x) / aggregation
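# The tests below exercise the tracking wired up in HasList: every layer
# appended to, extended into, or concatenated onto `layer_list` shows up in
# `model.layers` and becomes a checkpoint dependency of the list itself.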
class ListTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testTracking(self):
model = HasList()
output = model(array_ops.ones([32, 2]))
self.assertAllEqual([32, 12], output.shape)
self.assertEqual(11, len(model.layers))
self.assertEqual(10, len(model.layer_list.layers))
six.assertCountEqual(
self,
model.layers,
model.layer_list.layers + model.layers_with_updates)
for index in range(10):
self.assertEqual(3 + index, model.layer_list.layers[index].units)
self.assertEqual(2, len(model._checkpoint_dependencies))
self.assertIs(model.layer_list, model._checkpoint_dependencies[0].ref)
self.assertIs(model.layers_with_updates,
model._checkpoint_dependencies[1].ref)
self.assertEqual(
10, len(model._checkpoint_dependencies[0].ref._checkpoint_dependencies))
self.evaluate([v.initializer for v in model.variables])
self.evaluate(model.variables[0].assign([[1., 2., 3.], [4., 5., 6.]]))
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
self.evaluate(model.variables[0].assign(array_ops.zeros([2, 3])))
model.load_weights(save_path)
self.assertAllEqual([[1., 2., 3.], [4., 5., 6.]],
self.evaluate(model.variables[0]))
v = variables.Variable(1.)
model.var_list = [v]
self.assertIn(v, model.variables)
self.assertIn(v, model.trainable_variables)
self.assertNotIn(v, model.non_trainable_variables)
@test_util.run_v1_only("b/120545219")
def testUpdatesForwarded(self):
with context.graph_mode():
model = HasList()
model_input = array_ops.ones([32, 2])
model(model_input)
self.assertGreater(len(model.layers_with_updates[0].updates), 0)
self.assertEqual(set(model.layers_with_updates[0].updates),
set(model.updates))
with context.eager_mode():
model = HasList()
model_input = array_ops.ones([32, 2])
model(model_input)
self.assertEqual(0, len(model.updates))
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testLossesForwarded(self):
model = HasList()
model_input = array_ops.ones([32, 2])
model(model_input)
self.assertEqual(2, len(model.losses))
def testModelContainersCompareEqual(self):
class HasEqualContainers(training.Model):
def __init__(self):
super(HasEqualContainers, self).__init__()
self.l1 = []
self.l2 = []
model = HasEqualContainers()
first_layer = HasEqualContainers()
model.l1.append(first_layer)
second_layer = HasEqualContainers()
model.l2.append(second_layer)
self.assertEqual([first_layer, second_layer], model.layers)
def testNotTrackable(self):
class NotTrackable(object):
pass
with self.assertRaises(ValueError):
data_structures.List([NotTrackable()])
def testCallNotImplemented(self):
with self.assertRaisesRegexp(TypeError, "not callable"):
data_structures.List()(1.)
def testNoPop(self):
with self.assertRaises(AttributeError):
data_structures.List().pop()
@test_util.run_in_graph_and_eager_modes
def testTensorConversion(self):
class ListToTensor(training.Model):
def __init__(self):
super(ListToTensor, self).__init__()
self.l = [1., 2., 3.]
self.assertAllEqual(
[1., 2., 3.],
self.evaluate(constant_op.constant(ListToTensor().l)))
self.assertAllEqual(
[1., 2., 3.],
self.evaluate(array_ops.pack(ListToTensor().l)))
def testNesting(self):
with context.graph_mode():
inner = data_structures.List()
outer = data_structures.List([inner])
inner.append(non_keras_core.Dense(1))
inner[0](array_ops.ones([2, 3]))
self.assertEqual(2, len(outer.variables))
self.assertIsInstance(
outer.variables[0],
resource_variable_ops.ResourceVariable)
def testNonLayerVariables(self):
v = resource_variable_ops.ResourceVariable([1.])
l = data_structures.List([v])
self.assertTrue(l.trainable)
self.assertEqual([], l.layers)
self.assertEqual([v], l.variables)
self.assertEqual([v], l.trainable_weights)
self.assertEqual([], l.non_trainable_variables)
l.trainable = False
self.assertEqual([v], l.variables)
self.assertEqual([], l.trainable_variables)
self.assertEqual([v], l.non_trainable_variables)
l.trainable = True
v2 = resource_variable_ops.ResourceVariable(1., trainable=False)
l.append(v2)
self.assertEqual([v, v2], l.weights)
self.assertEqual([v], l.trainable_weights)
self.assertEqual([v2], l.non_trainable_weights)
def testCopy(self):
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
v3 = resource_variable_ops.ResourceVariable(1.)
l1 = data_structures.List([v1, v2])
l2 = l1.copy()
l2.append(v3)
self.assertEqual(list(l1), [v1, v2])
self.assertEqual(list(l2), [v1, v2, v3])
def testSlicing(self):
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
v3 = resource_variable_ops.ResourceVariable(1.)
v4 = resource_variable_ops.ResourceVariable(1.)
l = data_structures.List([v1, v2, v3, v4])
self.assertEqual(l[1:], [v2, v3, v4])
self.assertEqual(l[1:-1], [v2, v3])
self.assertEqual(l[:-1], [v1, v2, v3])
def testHash(self):
has_sequences = set([data_structures.List(),
data_structures.List()])
self.assertEqual(2, len(has_sequences))
self.assertNotIn(data_structures.List(), has_sequences)
def testIMul_zero(self):
l = data_structures.List([])
with self.assertRaisesRegexp(ValueError, "List only supports append"):
l *= 0
def testIMul(self):
v = resource_variable_ops.ResourceVariable(1.)
l = data_structures.List([v])
l *= 2
self.assertEqual(list(l), [v] * 2)
def testMul(self):
v = resource_variable_ops.ResourceVariable(1.)
l = data_structures.List([v, v, v])
self.assertEqual(list(l * 2), [v, v, v] * 2)
def testRMul(self):
v = resource_variable_ops.ResourceVariable(1.)
l = data_structures.List([v, v, v])
self.assertEqual(list(2 * l), [v, v, v] * 2)
class ListWrapperTest(test.TestCase):
IGNORED = ("__new__", "__init__", "__subclasshook__", "__getattribute__")
def test_overrides_all_list_methods(self):
not_overridden = []
for name in dir(list):
if name in ListWrapperTest.IGNORED:
continue
list_method = getattr(list, name)
if not callable(list_method):
continue
object_method = getattr(object, name, None)
if object_method is not None and object_method == list_method:
# Skip methods that aren't overridden from object.
continue
if list_method == getattr(data_structures._ListWrapper, name):
not_overridden.append(name)
if not_overridden:
self.fail("_ListWrapper does not override %s" % (not_overridden))
def testListWrapperBasic(self):
# _ListWrapper, unlike List, compares like the built-in list type (since it
# is used to automatically replace lists).
a = tracking.AutoTrackable()
b = tracking.AutoTrackable()
self.assertEqual([a, a],
[a, a])
self.assertEqual(data_structures._ListWrapper([a, a]),
data_structures._ListWrapper([a, a]))
self.assertEqual([a, a],
data_structures._ListWrapper([a, a]))
self.assertEqual(data_structures._ListWrapper([a, a]),
[a, a])
self.assertNotEqual([a, a],
[b, a])
self.assertNotEqual(data_structures._ListWrapper([a, a]),
data_structures._ListWrapper([b, a]))
self.assertNotEqual([a, a],
data_structures._ListWrapper([b, a]))
self.assertLess([a], [a, b])
self.assertLess(data_structures._ListWrapper([a]),
data_structures._ListWrapper([a, b]))
self.assertLessEqual([a], [a, b])
self.assertLessEqual(data_structures._ListWrapper([a]),
data_structures._ListWrapper([a, b]))
self.assertGreater([a, b], [a])
self.assertGreater(data_structures._ListWrapper([a, b]),
data_structures._ListWrapper([a]))
self.assertGreaterEqual([a, b], [a])
self.assertGreaterEqual(data_structures._ListWrapper([a, b]),
data_structures._ListWrapper([a]))
self.assertEqual([a], data_structures._ListWrapper([a]))
self.assertEqual([a], list(data_structures.List([a])))
self.assertEqual([a, a], data_structures._ListWrapper([a]) + [a])
self.assertEqual([a, a], [a] + data_structures._ListWrapper([a]))
self.assertIsInstance(data_structures._ListWrapper([a]), list)
def testAcceptsNonTrackableContent(self):
l = data_structures._ListWrapper([1, 2, 3])
self.assertEqual(l, [1, 2, 3])
def testWrapperChangesList(self):
l = []
l_wrapper = data_structures._ListWrapper(l)
l_wrapper.append(1)
self.assertEqual([1], l)
def testListChangesWrapper(self):
l = []
l_wrapper = data_structures._ListWrapper(l)
l.append(1)
self.assertEqual([1], l_wrapper)
def testLayerCollectionWithExternalMutation(self):
l = []
l_wrapper = data_structures._ListWrapper(l)
layer = core.Dense(1)
l.append(layer)
self.assertEqual([layer], l_wrapper.layers)
def testNotHashable(self):
with self.assertRaises(TypeError):
hash(data_structures._ListWrapper())
def testDelItem(self):
l = data_structures._ListWrapper([1, 2, 3, 4])
del l[0]
self.assertEqual(l, [2, 3, 4])
self.assertUnableToSave(l, "Unable to save .*__delitem__")
def testDelSlice(self):
l = data_structures._ListWrapper([1, 2, 3, 4])
del l[2:3]
self.assertEqual(l, [1, 2, 4])
self.assertUnableToSave(l, "Unable to save .*__delslice__")
def testSetSlice_canSaveForNonTrackableItems(self):
l = data_structures._ListWrapper([1, 2, 3, 4])
l[:] = 2, 8, 9, 0
self.assertEqual(l, [2, 8, 9, 0])
l._maybe_initialize_trackable() # pylint: disable=protected-access
self.assertEqual(len(l._checkpoint_dependencies), 0) # pylint: disable=protected-access
def testSetSlice_cannotSaveIfTrackableModified(self):
v1 = resource_variable_ops.ResourceVariable(1.)
v2 = resource_variable_ops.ResourceVariable(1.)
l = data_structures._ListWrapper([1, 2, v1, v2])
l[:] = 2, 8, 9, v2
self.assertEqual(l, [2, 8, 9, v2])
self.assertUnableToSave(l, "Unable to save .*__setslice__")
def testSetSlice_truncate(self):
l = data_structures._ListWrapper([1, 2, 3, 4])
l[:] = []
self.assertEqual(l, [])
def testSetSlice_extend(self):
l = data_structures._ListWrapper([1, 2, 3, 4])
l[2:] = 1, 2, 3, 4
self.assertEqual(l, [1, 2, 1, 2, 3, 4])
def testIMulNegative(self):
l = data_structures._ListWrapper([1, 2, 3, 4])
l *= -1
self.assertEqual(l, [1, 2, 3, 4] * -1)
self.assertUnableToSave(l, "Unable to save")
def testIMulPositive(self):
v = variables.Variable(1.)
l = data_structures._ListWrapper([1, 2, 3, 4, v])
self.assertEqual([("4", v)], l._checkpoint_dependencies)
root = util.Checkpoint(l=l)
prefix = os.path.join(self.get_temp_dir(), "ckpt")
path = root.save(prefix)
v.assign(5.)
l *= 2
self.assertEqual(l, [1, 2, 3, 4, v, 1, 2, 3, 4, v])
self.assertEqual([("4", v), ("9", v)], l._checkpoint_dependencies)
root.restore(path)
self.assertAllClose(1., v.numpy())
def testSort(self):
l = data_structures._ListWrapper([1, 2, 3, 4])
l.sort()
self.assertEqual(l, [1, 2, 3, 4])
# Regardless of being a no-op for the input list, we still refuse to save.
# This is intentional since otherwise we would end up with a hard to debug
# case for users (e.g. sometimes sort on a ListWrapper is trackable and
# other times it is not).
self.assertUnableToSave(l, "Unable to save .*sort")
def assertUnableToSave(self, l, msg):
l._maybe_initialize_trackable() # pylint: disable=protected-access
with self.assertRaisesRegexp(ValueError, msg):
return l._checkpoint_dependencies # pylint: disable=protected-access
class HasMapping(training.Model):
def __init__(self):
super(HasMapping, self).__init__()
self.layer_dict = data_structures.Mapping(output=core.Dense(7))
self.layer_dict["norm"] = data_structures.List()
self.layer_dict["dense"] = data_structures.List()
self.layer_dict["dense"].extend(
[core.Dense(5),
core.Dense(6, kernel_regularizer=math_ops.reduce_sum)])
self.layer_dict["norm"].append(
normalization.BatchNormalization())
self.layer_dict["norm"].append(
normalization.BatchNormalization())
def call(self, x):
aggregation = 0.
for norm, dense in zip(self.layer_dict["norm"], self.layer_dict["dense"]):
x = norm(dense(x))
aggregation += math_ops.reduce_sum(x)
return self.layer_dict["output"](x) / aggregation
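# As with HasList above, the tests below check that layers reached through
# dict keys (and through lists nested inside the mapping) are tracked,
# checkpointed, and surfaced via `model.layers`.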
class MappingTests(test.TestCase):
@test_util.run_in_graph_and_eager_modes
@test_util.run_v1_only("b/120545219")
def testTracking(self):
model = HasMapping()
output = model(array_ops.ones([32, 2]))
self.assertAllEqual([32, 7], output.shape)
self.assertEqual(5, len(model.layers))
six.assertCountEqual(self, model.layers, model.layer_dict.layers)
self.assertEqual(1, len(model._checkpoint_dependencies))
self.assertIs(model.layer_dict, model._checkpoint_dependencies[0].ref)
self.evaluate([v.initializer for v in model.variables])
test_var = model.layer_dict["output"].kernel
self.evaluate(test_var.assign(array_ops.ones([6, 7])))
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
self.evaluate(test_var.assign(array_ops.zeros([6, 7])))
model.load_weights(save_path)
self.assertAllEqual(numpy.ones([6, 7]),
self.evaluate(test_var))
def testNoOverwrite(self):
mapping = data_structures.Mapping()
original = data_structures.List()
mapping["a"] = original
with self.assertRaises(ValueError):
mapping["a"] = data_structures.List()
self.assertIs(original, mapping["a"])
with self.assertRaises(AttributeError):
del mapping["a"]
mapping.update(b=data_structures.Mapping())
with self.assertRaises(ValueError):
mapping.update({"b": data_structures.Mapping()})
def testNonStringKeys(self):
mapping = data_structures.Mapping()
with self.assertRaises(TypeError):
mapping[1] = data_structures.List()
def testLayerCollectionWithExternalMutation(self):
d = {}
root = tracking.AutoTrackable()
root.wrapper = d
self.assertEqual([], root.wrapper.layers)
self.assertEqual([], root.wrapper.trainable_weights)
layer1 = core.Dense(1)
layer2 = core.Dense(1)
d["a"] = layer1
d["b"] = layer2
self.assertEqual([layer1, layer2], root.wrapper.layers)
# The layers have still not created variables
self.assertEqual([], root.wrapper.trainable_weights)
def testHashing(self):
has_mappings = set([data_structures.Mapping(),
data_structures.Mapping()])
self.assertEqual(2, len(has_mappings))
self.assertNotIn(data_structures.Mapping(), has_mappings)
# In contrast to Mapping, dict wrappers are not hashable
a = tracking.AutoTrackable()
a.d = {}
self.assertEqual({}, a.d)
self.assertFalse({} != a.d) # pylint: disable=g-explicit-bool-comparison
self.assertNotEqual({1: 2}, a.d)
with self.assertRaisesRegexp(TypeError, "unhashable"):
set([a.d])
def testDictWrapperBadKeys(self):
a = tracking.AutoTrackable()
a.d = {}
a.d[1] = data_structures.List()
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegexp(ValueError, "non-string key"):
model.save_weights(save_path)
def testDictWrapperNoDependency(self):
a = tracking.AutoTrackable()
a.d = data_structures.NoDependency({})
a.d[1] = [3]
self.assertEqual([a], util.list_objects(a))
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
model.load_weights(save_path)
def testNonStringKeyNotTrackableValue(self):
a = tracking.AutoTrackable()
a.d = {}
a.d["a"] = [3]
a.d[1] = data_structures.NoDependency([3])
self.assertEqual([a, a.d, a.d["a"]], util.list_objects(a))
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
model.load_weights(save_path)
def testNonAppendNotTrackable(self):
# Non-append mutations (deleting or overwriting values) are OK when the
# values aren't tracked.
a = tracking.AutoTrackable()
a.d = {}
a.d["a"] = [3]
a.d[1] = 3
a.d[1] = 2
self.assertEqual(2, a.d[1])
del a.d[1]
a.d[2] = data_structures.NoDependency(tracking.AutoTrackable())
second = tracking.AutoTrackable()
a.d[2] = data_structures.NoDependency(second)
self.assertIs(second, a.d[2])
self.assertEqual([a, a.d, a.d["a"]], util.list_objects(a))
model = training.Model()
model.sub = a
save_path = os.path.join(self.get_temp_dir(), "ckpt")
model.save_weights(save_path)
model.load_weights(save_path)
def testDelNoSave(self):
model = training.Model()
model.d = {}
model.d["a"] = []
del model.d["a"]
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegexp(ValueError, "overwritten or deleted"):
model.save_weights(save_path)
def testPopNoSave(self):
model = training.Model()
model.d = {}
model.d["a"] = []
model.d.pop("a")
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegexp(ValueError, "overwritten or deleted"):
model.save_weights(save_path)
def testExternalModificationNoSave(self):
model = training.Model()
external_reference = {}
model.d = external_reference
external_reference["a"] = []
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegexp(ValueError, "modified outside the wrapper"):
model.save_weights(save_path)
def testOverwriteNoSave(self):
model = training.Model()
model.d = {}
model.d["a"] = {}
model.d["a"] = {}
save_path = os.path.join(self.get_temp_dir(), "ckpt")
with self.assertRaisesRegexp(ValueError, "overwritten or deleted"):
model.save_weights(save_path)
def testIter(self):
model = training.Model()
model.d = {1: 3}
model.d[1] = 3
self.assertEqual([1], list(model.d))
new_dict = {}
# This update() is super tricky. If the dict wrapper subclasses dict,
# CPython will access its storage directly instead of calling any
# methods/properties on the object. So the options are either not to
# subclass dict (in which case update will call normal iter methods, but the
# object won't pass isinstance checks) or to subclass dict and keep that
# storage updated (no shadowing all its methods like _ListWrapper).
new_dict.update(model.d)
self.assertEqual({1: 3}, new_dict)
def testListShallowCopy(self):
root = tracking.AutoTrackable()
orig_list = [[1.]]
root.a = orig_list
copied = copy.copy(root.a)
self.assertAllEqual([[1.]], copied)
self.assertIsNot(root.a, copied)
self.assertIs(root.a[0], copied[0])
# Dirtiness should be inherited
util.list_objects(root.a)
orig_list.append(1.)
with self.assertRaises(ValueError):
util.list_objects(root.a)
with self.assertRaises(ValueError):
util.list_objects(copy.copy(root.a))
def testListDeepCopy(self):
root = tracking.AutoTrackable()
orig_list = [[1.]]
root.a = orig_list
copied = copy.deepcopy(root.a)
self.assertAllEqual([[1.]], copied)
self.assertIsNot(root.a, copied)
self.assertIsNot(root.a[0], copied[0])
# Dirtiness should be inherited
util.list_objects(root.a)
orig_list.append(1.)
with self.assertRaises(ValueError):
util.list_objects(root.a)
with self.assertRaises(ValueError):
util.list_objects(copy.deepcopy(root.a))
def testDictShallowCopy(self):
root = tracking.AutoTrackable()
orig_dict = {"a": [1.]}
root.a = orig_dict
copied = copy.copy(root.a)
self.assertAllEqual([1.], copied["a"])
self.assertIsNot(root.a, copied)
self.assertIs(root.a["a"], copied["a"])
copied = root.a.copy()
self.assertAllEqual([1.], copied["a"])
self.assertIsNot(root.a, copied)
self.assertIs(root.a["a"], copied["a"])
# Dirtiness should be inherited
util.list_objects(root.a)
orig_dict["b"] = []
with self.assertRaises(ValueError):
util.list_objects(root.a)
with self.assertRaises(ValueError):
util.list_objects(copy.copy(root.a))
def testDictDeepCopy(self):
root = tracking.AutoTrackable()
orig_dict = {"a": [1.]}
root.a = orig_dict
copied = copy.deepcopy(root.a)
self.assertAllEqual([1.], copied["a"])
self.assertIsNot(root.a, copied)
self.assertIsNot(root.a["a"], copied["a"])
# Dirtiness should be inherited
util.list_objects(root.a)
orig_dict["b"] = []
with self.assertRaises(ValueError):
util.list_objects(root.a)
with self.assertRaises(ValueError):
util.list_objects(copy.deepcopy(root.a))
def testShallowCopyTrackable(self):
original = tracking.AutoTrackable()
original_sub = tracking.AutoTrackable()
original.a = [[1.]]
original.b = {"a": original_sub}
shallow_copied = copy.copy(original)
self.assertIs(original_sub, shallow_copied.b["a"])
self.assertIsNot(original, shallow_copied)
self.assertEqual([[1.]], shallow_copied.a)
shallow_deps = util.list_objects(shallow_copied)
self.assertIn(shallow_copied.a, shallow_deps)
self.assertIn(shallow_copied.b, shallow_deps)
self.assertIn(shallow_copied.b["a"], shallow_deps)
def testDeepCopyTrackable(self):
original = tracking.AutoTrackable()
original_sub = tracking.AutoTrackable()
original.a = [[1.]]
original.b = {"a": original_sub}
deep_copied = copy.deepcopy(original)
self.assertIsNot(original, deep_copied)
self.assertIsNot(original_sub, deep_copied.b["a"])
self.assertEqual([[1.]], deep_copied.a)
self.assertIsInstance(deep_copied.b["a"], tracking.AutoTrackable)
deps = util.list_objects(deep_copied)
self.assertIn(deep_copied.a, deps)
self.assertIn(deep_copied.b, deps)
self.assertIn(deep_copied.b["a"], deps)
self.assertNotIn(original_sub, deps)
def testConstructableFromSequence(self):
result = data_structures._DictWrapper([(1, 2), (3, 4)])
self.assertIsInstance(result, dict)
self.assertEqual({1: 2, 3: 4}, result)
def testListAddOrder(self):
self.assertEqual([1., 2.],
data_structures._ListWrapper([1.])
+ data_structures._ListWrapper([2.]))
self.assertEqual([1., 2.],
data_structures._ListWrapper([1.])
+ [2.])
self.assertEqual([1., 2.],
[1.]
+ data_structures._ListWrapper([2.]))
if __name__ == "__main__":
test.main()
'tensorflow.python.eager.context.eager_mode', 'context.eager_mode', ([], {}), False, 'from tensorflow.python.eager import context\n'), (121, 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[32, 2]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (181, 'tensorflow.python.eager.context.graph_mode', 'context.graph_mode', ([], {}), False, 'from tensorflow.python.eager import context\n'), (182, 'tensorflow.python.training.tracking.data_structures.List', 'data_structures.List', ([], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (183, 'tensorflow.python.training.tracking.data_structures.List', 'data_structures.List', (['[inner]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (236, 'tensorflow.python.training.tracking.data_structures.List', 'data_structures.List', ([], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (294, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a, a]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (295, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a, a]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (297, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a, a]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (298, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a, a]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (302, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a, a]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (303, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[b, a]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (305, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[b, a]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (307, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (308, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a, b]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (310, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (311, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a, b]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (313, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a, b]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (314, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a]'], {}), False, 'from tensorflow.python.training.tracking import 
data_structures\n'), (316, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a, b]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (317, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (318, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (322, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (435, 'tensorflow.python.keras.layers.normalization.BatchNormalization', 'normalization.BatchNormalization', ([], {}), False, 'from tensorflow.python.keras.layers import normalization\n'), (437, 'tensorflow.python.keras.layers.normalization.BatchNormalization', 'normalization.BatchNormalization', ([], {}), False, 'from tensorflow.python.keras.layers import normalization\n'), (443, 'tensorflow.python.ops.math_ops.reduce_sum', 'math_ops.reduce_sum', (['x'], {}), False, 'from tensorflow.python.ops import math_ops\n'), (453, 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[32, 2]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (466, 'numpy.ones', 'numpy.ones', (['[6, 7]'], {}), False, 'import numpy\n'), (474, 'tensorflow.python.training.tracking.data_structures.List', 'data_structures.List', ([], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (485, 'tensorflow.python.training.tracking.data_structures.List', 'data_structures.List', ([], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (505, 'tensorflow.python.training.tracking.data_structures.Mapping', 'data_structures.Mapping', ([], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (529, 'tensorflow.python.training.tracking.util.list_objects', 'util.list_objects', (['a'], {}), False, 'from tensorflow.python.training.tracking import util\n'), (541, 'tensorflow.python.training.tracking.util.list_objects', 'util.list_objects', (['a'], {}), False, 'from tensorflow.python.training.tracking import util\n'), (558, 'tensorflow.python.training.tracking.tracking.AutoTrackable', 'tracking.AutoTrackable', ([], {}), False, 'from tensorflow.python.training.tracking import tracking\n'), (562, 'tensorflow.python.training.tracking.util.list_objects', 'util.list_objects', (['a'], {}), False, 'from tensorflow.python.training.tracking import util\n'), (633, 'tensorflow.python.training.tracking.util.list_objects', 'util.list_objects', (['root.a'], {}), False, 'from tensorflow.python.training.tracking import util\n'), (650, 'tensorflow.python.training.tracking.util.list_objects', 'util.list_objects', (['root.a'], {}), False, 'from tensorflow.python.training.tracking import util\n'), (672, 'tensorflow.python.training.tracking.util.list_objects', 'util.list_objects', (['root.a'], {}), False, 'from tensorflow.python.training.tracking import util\n'), (689, 'tensorflow.python.training.tracking.util.list_objects', 'util.list_objects', (['root.a'], {}), False, 'from tensorflow.python.training.tracking import util\n'), (46, 'tensorflow.python.keras.layers.core.Dense', 'core.Dense', (['(3)'], {}), False, 'from tensorflow.python.keras.layers 
import core\n'), (49, 'tensorflow.python.keras.layers.core.Dense', 'core.Dense', (['(5)'], {}), False, 'from tensorflow.python.keras.layers import core\n'), (50, 'tensorflow.python.keras.layers.core.Dense', 'core.Dense', (['(6)'], {'kernel_regularizer': 'math_ops.reduce_sum'}), False, 'from tensorflow.python.keras.layers import core\n'), (62, 'tensorflow.python.keras.layers.normalization.BatchNormalization', 'normalization.BatchNormalization', ([], {}), False, 'from tensorflow.python.keras.layers import normalization\n'), (99, 'tensorflow.python.ops.array_ops.zeros', 'array_ops.zeros', (['[2, 3]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (157, 'tensorflow.python.training.tracking.data_structures.List', 'data_structures.List', ([], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (184, 'tensorflow.python.layers.core.Dense', 'non_keras_core.Dense', (['(1)'], {}), True, 'from tensorflow.python.layers import core as non_keras_core\n'), (185, 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[2, 3]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (233, 'tensorflow.python.training.tracking.data_structures.List', 'data_structures.List', ([], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (234, 'tensorflow.python.training.tracking.data_structures.List', 'data_structures.List', ([], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (319, 'tensorflow.python.training.tracking.data_structures.List', 'data_structures.List', (['[a]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (320, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (321, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[a]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (349, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', ([], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (428, 'tensorflow.python.keras.layers.core.Dense', 'core.Dense', (['(7)'], {}), False, 'from tensorflow.python.keras.layers import core\n'), (432, 'tensorflow.python.keras.layers.core.Dense', 'core.Dense', (['(5)'], {}), False, 'from tensorflow.python.keras.layers import core\n'), (433, 'tensorflow.python.keras.layers.core.Dense', 'core.Dense', (['(6)'], {'kernel_regularizer': 'math_ops.reduce_sum'}), False, 'from tensorflow.python.keras.layers import core\n'), (461, 'tensorflow.python.ops.array_ops.ones', 'array_ops.ones', (['[6, 7]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (464, 'tensorflow.python.ops.array_ops.zeros', 'array_ops.zeros', (['[6, 7]'], {}), False, 'from tensorflow.python.ops import array_ops\n'), (478, 'tensorflow.python.training.tracking.data_structures.Mapping', 'data_structures.Mapping', ([], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (502, 'tensorflow.python.training.tracking.data_structures.Mapping', 'data_structures.Mapping', ([], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (503, 'tensorflow.python.training.tracking.data_structures.Mapping', 'data_structures.Mapping', ([], {}), False, 'from tensorflow.python.training.tracking import 
data_structures\n'), (635, 'copy.copy', 'copy.copy', (['root.a'], {}), False, 'import copy\n'), (652, 'copy.deepcopy', 'copy.deepcopy', (['root.a'], {}), False, 'import copy\n'), (674, 'copy.copy', 'copy.copy', (['root.a'], {}), False, 'import copy\n'), (691, 'copy.deepcopy', 'copy.deepcopy', (['root.a'], {}), False, 'import copy\n'), (730, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[1.0]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (731, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[2.0]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (733, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[1.0]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (737, 'tensorflow.python.training.tracking.data_structures._ListWrapper', 'data_structures._ListWrapper', (['[2.0]'], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (56, 'tensorflow.python.keras.layers.core.Dense', 'core.Dense', (['(9)'], {}), False, 'from tensorflow.python.keras.layers import core\n'), (57, 'tensorflow.python.keras.layers.core.Dense', 'core.Dense', (['(10)'], {}), False, 'from tensorflow.python.keras.layers import core\n'), (161, 'tensorflow.python.training.tracking.data_structures.List', 'data_structures.List', ([], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (480, 'tensorflow.python.training.tracking.data_structures.Mapping', 'data_structures.Mapping', ([], {}), False, 'from tensorflow.python.training.tracking import data_structures\n'), (60, 'tensorflow.python.keras.layers.core.Dense', 'core.Dense', (['(12)'], {}), False, 'from tensorflow.python.keras.layers import core\n'), (60, 'tensorflow.python.keras.layers.core.Dense', 'core.Dense', (['(11)'], {}), False, 'from tensorflow.python.keras.layers import core\n')] |
magenta/midi-ddsp | 3ff97496b42becead5289b349524b38f5b55d530 | # Copyright 2022 The MIDI-DDSP Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Model class for DDSP Inference module used in MIDI-DDSP."""
import tensorflow as tf
import ddsp
from midi_ddsp.utils.audio_io import tf_log_mel
from midi_ddsp.data_handling.instrument_name_utils import NUM_INST
from ddsp.training import nn
from ddsp.spectral_ops import F0_RANGE, DB_RANGE
tfk = tf.keras
tfkl = tfk.layers
class MelF0LDEncoder(tfkl.Layer):
"""The encoder in DDSP Inference.
  The MelF0LDEncoder takes audio, loudness and f0 as input.
  It extracts features from the audio using an 8-layer CNN, and from loudness
  and f0 using fully-connected layers. A bi-LSTM then extracts contextual
  features from the concatenated per-frame features.
"""
def __init__(self, cnn, nhid, sample_rate, win_length, hop_length, n_fft,
num_mels, fmin):
super().__init__()
self.nhid = nhid
self.cnn = cnn
self.z_fc = tfkl.Dense(nhid)
self.f0_ld_fc = tfkl.Dense(nhid)
self.rnn = tfkl.Bidirectional(
tfkl.LSTM(units=nhid, return_sequences=True), name='bilstm'
)
# TODO(yusongwu): change emb dim to 64
self.instrument_emb = tfkl.Embedding(NUM_INST, 128)
# mel-spec parameters
self.sample_rate = sample_rate
self.win_length = win_length
self.hop_length = hop_length
self.n_fft = n_fft
self.num_mels = num_mels
self.fmin = fmin
def call(self, inputs, training=False):
mel = tf_log_mel(inputs['audio'],
self.sample_rate,
self.win_length,
self.hop_length,
self.n_fft,
self.num_mels,
self.fmin)
z_cnn = self.cnn(mel, training=training)
z_reduce = self.z_fc(z_cnn)
instrument_z = tf.tile(
self.instrument_emb(inputs['instrument_id'])[:, tf.newaxis, :],
[1, z_cnn.shape[1], 1])
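    # Editorial note: the per-instrument embedding is tiled along the CNN's
    # time axis so it can be concatenated frame by frame; f0 and loudness are
    # therefore expected at the same frame rate as the CNN output.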
x = tf.concat([ddsp.core.hz_to_midi(inputs['f0_hz']) / F0_RANGE,
inputs['loudness_db'] / DB_RANGE], -1)
x_z = self.f0_ld_fc(x)
z_out = self.rnn(tf.concat([x_z, z_reduce, instrument_z], -1))
return z_out
class FCHarmonicDecoder(tfkl.Layer):
"""The decoder in DDSP Inference.
  The FCHarmonicDecoder takes a feature sequence as input and outputs the
  synthesis parameters for DDSP through fully-connected layers.
"""
  def __init__(self, nharmonic=100, nnoise=65):
super().__init__()
self.harmonic_amp_fc = tfkl.Dense(1, bias_initializer='ones')
    self.harmonic_distribution_fc = tfkl.Dense(nharmonic)
self.noise_mag_fc = tfkl.Dense(nnoise)
def get_synth_params(self, inputs):
z, data = inputs
harmonic_amp = self.harmonic_amp_fc(z)
harmonic_distribution = self.harmonic_distribution_fc(z)
noise_mag = self.noise_mag_fc(z)
synth_params = {
'f0_hz': data['f0_hz'],
'amplitudes': harmonic_amp,
'harmonic_distribution': harmonic_distribution,
'noise_magnitudes': noise_mag,
}
return synth_params
def call(self, inputs):
synth_params = self.get_synth_params(inputs)
return synth_params
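# Editorial sketch (not part of MIDI-DDSP): a minimal smoke test of
# FCHarmonicDecoder with made-up shapes; every dimension below is an
# assumption for illustration, and the function is never called on import.
def _demo_fc_harmonic_decoder():
  z = tf.random.normal([1, 250, 256])  # assumed [batch, frames, features]
  data = {'f0_hz': tf.zeros([1, 250, 1])}  # assumed f0 layout
  synth_params = FCHarmonicDecoder().get_synth_params((z, data))
  # amplitudes -> [1, 250, 1], harmonic_distribution -> [1, 250, 100],
  # noise_magnitudes -> [1, 250, 65]; f0_hz is passed through unchanged.
  return synth_params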
class F0LDEncoder(tfkl.Layer):
"""The encoder of original DDSP autoencoder."""
# TODO: (yusongwu) To be removed and use the decoders.RnnFcDecoder
def __init__(self):
super().__init__()
self.nhid = 512
self.f0_fc = nn.FcStack(self.nhid, layers=3)
self.ld_fc = nn.FcStack(self.nhid, layers=3)
self.instrument_emb = tfkl.Embedding(NUM_INST, 128)
self.rnn = tfkl.GRU(
units=self.nhid, return_sequences=True, # dropout=0.2,
)
def call(self, inputs, training=False):
z_f0 = self.f0_fc(ddsp.core.hz_to_midi(inputs['f0_hz']) / F0_RANGE)
z_ld = self.ld_fc(inputs['loudness_db'] / DB_RANGE)
instrument_z = tf.tile(
self.instrument_emb(inputs['instrument_id'])[:, tf.newaxis, :],
[1, z_ld.shape[1], 1])
x_z = tf.concat([z_f0, z_ld, instrument_z], -1)
z_out = self.rnn(x_z)
z_out = tf.concat([x_z, z_out], -1)
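    # Editorial note: concatenating x_z back onto the GRU output acts as a
    # skip connection, giving later layers both the raw conditioning features
    # and their temporal context.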
return z_out
class FCStackHarmonicDecoder(tfkl.Layer):
"""The decoder original DDSP autoencoder.
The FCStackHarmonicDecoder takes input of a feature sequence,
and output the synthesis parameters for DDSP through stacked MLP.
"""
def __init__(self, nharmonic=100, nnoise=65):
super().__init__()
self.output_splits = (
('amplitudes', 1), ('harmonic_distribution', nharmonic),
('noise_magnitudes', nnoise))
self.n_out = sum([v[1] for v in self.output_splits])
self.out_stack = nn.FcStack(512, layers=3)
self.dense_out = tfkl.Dense(self.n_out)
def get_synth_params(self, inputs):
z, data = inputs
z_output = self.out_stack(z)
synth_params = nn.split_to_dict(self.dense_out(z_output),
self.output_splits)
synth_params['f0_hz'] = data['f0_hz']
return synth_params
def call(self, inputs):
synth_params = self.get_synth_params(inputs)
return synth_params
class ConvBlock(tfkl.Layer):
"""
  A TensorFlow implementation of the ConvBlock used in AudioSet
  classification. This CNN performs well as a spectrogram feature
  extractor for audio tagging. Adapted from the PyTorch implementation:
https://github.com/qiuqiangkong/audioset_tagging_cnn.
paper: https://arxiv.org/abs/1912.10211.
Args:
out_channels: number of output channels.
pool_size: size of pooling, in height and width.
"""
def __init__(self, out_channels, pool_size=(2, 2)):
super().__init__()
self.conv1 = tfkl.Conv2D(filters=out_channels,
kernel_size=(3, 3), strides=(1, 1),
padding='same', use_bias=False,
kernel_initializer=
tf.keras.initializers.GlorotUniform())
self.conv2 = tfkl.Conv2D(filters=out_channels,
kernel_size=(3, 3), strides=(1, 1),
padding='same', use_bias=False,
kernel_initializer=
tf.keras.initializers.GlorotUniform())
self.bn1 = tfkl.BatchNormalization(beta_initializer='zeros',
gamma_initializer='ones')
self.bn2 = tfkl.BatchNormalization(beta_initializer='zeros',
gamma_initializer='ones')
self.max_pool = tfkl.MaxPool2D(pool_size=pool_size, padding='same')
self.avg_pool = tfkl.AveragePooling2D(pool_size=pool_size, padding='same')
def call(self, inputs, training=None, pool_type='avg'):
x = inputs
x = tf.nn.relu(self.bn1(self.conv1(x), training=training))
x = tf.nn.relu(self.bn2(self.conv2(x), training=training))
if pool_type == 'max':
x = self.max_pool(x)
elif pool_type == 'avg':
x = self.avg_pool(x)
elif pool_type == 'avg+max':
x1 = self.avg_pool(x)
x2 = self.max_pool(x)
x = x1 + x2
else:
      raise ValueError(f'Unknown pool_type: {pool_type!r}')
return x
class Cnn8(tfkl.Layer):
"""
  A TensorFlow implementation of the CNN8 used in AudioSet classification.
  This CNN performs well as a spectrogram feature extractor for audio
  tagging. Adapted from the PyTorch implementation:
https://github.com/qiuqiangkong/audioset_tagging_cnn.
paper: https://arxiv.org/abs/1912.10211.
"""
def __init__(self, pool_size=(2, 2), dropout=0.2):
super().__init__()
self.conv_block1 = ConvBlock(out_channels=64, pool_size=pool_size)
self.conv_block2 = ConvBlock(out_channels=128, pool_size=pool_size)
self.conv_block3 = ConvBlock(out_channels=256, pool_size=pool_size)
self.conv_block4 = ConvBlock(out_channels=512, pool_size=pool_size)
self.dropout = tfkl.Dropout(rate=dropout)
def call(self, x, training=None):
x = x[..., tf.newaxis]
x = self.conv_block1(x, pool_type='avg', training=training)
x = self.dropout(x, training=training)
x = self.conv_block2(x, pool_type='avg', training=training)
x = self.dropout(x, training=training)
x = self.conv_block3(x, pool_type='avg', training=training)
x = self.dropout(x, training=training)
x = self.conv_block4(x, pool_type='avg', training=training)
x = self.dropout(x, training=training)
x = tf.reshape(x, [x.shape[0], x.shape[1], -1])
return x
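# Editorial sketch (not part of MIDI-DDSP): a shape check for Cnn8 with an
# assumed log-mel input. Each ConvBlock halves the time and mel axes, so four
# blocks pool time by 16x and the final reshape flattens channels * mels.
def _demo_cnn8_shapes():
  dummy_mel = tf.random.normal([2, 128, 64])  # assumed [batch, frames, mels]
  features = Cnn8()(dummy_mel, training=False)
  assert features.shape == (2, 8, 4 * 512)  # 128/16 frames, (64/16 mels) * 512 ch
  return features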
| [
"tensorflow.reshape",
"tensorflow.concat",
"tensorflow.keras.initializers.GlorotUniform"
] | midi_ddsp/modules/ddsp_inference.py | [(59, 'midi_ddsp.utils.audio_io.tf_log_mel', 'tf_log_mel', (["inputs['audio']", 'self.sample_rate', 'self.win_length', 'self.hop_length', 'self.n_fft', 'self.num_mels', 'self.fmin'], {}), False, 'from midi_ddsp.utils.audio_io import tf_log_mel\n'), (121, 'ddsp.training.nn.FcStack', 'nn.FcStack', (['self.nhid'], {'layers': '(3)'}), False, 'from ddsp.training import nn\n'), (122, 'ddsp.training.nn.FcStack', 'nn.FcStack', (['self.nhid'], {'layers': '(3)'}), False, 'from ddsp.training import nn\n'), (134, 'tensorflow.concat', 'tf.concat', (['[z_f0, z_ld, instrument_z]', '(-1)'], {}), True, 'import tensorflow as tf\n'), (136, 'tensorflow.concat', 'tf.concat', (['[x_z, z_out]', '(-1)'], {}), True, 'import tensorflow as tf\n'), (153, 'ddsp.training.nn.FcStack', 'nn.FcStack', (['(512)'], {'layers': '(3)'}), False, 'from ddsp.training import nn\n'), (255, 'tensorflow.reshape', 'tf.reshape', (['x', '[x.shape[0], x.shape[1], -1]'], {}), True, 'import tensorflow as tf\n'), (74, 'tensorflow.concat', 'tf.concat', (['[x_z, z_reduce, instrument_z]', '(-1)'], {}), True, 'import tensorflow as tf\n'), (129, 'ddsp.core.hz_to_midi', 'ddsp.core.hz_to_midi', (["inputs['f0_hz']"], {}), False, 'import ddsp\n'), (191, 'tensorflow.keras.initializers.GlorotUniform', 'tf.keras.initializers.GlorotUniform', ([], {}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.keras.initializers.GlorotUniform', 'tf.keras.initializers.GlorotUniform', ([], {}), True, 'import tensorflow as tf\n'), (71, 'ddsp.core.hz_to_midi', 'ddsp.core.hz_to_midi', (["inputs['f0_hz']"], {}), False, 'import ddsp\n')] |
seawavve/Random_CNN | 5dee90ddc8a79d4b4f2d9c5bd83e62e910c6fc83 | # -*- coding: utf-8 -*-
"""0902_rand_cnn.ipynb
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1p6_O9ie0cDHaiKxQBn935uTVwj85cEz9
"""
'''
pilab seawavve
random cnn
2020.05.20~
Acc: 91.28% Epoch:75
****PATCH NOTE****
0520 built the CNN network
0000 EarlyStopping & ModelCheckpoint
0000 image augmentation
0621 bypass (shortcut) connections
0902 random architecture sampling
TODO: tune the depth & shortcut-add probabilities
TODO: debug why summary() reports more layers than the number counted
'''
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import sys
import random
import numpy as np
from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras import datasets
def make_rand(net_list): # return the derived layers in a list
lis=list()
    re_seed=random.randint(1,4) # spawn 1-4 derived layers
for i in range(re_seed):
        seed=random.randint(1,4) # pick the derived layer type at random
if seed==1:
im_output= layers.Conv2D(filters=64, kernel_size=[3,3], padding='same', activation='relu')(output)
elif seed==2:
im_output= layers.Dropout(rate=0.25)(output)
elif seed==3:
im_output= layers.MaxPooling2D(pool_size=[3, 3], padding='same', strides=1)(output)
elif seed==4:
im_output = layers.Activation('relu')(output)
lis.append(im_output)
return lis
def make_short_cut(a_layer,b_layer): # build a shortcut (Add) from the two given layers
im_output = layers.Add()([a_layer,b_layer])
return im_output
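# Editorial sketch (addition, not in the original script): seeding Python's
# `random`, NumPy and TensorFlow up front makes a sampled architecture
# reproducible across runs.
def set_global_seeds(seed=42):
    random.seed(seed)
    np.random.seed(seed)
    tf.random.set_seed(seed)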
print('Python version : ', sys.version)
print('Keras version : ', keras.__version__)
img_rows = 28
img_cols = 28
(x_train, y_train), (x_test, y_test) = keras.datasets.fashion_mnist.load_data()
input_shape = (img_rows, img_cols, 1)
x_train = x_train.reshape(x_train.shape[0], img_rows, img_cols, 1)
x_test = x_test.reshape(x_test.shape[0], img_rows, img_cols, 1)
x_train = x_train.astype('float32') / 255.
x_test = x_test.astype('float32') / 255.
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
batch_size = 128
num_classes = 10
epochs = 300
filename = 'checkpoint.h5'
early_stopping=EarlyStopping(monitor='val_loss',mode='min',patience=15,verbose=1) # early stopping
checkpoint=ModelCheckpoint(filename,monitor='val_loss',verbose=1,save_best_only=True,mode='auto') # model checkpoint
y_train = keras.utils.to_categorical(y_train, num_classes)
y_test = keras.utils.to_categorical(y_test, num_classes)
inputs = keras.Input(shape=input_shape, name='input' )
output= layers.Conv2D(filters=64, kernel_size=[3,3], padding='same', activation='relu')(inputs)
net_list=list()
add_num=0
for depth in range(5): # choose the depth
    a=make_rand(net_list) # create derived layers at random
    net_list.extend(a)
    print('length of the list built by make_rand:',len(a))
    if len(a)==1:r_num=0 # pick one layer from a and attach it to output
    else:r_num=random.randint(0,len(a)-1)
    print('random index number:',r_num+1)
    output=a[r_num]
    short_cut_dec=random.randint(1,5) # add a shortcut with 40% probability (1 or 2 out of 1..5)
    if short_cut_dec==1 or short_cut_dec==2:
        add_num=add_num+1
        a_layer_num=random.randint(0,len(net_list)-1)
        c=make_short_cut(net_list[a_layer_num],output)
        output=c
    print('\n',depth+1,'layers added,',add_num,'shortcuts added')
output = layers.GlobalAveragePooling2D()(output)
output = layers.Dense(1000, activation='relu')(output)
dropout = layers.Dropout(rate=0.25)(output)
output = layers.Dense(10, activation='softmax')(dropout)
model = keras.Model(inputs=inputs, outputs=output)
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
history = model.fit(x_train, y_train, batch_size=batch_size, epochs=epochs, verbose=1, validation_data=(x_test, y_test),callbacks=[checkpoint,early_stopping])
score = model.evaluate(x_test, y_test, verbose=0)
print('Test loss:', score[0])
print('Test accuracy:', score[1])
model.save('MNIST_CNN_model.h5')
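# Editorial note: the best validation-loss weights were also written to
# 'checkpoint.h5' by ModelCheckpoint above and can be restored later with
# tf.keras.models.load_model('checkpoint.h5').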
| [
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.datasets.fashion_mnist.load_data",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.Input",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.utils.to_categorical"
] | CNN/0902_rand_cnn.py | [(31, 'warnings.simplefilter', 'warnings.simplefilter', ([], {'action': '"""ignore"""', 'category': 'FutureWarning'}), False, 'import warnings\n'), (68, 'tensorflow.keras.datasets.fashion_mnist.load_data', 'keras.datasets.fashion_mnist.load_data', ([], {}), False, 'from tensorflow import keras\n'), (86, 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'mode': '"""min"""', 'patience': '(15)', 'verbose': '(1)'}), False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n'), (87, 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['filename'], {'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""auto"""'}), False, 'from tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\n'), (90, 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_train', 'num_classes'], {}), False, 'from tensorflow import keras\n'), (91, 'tensorflow.keras.utils.to_categorical', 'keras.utils.to_categorical', (['y_test', 'num_classes'], {}), False, 'from tensorflow import keras\n'), (93, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': 'input_shape', 'name': '"""input"""'}), False, 'from tensorflow import keras\n'), (121, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'inputs', 'outputs': 'output'}), False, 'from tensorflow import keras\n'), (44, 'random.randint', 'random.randint', (['(1)', '(4)'], {}), False, 'import random\n'), (94, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '[3, 3]', 'padding': '"""same"""', 'activation': '"""relu"""'}), False, 'from tensorflow.keras import layers\n'), (107, 'random.randint', 'random.randint', (['(1)', '(5)'], {}), False, 'import random\n'), (116, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'layers.GlobalAveragePooling2D', ([], {}), False, 'from tensorflow.keras import layers\n'), (117, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1000)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras import layers\n'), (118, 'tensorflow.keras.layers.Dropout', 'layers.Dropout', ([], {'rate': '(0.25)'}), False, 'from tensorflow.keras import layers\n'), (119, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(10)'], {'activation': '"""softmax"""'}), False, 'from tensorflow.keras import layers\n'), (46, 'random.randint', 'random.randint', (['(1)', '(4)'], {}), False, 'import random\n'), (59, 'tensorflow.keras.layers.Add', 'layers.Add', ([], {}), False, 'from tensorflow.keras import layers\n'), (48, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '[3, 3]', 'padding': '"""same"""', 'activation': '"""relu"""'}), False, 'from tensorflow.keras import layers\n'), (50, 'tensorflow.keras.layers.Dropout', 'layers.Dropout', ([], {'rate': '(0.25)'}), False, 'from tensorflow.keras import layers\n'), (52, 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '[3, 3]', 'padding': '"""same"""', 'strides': '(1)'}), False, 'from tensorflow.keras import layers\n'), (54, 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras import layers\n')] |
carlgogo/dl4ds | 2675fe772b7e165ab8726a51c75dd3d9d0a7a465 | import tensorflow as tf
from tensorflow.keras.layers import (Add, Conv2D, Input, Concatenate,
TimeDistributed)
from tensorflow.keras.models import Model
from .blocks import (RecurrentConvBlock, ResidualBlock, ConvBlock,
DenseBlock, TransitionBlock, LocalizedConvBlock,
get_dropout_layer)
from ..utils import checkarg_backbone, checkarg_dropout_variant
def recnet_pin(
backbone_block,
n_channels,
n_aux_channels,
hr_size,
time_window,
# ----- below are parameters that shall be tweaked by the user -----
n_channels_out=1,
n_filters=8,
n_blocks=6,
normalization=None,
dropout_rate=0,
dropout_variant=None,
attention=False,
activation='relu',
output_activation=None,
localcon_layer=False):
"""
Recurrent deep neural network with different backbone architectures
(according to the ``backbone_block``) and pre-upsampling via interpolation
(the samples are expected to be interpolated to the HR grid). This model is
capable of exploiting spatio-temporal samples.
The interpolation method depends on the ``interpolation`` argument used in
the training procedure (which is passed to the DataGenerator).
Parameters
----------
backbone_block : str
Backbone type. One of dl4ds.BACKBONE_BLOCKS. WARNING: this parameter is
        not supposed to be set by the user. It's set internally through
dl4ds.Trainers.
n_channels : int
Number of channels/variables in each sample. WARNING: this parameter is
        not supposed to be set by the user. It's set internally through
dl4ds.Trainers.
n_aux_channels : int
Number of auxiliary channels. WARNING: this parameter is not supposed to
        be set by the user. It's set internally through dl4ds.Trainers.
hr_size : tuple
Height and width of the HR grid. WARNING: this parameter is not supposed
        to be set by the user. It's set internally through dl4ds.Trainers.
time_window : int
Temporal window or number of time steps in each sample. WARNING: this
        parameter is not supposed to be set by the user. It's set internally
through dl4ds.Trainers.
n_filters : int, optional
Number of convolutional filters in RecurrentConvBlock. `n_filters` sets
the number of output filters in the convolution inside the ConvLSTM unit.
n_blocks : int, optional
Number of recurrent convolutional blocks (RecurrentConvBlock).
Sets the depth of the network.
normalization : str or None, optional
Normalization method in the residual or dense block. Can be either 'bn'
for BatchNormalization or 'ln' for LayerNormalization. If None, then no
        normalization is performed (e.g., for the 'resnet' backbone this results
in the EDSR-style residual block).
dropout_rate : float, optional
Float between 0 and 1. Fraction of the input units to drop. If 0 then no
dropout is applied.
dropout_variant : str or None, optional
Type of dropout. Defined in dl4ds.DROPOUT_VARIANTS variable.
attention : bool, optional
If True, dl4ds.ChannelAttention2D is used in convolutional blocks.
activation : str, optional
Activation function to use, as supported by tf.keras. E.g., 'relu' or
'gelu'.
output_activation : str, optional
Activation function to use in the last ConvBlock. Useful to constraint
the values distribution of the output grid.
localcon_layer : bool, optional
If True, the LocalizedConvBlock is activated in the output module.
"""
backbone_block = checkarg_backbone(backbone_block)
dropout_variant = checkarg_dropout_variant(dropout_variant)
    auxvar_array_is_given = n_aux_channels > 0
h_hr, w_hr = hr_size
if not localcon_layer:
x_in = Input(shape=(None, None, None, n_channels))
else:
x_in = Input(shape=(None, h_hr, w_hr, n_channels))
init_n_filters = n_filters
x = b = RecurrentConvBlock(n_filters, activation=activation,
normalization=normalization)(x_in)
for i in range(n_blocks):
b = RecurrentConvBlock(n_filters, activation=activation,
normalization=normalization, dropout_rate=dropout_rate,
dropout_variant=dropout_variant, name_suffix=str(i + 2))(b)
b = get_dropout_layer(dropout_rate, dropout_variant, dim=3)(b)
if backbone_block == 'convnet':
x = b
elif backbone_block == 'resnet':
x = Add()([x, b])
elif backbone_block == 'densenet':
x = Concatenate()([x, b])
#---------------------------------------------------------------------------
# HR aux channels are processed
if auxvar_array_is_given:
s_in = Input(shape=(None, None, n_aux_channels))
s = ConvBlock(n_filters, activation=activation, dropout_rate=0,
normalization=None, attention=attention)(s_in)
s = tf.expand_dims(s, 1)
s = tf.repeat(s, time_window, axis=1)
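        # Editorial note: the static HR auxiliary grid gains a time axis and is
        # repeated across the temporal window so it can be concatenated with
        # the recurrent features at every frame.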
x = Concatenate()([x, s])
#---------------------------------------------------------------------------
# Localized convolutional layer
if localcon_layer:
lcb = LocalizedConvBlock(filters=2, use_bias=True)
lws = TimeDistributed(lcb, name='localized_conv_block')(x)
x = Concatenate()([x, lws])
#---------------------------------------------------------------------------
# Last conv layers
x = TransitionBlock(init_n_filters, name='TransitionLast')(x)
x = ConvBlock(init_n_filters, activation=None, dropout_rate=dropout_rate,
normalization=normalization, attention=True)(x)
x = ConvBlock(n_channels_out, activation=output_activation, dropout_rate=0,
normalization=normalization, attention=False)(x)
model_name = 'rec' + backbone_block + '_pin'
if auxvar_array_is_given:
return Model(inputs=[x_in, s_in], outputs=x, name=model_name)
else:
return Model(inputs=[x_in], outputs=x, name=model_name)
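# Editorial usage sketch: the first five arguments are normally filled in by
# dl4ds.Trainers; every value below is an illustrative assumption only.
def _demo_recnet_pin():
    model = recnet_pin(backbone_block='resnet', n_channels=1, n_aux_channels=0,
                       hr_size=(64, 64), time_window=8)
    model.summary()
    return model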
| [
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.TimeDistributed",
"tensorflow.expand_dims",
"tensorflow.repeat",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.Input"
] | dl4ds/models/spt_preups.py | [(91, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, None, None, n_channels)'}), False, 'from tensorflow.keras.layers import Add, Conv2D, Input, Concatenate, TimeDistributed\n'), (93, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, h_hr, w_hr, n_channels)'}), False, 'from tensorflow.keras.layers import Add, Conv2D, Input, Concatenate, TimeDistributed\n'), (117, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(None, None, n_aux_channels)'}), False, 'from tensorflow.keras.layers import Add, Conv2D, Input, Concatenate, TimeDistributed\n'), (120, 'tensorflow.expand_dims', 'tf.expand_dims', (['s', '(1)'], {}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.repeat', 'tf.repeat', (['s', 'time_window'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[x_in, s_in]', 'outputs': 'x', 'name': 'model_name'}), False, 'from tensorflow.keras.models import Model\n'), (144, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[x_in]', 'outputs': 'x', 'name': 'model_name'}), False, 'from tensorflow.keras.models import Model\n'), (122, 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), False, 'from tensorflow.keras.layers import Add, Conv2D, Input, Concatenate, TimeDistributed\n'), (128, 'tensorflow.keras.layers.TimeDistributed', 'TimeDistributed', (['lcb'], {'name': '"""localized_conv_block"""'}), False, 'from tensorflow.keras.layers import Add, Conv2D, Input, Concatenate, TimeDistributed\n'), (129, 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), False, 'from tensorflow.keras.layers import Add, Conv2D, Input, Concatenate, TimeDistributed\n'), (110, 'tensorflow.keras.layers.Add', 'Add', ([], {}), False, 'from tensorflow.keras.layers import Add, Conv2D, Input, Concatenate, TimeDistributed\n'), (112, 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), False, 'from tensorflow.keras.layers import Add, Conv2D, Input, Concatenate, TimeDistributed\n')] |
fwgg8547/deeplean_mc | 1b858e59caf082df0cd4b1ca12dc21875fb00b26 | from absl import app, flags, logging
from absl.flags import FLAGS
import tensorflow as tf
import numpy as np
import cv2
from tensorflow.keras.callbacks import (
ReduceLROnPlateau,
EarlyStopping,
ModelCheckpoint,
TensorBoard
)
from yolov3_tf2.models import (
YoloV3, YoloV3Tiny, YoloLoss,
yolo_anchors, yolo_anchor_masks,
yolo_tiny_anchors, yolo_tiny_anchor_masks
)
from yolov3_tf2.utils import freeze_all
import yolov3_tf2.dataset as dataset
flags.DEFINE_string('dataset', '', 'path to dataset')
flags.DEFINE_string('val_dataset', '', 'path to validation dataset')
flags.DEFINE_boolean('tiny', False, 'yolov3 or yolov3-tiny')
flags.DEFINE_string('weights', './checkpoints/mine.tf',
'path to weights file')
flags.DEFINE_string('classes', './data/vocmine.names', 'path to classes file')
flags.DEFINE_enum('mode', 'fit', ['fit', 'eager_fit', 'eager_tf'],
'fit: model.fit, '
'eager_fit: model.fit(run_eagerly=True), '
'eager_tf: custom GradientTape')
flags.DEFINE_enum('transfer', 'none',
['none', 'darknet', 'no_output', 'frozen', 'fine_tune'],
'none: Training from scratch, '
'darknet: Transfer darknet, '
'no_output: Transfer all but output, '
'frozen: Transfer and freeze all, '
'fine_tune: Transfer all and freeze darknet only')
flags.DEFINE_integer('size', 416, 'image size')
flags.DEFINE_integer('epochs', 2, 'number of epochs')
flags.DEFINE_integer('batch_size', 8, 'batch size')
flags.DEFINE_float('learning_rate', 1e-3, 'learning rate')
flags.DEFINE_integer('num_classes', 80, 'number of classes in the model')
flags.DEFINE_integer('weights_num_classes', None, 'specify number of classes for `weights` file if different, '
                     'useful in transfer learning with different number of classes')
def main(_argv):
physical_devices = tf.config.experimental.list_physical_devices('GPU')
for physical_device in physical_devices:
tf.config.experimental.set_memory_growth(physical_device, True)
if FLAGS.tiny:
model = YoloV3Tiny(FLAGS.size, training=True,
classes=FLAGS.num_classes)
anchors = yolo_tiny_anchors
anchor_masks = yolo_tiny_anchor_masks
else:
model = YoloV3(FLAGS.size, training=True, classes=FLAGS.num_classes)
anchors = yolo_anchors
anchor_masks = yolo_anchor_masks
if FLAGS.dataset:
train_dataset = dataset.load_tfrecord_dataset(
FLAGS.dataset, FLAGS.classes, FLAGS.size)
else:
train_dataset = dataset.load_fake_dataset()
train_dataset = train_dataset.shuffle(buffer_size=512)
train_dataset = train_dataset.batch(FLAGS.batch_size)
train_dataset = train_dataset.map(lambda x, y: (
dataset.transform_images(x, FLAGS.size),
dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))
train_dataset = train_dataset.prefetch(
buffer_size=tf.data.experimental.AUTOTUNE)
if FLAGS.val_dataset:
val_dataset = dataset.load_tfrecord_dataset(
FLAGS.val_dataset, FLAGS.classes, FLAGS.size)
else:
val_dataset = dataset.load_fake_dataset()
val_dataset = val_dataset.batch(FLAGS.batch_size)
val_dataset = val_dataset.map(lambda x, y: (
dataset.transform_images(x, FLAGS.size),
dataset.transform_targets(y, anchors, anchor_masks, FLAGS.size)))
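    # Editorial note: the validation pipeline mirrors the training one minus
    # shuffling and prefetching, which is sufficient for periodic evaluation.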
# Configure the model for transfer learning
if FLAGS.transfer == 'none':
pass # Nothing to do
elif FLAGS.transfer in ['darknet', 'no_output']:
# Darknet transfer is a special case that works
# with incompatible number of classes
# reset top layers
if FLAGS.tiny:
model_pretrained = YoloV3Tiny(
FLAGS.size, training=True, classes=FLAGS.weights_num_classes or FLAGS.num_classes)
else:
model_pretrained = YoloV3(
FLAGS.size, training=True, classes=FLAGS.weights_num_classes or FLAGS.num_classes)
model_pretrained.load_weights(FLAGS.weights)
if FLAGS.transfer == 'darknet':
model.get_layer('yolo_darknet').set_weights(
model_pretrained.get_layer('yolo_darknet').get_weights())
freeze_all(model.get_layer('yolo_darknet'))
elif FLAGS.transfer == 'no_output':
for l in model.layers:
if not l.name.startswith('yolo_output'):
l.set_weights(model_pretrained.get_layer(
l.name).get_weights())
freeze_all(l)
else:
# All other transfer require matching classes
model.load_weights(FLAGS.weights)
if FLAGS.transfer == 'fine_tune':
# freeze darknet and fine tune other layers
darknet = model.get_layer('yolo_darknet')
freeze_all(darknet)
elif FLAGS.transfer == 'frozen':
# freeze everything
freeze_all(model)
    optimizer = tf.keras.optimizers.Adam(learning_rate=FLAGS.learning_rate)
loss = [YoloLoss(anchors[mask], classes=FLAGS.num_classes)
for mask in anchor_masks]
if FLAGS.mode == 'eager_tf':
# Eager mode is great for debugging
# Non eager graph mode is recommended for real training
avg_loss = tf.keras.metrics.Mean('loss', dtype=tf.float32)
avg_val_loss = tf.keras.metrics.Mean('val_loss', dtype=tf.float32)
for epoch in range(1, FLAGS.epochs + 1):
for batch, (images, labels) in enumerate(train_dataset):
with tf.GradientTape() as tape:
outputs = model(images, training=True)
regularization_loss = tf.reduce_sum(model.losses)
pred_loss = []
for output, label, loss_fn in zip(outputs, labels, loss):
pred_loss.append(loss_fn(label, output))
total_loss = tf.reduce_sum(pred_loss) + regularization_loss
grads = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(
zip(grads, model.trainable_variables))
logging.info("{}_train_{}, {}, {}".format(
epoch, batch, total_loss.numpy(),
list(map(lambda x: np.sum(x.numpy()), pred_loss))))
avg_loss.update_state(total_loss)
for batch, (images, labels) in enumerate(val_dataset):
outputs = model(images)
regularization_loss = tf.reduce_sum(model.losses)
pred_loss = []
for output, label, loss_fn in zip(outputs, labels, loss):
pred_loss.append(loss_fn(label, output))
total_loss = tf.reduce_sum(pred_loss) + regularization_loss
logging.info("{}_val_{}, {}, {}".format(
epoch, batch, total_loss.numpy(),
list(map(lambda x: np.sum(x.numpy()), pred_loss))))
avg_val_loss.update_state(total_loss)
logging.info("{}, train: {}, val: {}".format(
epoch,
avg_loss.result().numpy(),
avg_val_loss.result().numpy()))
avg_loss.reset_states()
avg_val_loss.reset_states()
model.save_weights(
'checkpoints/yolov3_train_{}.tf'.format(epoch))
else:
model.compile(optimizer=optimizer, loss=loss,
run_eagerly=(FLAGS.mode == 'eager_fit'))
callbacks = [
ReduceLROnPlateau(verbose=1),
EarlyStopping(patience=3, verbose=1),
ModelCheckpoint('checkpoints/yolov3_train_{epoch}.tf',
verbose=1, save_weights_only=True),
TensorBoard(log_dir='logs')
]
history = model.fit(train_dataset,
epochs=FLAGS.epochs,
callbacks=callbacks,
validation_data=val_dataset)
if __name__ == '__main__':
try:
app.run(main)
except SystemExit:
pass
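# Editorial usage sketch (every flag value below is an illustrative assumption):
#   python trainmine.py --dataset ./data/train.tfrecord \
#       --val_dataset ./data/val.tfrecord --classes ./data/vocmine.names \
#       --num_classes 3 --transfer darknet --weights ./checkpoints/yolov3.tf \
#       --weights_num_classes 80 --mode fit --epochs 10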
| [
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.reduce_sum",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.metrics.Mean",
"tensorflow.GradientTape"
] | trainmine.py | [(21, 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""dataset"""', '""""""', '"""path to dataset"""'], {}), False, 'from absl import app, flags, logging\n'), (22, 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""val_dataset"""', '""""""', '"""path to validation dataset"""'], {}), False, 'from absl import app, flags, logging\n'), (23, 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""tiny"""', '(False)', '"""yolov3 or yolov3-tiny"""'], {}), False, 'from absl import app, flags, logging\n'), (24, 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""weights"""', '"""./checkpoints/mine.tf"""', '"""path to weights file"""'], {}), False, 'from absl import app, flags, logging\n'), (26, 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""classes"""', '"""./data/vocmine.names"""', '"""path to classes file"""'], {}), False, 'from absl import app, flags, logging\n'), (27, 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', (['"""mode"""', '"""fit"""', "['fit', 'eager_fit', 'eager_tf']", '"""fit: model.fit, eager_fit: model.fit(run_eagerly=True), eager_tf: custom GradientTape"""'], {}), False, 'from absl import app, flags, logging\n'), (31, 'absl.flags.DEFINE_enum', 'flags.DEFINE_enum', (['"""transfer"""', '"""none"""', "['none', 'darknet', 'no_output', 'frozen', 'fine_tune']", '"""none: Training from scratch, darknet: Transfer darknet, no_output: Transfer all but output, frozen: Transfer and freeze all, fine_tune: Transfer all and freeze darknet only"""'], {}), False, 'from absl import app, flags, logging\n'), (38, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""size"""', '(416)', '"""image size"""'], {}), False, 'from absl import app, flags, logging\n'), (39, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""epochs"""', '(2)', '"""number of epochs"""'], {}), False, 'from absl import app, flags, logging\n'), (40, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""batch_size"""', '(8)', '"""batch size"""'], {}), False, 'from absl import app, flags, logging\n'), (41, 'absl.flags.DEFINE_float', 'flags.DEFINE_float', (['"""learning_rate"""', '(0.001)', '"""learning rate"""'], {}), False, 'from absl import app, flags, logging\n'), (42, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_classes"""', '(80)', '"""number of classes in the model"""'], {}), False, 'from absl import app, flags, logging\n'), (43, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""weights_num_classes"""', 'None', '"""specify num class for `weights` file if different, useful in transfer learning with different number of classes"""'], {}), False, 'from absl import app, flags, logging\n'), (48, 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': 'FLAGS.learning_rate'}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_device', '(True)'], {}), True, 'import tensorflow as tf\n'), (53, 'yolov3_tf2.models.YoloV3Tiny', 'YoloV3Tiny', (['FLAGS.size'], {'training': '(True)', 'classes': 'FLAGS.num_classes'}), False, 'from yolov3_tf2.models import YoloV3, YoloV3Tiny, YoloLoss, yolo_anchors, yolo_anchor_masks, yolo_tiny_anchors, yolo_tiny_anchor_masks\n'), (58, 'yolov3_tf2.models.YoloV3', 'YoloV3', (['FLAGS.size'], {'training': '(True)', 'classes': 
'FLAGS.num_classes'}), False, 'from yolov3_tf2.models import YoloV3, YoloV3Tiny, YoloLoss, yolo_anchors, yolo_anchor_masks, yolo_tiny_anchors, yolo_tiny_anchor_masks\n'), (63, 'yolov3_tf2.dataset.load_tfrecord_dataset', 'dataset.load_tfrecord_dataset', (['FLAGS.dataset', 'FLAGS.classes', 'FLAGS.size'], {}), True, 'import yolov3_tf2.dataset as dataset\n'), (66, 'yolov3_tf2.dataset.load_fake_dataset', 'dataset.load_fake_dataset', ([], {}), True, 'import yolov3_tf2.dataset as dataset\n'), (76, 'yolov3_tf2.dataset.load_tfrecord_dataset', 'dataset.load_tfrecord_dataset', (['FLAGS.val_dataset', 'FLAGS.classes', 'FLAGS.size'], {}), True, 'import yolov3_tf2.dataset as dataset\n'), (79, 'yolov3_tf2.dataset.load_fake_dataset', 'dataset.load_fake_dataset', ([], {}), True, 'import yolov3_tf2.dataset as dataset\n'), (125, 'yolov3_tf2.models.YoloLoss', 'YoloLoss', (['anchors[mask]'], {'classes': 'FLAGS.num_classes'}), False, 'from yolov3_tf2.models import YoloV3, YoloV3Tiny, YoloLoss, yolo_anchors, yolo_anchor_masks, yolo_tiny_anchors, yolo_tiny_anchor_masks\n'), (131, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', (['"""loss"""'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (132, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', (['"""val_loss"""'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (195, 'absl.app.run', 'app.run', (['main'], {}), False, 'from absl import app, flags, logging\n'), (180, 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'verbose': '(1)'}), False, 'from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, TensorBoard\n'), (181, 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': '(3)', 'verbose': '(1)'}), False, 'from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, TensorBoard\n'), (182, 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['"""checkpoints/yolov3_train_{epoch}.tf"""'], {'verbose': '(1)', 'save_weights_only': '(True)'}), False, 'from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, TensorBoard\n'), (184, 'tensorflow.keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': '"""logs"""'}), False, 'from tensorflow.keras.callbacks import ReduceLROnPlateau, EarlyStopping, ModelCheckpoint, TensorBoard\n'), (70, 'yolov3_tf2.dataset.transform_images', 'dataset.transform_images', (['x', 'FLAGS.size'], {}), True, 'import yolov3_tf2.dataset as dataset\n'), (71, 'yolov3_tf2.dataset.transform_targets', 'dataset.transform_targets', (['y', 'anchors', 'anchor_masks', 'FLAGS.size'], {}), True, 'import yolov3_tf2.dataset as dataset\n'), (82, 'yolov3_tf2.dataset.transform_images', 'dataset.transform_images', (['x', 'FLAGS.size'], {}), True, 'import yolov3_tf2.dataset as dataset\n'), (83, 'yolov3_tf2.dataset.transform_targets', 'dataset.transform_targets', (['y', 'anchors', 'anchor_masks', 'FLAGS.size'], {}), True, 'import yolov3_tf2.dataset as dataset\n'), (94, 'yolov3_tf2.models.YoloV3Tiny', 'YoloV3Tiny', (['FLAGS.size'], {'training': '(True)', 'classes': '(FLAGS.weights_num_classes or FLAGS.num_classes)'}), False, 'from yolov3_tf2.models import YoloV3, YoloV3Tiny, YoloLoss, yolo_anchors, yolo_anchor_masks, yolo_tiny_anchors, yolo_tiny_anchor_masks\n'), (97, 'yolov3_tf2.models.YoloV3', 'YoloV3', (['FLAGS.size'], {'training': '(True)', 'classes': '(FLAGS.weights_num_classes or FLAGS.num_classes)'}), False, 'from yolov3_tf2.models import YoloV3, YoloV3Tiny, YoloLoss, yolo_anchors, yolo_anchor_masks, yolo_tiny_anchors, yolo_tiny_anchor_masks\n'), (119, 'yolov3_tf2.utils.freeze_all', 'freeze_all', (['darknet'], {}), False, 'from yolov3_tf2.utils import freeze_all\n'), (155, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['model.losses'], {}), True, 'import tensorflow as tf\n'), (122, 'yolov3_tf2.utils.freeze_all', 'freeze_all', (['model'], {}), False, 'from yolov3_tf2.utils import freeze_all\n'), (136, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['model.losses'], {}), True, 'import tensorflow as tf\n'), (159, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['pred_loss'], {}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['pred_loss'], {}), True, 'import tensorflow as tf\n'), (111, 'yolov3_tf2.utils.freeze_all', 'freeze_all', (['l'], {}), False, 'from yolov3_tf2.utils import freeze_all\n')]
DataLab-CQU/stellargraph | 5ca1e59e91cb6ac470bf19ff3da39b3a1a68650e | # -*- coding: utf-8 -*-
#
# Copyright 2018-2020 Data61, CSIRO
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
GAT tests
"""
import numpy as np
import pytest
import scipy.sparse as sps
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Input
from stellargraph.mapper import (
FullBatchNodeGenerator,
FullBatchLinkGenerator,
GraphSAGENodeGenerator,
)
from stellargraph.layer import *
from ..test_utils.graphs import example_graph
from .. import test_utils
pytestmark = test_utils.ignore_stellargraph_experimental_mark
class Test_GraphAttention:
"""
Tests of GraphAttention layer
"""
N = 10
F_in = 5
F_out = 2
attn_heads = 8
activation = "relu"
layer = GraphAttention
def get_inputs(self):
x_inp = [
Input(batch_shape=(1, self.N, self.F_in)),
Input(batch_shape=(1, self.N, self.N)),
]
# duplicate input here for Test_GraphAttentionSparse to work
return x_inp, x_inp
def get_matrix(self, edges=[]):
# adjacency matrix with self-loops only
A = np.eye(self.N)
for e, v in edges:
A[e[0], e[1]] = v
return [A[None, :, :]]
def test_constructor(self):
# attn_heads_reduction = "concat":
layer = self.layer(
units=self.F_out,
attn_heads=self.attn_heads,
attn_heads_reduction="concat",
activation=self.activation,
)
assert layer.units == self.F_out
assert layer.attn_heads == self.attn_heads
assert layer.output_dim == self.F_out * self.attn_heads
assert layer.activation == keras.activations.get(self.activation)
# attn_heads_reduction = "average":
layer = self.layer(
units=self.F_out,
attn_heads=self.attn_heads,
attn_heads_reduction="average",
activation=self.activation,
)
assert layer.output_dim == self.F_out
# attn_heads_reduction = "ave":
with pytest.raises(ValueError):
self.layer(
units=self.F_out,
attn_heads=self.attn_heads,
attn_heads_reduction="ave",
activation=self.activation,
)
def test_apply_concat(self):
gat = self.layer(
units=self.F_out,
attn_heads=self.attn_heads,
attn_heads_reduction="concat",
activation=self.activation,
kernel_initializer="ones",
)
x_inp, layer_inp = self.get_inputs()
# Instantiate layer with squeezed matrix
x_out = gat(layer_inp)
model = keras.Model(inputs=x_inp, outputs=x_out)
assert model.output_shape[-1] == self.F_out * self.attn_heads
As = self.get_matrix()
X = np.ones((1, self.N, self.F_in)) # features
expected = np.ones((self.N, self.F_out * self.attn_heads)) * self.F_in
actual = model.predict([X] + As)
np.testing.assert_allclose(actual.squeeze(), expected)
def test_apply_average(self):
gat = self.layer(
units=self.F_out,
attn_heads=self.attn_heads,
attn_heads_reduction="average",
activation=self.activation,
kernel_initializer="ones",
attn_kernel_initializer="zeros",
bias_initializer="zeros",
)
x_inp, layer_inp = self.get_inputs()
# Instantiate layer with squeezed matrix
x_out = gat(layer_inp)
model = keras.Model(inputs=x_inp, outputs=x_out)
assert model.output_shape[-1] == self.F_out
X = np.ones((1, self.N, self.F_in)) # features
for i in range(self.N):
X[:, i, :] = i + 1
As = self.get_matrix()
expected = (X * self.F_in)[..., : self.F_out]
actual = model.predict([X] + As)
np.testing.assert_allclose(actual.squeeze(), expected.squeeze())
def test_apply_average_with_neighbours(self):
gat_saliency = self.layer(
units=self.F_out,
attn_heads=self.attn_heads,
attn_heads_reduction="average",
activation=self.activation,
kernel_initializer="ones",
attn_kernel_initializer="zeros",
bias_initializer="zeros",
saliency_map_support=True,
)
gat_origin = self.layer(
units=self.F_out,
attn_heads=self.attn_heads,
attn_heads_reduction="average",
activation=self.activation,
kernel_initializer="ones",
attn_kernel_initializer="zeros",
bias_initializer="zeros",
saliency_map_support=False,
)
x_inp, layer_inp = self.get_inputs()
# Instantiate layer with squeezed matrix
x_out_saliency = gat_saliency(layer_inp)
x_out_origin = gat_origin(layer_inp)
model_origin = keras.Model(inputs=x_inp, outputs=x_out_origin)
model_saliency = keras.Model(inputs=x_inp, outputs=x_out_saliency)
assert model_origin.output_shape[-1] == self.F_out
assert model_saliency.output_shape[-1] == self.F_out
X = np.zeros((1, self.N, self.F_in)) # features
for i in range(self.N):
X[:, i, :] = i
As = self.get_matrix([((0, 1), 1), ((1, 0), 1)])
expected = (X * self.F_in)[..., : self.F_out]
expected[:, :2] = self.F_in / 2
actual_origin = model_origin.predict([X] + As)
actual_saliency = model_saliency.predict([X] + As)
np.testing.assert_allclose(expected, actual_origin)
np.testing.assert_allclose(expected, actual_saliency)
def test_layer_config(self):
layer = self.layer(
units=self.F_out,
attn_heads=self.attn_heads,
attn_heads_reduction="concat",
activation=self.activation,
)
conf = layer.get_config()
assert conf["units"] == self.F_out
assert conf["attn_heads"] == self.attn_heads
assert conf["attn_heads_reduction"] == "concat"
assert conf["activation"] == self.activation
assert conf["use_bias"] == True
assert conf["kernel_initializer"]["class_name"] == "GlorotUniform"
assert conf["bias_initializer"]["class_name"] == "Zeros"
assert conf["kernel_regularizer"] == None
assert conf["bias_regularizer"] == None
assert conf["kernel_constraint"] == None
assert conf["bias_constraint"] == None
class Test_GraphAttentionSparse(Test_GraphAttention):
"""
Tests of GraphAttentionSparse layer
"""
N = 10
F_in = 5
F_out = 2
attn_heads = 8
activation = "relu"
layer = GraphAttentionSparse
def get_inputs(self):
x_inp = [
Input(batch_shape=(1, self.N, self.F_in)),
Input(batch_shape=(1, None, 2), dtype="int64"),
Input(batch_shape=(1, None), dtype="float32"),
]
A_mat = SqueezedSparseConversion(shape=(self.N, self.N))(x_inp[1:])
# For dense matrix, remove batch dimension
layer_inp = x_inp[:1] + [A_mat]
return x_inp, layer_inp
def get_matrix(self, edges=[]):
# adjacency matrix with self-loops + edges
A_sparse = sps.eye(self.N, format="lil")
for e, v in edges:
A_sparse[e[0], e[1]] = v
# Extract indices & values to feed to tensorflow
A_sparse = A_sparse.tocoo()
A_indices = np.expand_dims(
np.hstack((A_sparse.row[:, None], A_sparse.col[:, None])), 0
)
A_values = np.expand_dims(A_sparse.data, 0)
return [A_indices, A_values]
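# --- Editor's sketch (hypothetical helper, not part of the original suite):
# the (indices, values) pair built by get_matrix above is exactly the payload
# a tf.sparse.SparseTensor needs once the leading batch axis is squeezed off,
# which is roughly what SqueezedSparseConversion performs inside the model.
def _sparse_adjacency_roundtrip_demo(n=4):
    a = sps.eye(n, format="coo")
    indices = np.hstack((a.row[:, None], a.col[:, None])).astype("int64")
    st = tf.sparse.SparseTensor(indices, a.data, dense_shape=(n, n))
    # Densifying recovers the self-loop-only adjacency matrix.
    np.testing.assert_allclose(tf.sparse.to_dense(st).numpy(), np.eye(n))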
class Test_GAT:
"""
Tests of GAT class
"""
N = 10
F_in = 5
F_out = 2
attn_heads = 8
layer_sizes = [4, 16]
activations = ["relu", "linear"]
sparse = False
method = "gat"
def test_constructor(self):
G = example_graph(feature_size=self.F_in)
gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)
# test default if no activations are passed:
gat = GAT(layer_sizes=self.layer_sizes, generator=gen, bias=True)
assert gat.activations == ["elu", "elu"]
# test error if too many activations:
with pytest.raises(ValueError):
gat = GAT(layer_sizes=[10], activations=self.activations, generator=gen)
# test error if too few activations:
with pytest.raises(ValueError):
gat = GAT(layer_sizes=[10, 10], activations=["relu"], generator=gen)
# test error where layer_sizes is not a list:
with pytest.raises(TypeError):
gat = GAT(
layer_sizes=10,
activations=self.activations,
attn_heads=self.attn_heads,
generator=gen,
bias=True,
)
# test error where layer_sizes values are not valid
with pytest.raises(ValueError):
gat = GAT(
layer_sizes=[4, 0],
activations=self.activations,
attn_heads=self.attn_heads,
generator=gen,
bias=True,
)
# test for incorrect length of att_heads list:
with pytest.raises(ValueError):
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=[8, 8, 1],
generator=gen,
bias=True,
)
# test for invalid values in att_heads list:
with pytest.raises(ValueError):
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=[8, 0],
generator=gen,
bias=True,
)
# test for invalid type of att_heads argument:
with pytest.raises(TypeError):
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=8.0,
generator=gen,
bias=True,
)
# test error where activations is not a list:
with pytest.raises(TypeError):
gat = GAT(
layer_sizes=self.layer_sizes,
activations="relu",
generator=gen,
bias=True,
)
# test attn_heads_reduction errors:
with pytest.raises(TypeError):
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=self.attn_heads,
attn_heads_reduction="concat",
generator=gen,
bias=True,
)
with pytest.raises(ValueError):
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=self.attn_heads,
attn_heads_reduction=["concat", "concat", "average"],
generator=gen,
bias=True,
)
with pytest.raises(ValueError):
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=self.attn_heads,
attn_heads_reduction=["concat", "sum"],
generator=gen,
bias=True,
)
# test error where len(activations) is not equal to len(layer_sizes):
with pytest.raises(ValueError):
gat = GAT(
layer_sizes=self.layer_sizes,
activations=["relu"],
generator=gen,
bias=True,
)
# Default attention heads reductions:
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=self.attn_heads,
generator=gen,
bias=True,
)
assert gat.activations == self.activations
assert gat.attn_heads_reduction == ["concat", "average"]
assert gat.generator == gen
# User-specified attention heads reductions:
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=self.attn_heads,
attn_heads_reduction=["concat", "concat"],
generator=gen,
bias=True,
)
assert gat.attn_heads_reduction == ["concat", "concat"]
def test_gat_build_constructor(self):
G = example_graph(feature_size=self.F_in)
gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=self.attn_heads,
generator=gen,
bias=True,
)
assert len(gat.in_out_tensors()) == 2
x_in, x_out = gat.in_out_tensors()
        assert len(x_in) == (4 if self.sparse else 3)
assert int(x_in[0].shape[-1]) == self.F_in
assert K.int_shape(x_in[-1]) == (1, G.number_of_nodes(), G.number_of_nodes())
assert int(x_out.shape[-1]) == self.layer_sizes[-1]
def test_gat_build_linkmodel_constructor(self):
G = example_graph(feature_size=self.F_in)
gen = FullBatchLinkGenerator(G, sparse=self.sparse, method=self.method)
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=self.attn_heads,
generator=gen,
bias=True,
)
assert len(gat.in_out_tensors()) == 2
x_in, x_out = gat.in_out_tensors()
        assert len(x_in) == (4 if self.sparse else 3)
assert int(x_in[0].shape[-1]) == self.F_in
assert int(x_out.shape[-1]) == self.layer_sizes[-1]
def test_gat_build_constructor_no_generator(self):
G = example_graph(feature_size=self.F_in)
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=self.attn_heads,
bias=True,
num_nodes=1000,
num_features=self.F_in,
multiplicity=1,
)
assert gat.use_sparse == False
x_in, x_out = gat.in_out_tensors()
        assert len(x_in) == (4 if self.sparse else 3)
assert int(x_in[0].shape[-1]) == self.F_in
assert int(x_out.shape[-1]) == self.layer_sizes[-1]
def test_gat_build_constructor_wrong_generator(self):
G = example_graph(feature_size=self.F_in)
gen = GraphSAGENodeGenerator(G, self.N, [5, 10])
# test error where generator is of the wrong type for GAT:
with pytest.raises(TypeError):
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=self.attn_heads,
bias=True,
generator=gen,
)
def test_gat_build_l2norm(self):
G = example_graph(feature_size=self.F_in)
gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=self.attn_heads,
generator=gen,
bias=True,
normalize="l2",
kernel_initializer="ones",
attn_kernel_initializer="ones",
)
x_in, x_out = gat.in_out_tensors()
model = keras.Model(inputs=x_in, outputs=x_out)
ng = gen.flow(G.nodes())
actual = model.predict(ng)
expected = np.ones((G.number_of_nodes(), self.layer_sizes[-1])) * (
1.0 / G.number_of_nodes()
)
np.testing.assert_allclose(expected, actual[0])
def test_gat_build_no_norm(self):
G = example_graph(feature_size=self.F_in)
gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=self.attn_heads,
generator=gen,
bias=True,
normalize=None,
kernel_initializer="ones",
attn_kernel_initializer="ones",
)
x_in, x_out = gat.in_out_tensors()
model = keras.Model(inputs=x_in, outputs=x_out)
ng = gen.flow(G.nodes())
actual = model.predict(ng)
expected = np.ones((G.number_of_nodes(), self.layer_sizes[-1])) * (
self.F_in
* self.layer_sizes[0]
* self.attn_heads
* np.max(G.node_features(G.nodes()))
)
np.testing.assert_allclose(expected, actual[0])
def test_gat_build_wrong_norm(self):
G = example_graph(feature_size=self.F_in)
gen = FullBatchNodeGenerator(G)
with pytest.raises(ValueError):
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=self.attn_heads,
generator=gen,
bias=True,
normalize="whatever",
)
def test_gat_serialize(self):
G = example_graph(feature_size=self.F_in)
gen = FullBatchNodeGenerator(G, sparse=self.sparse, method=self.method)
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=self.attn_heads,
generator=gen,
bias=True,
normalize="l2",
)
x_in, x_out = gat.in_out_tensors()
model = keras.Model(inputs=x_in, outputs=x_out)
ng = gen.flow(G.nodes())
# Save model
model_json = model.to_json()
# Set all weights to one
model_weights = [np.ones_like(w) for w in model.get_weights()]
# Load model from json & set all weights
model2 = keras.models.model_from_json(
model_json,
custom_objects={
"GraphAttention": GraphAttention,
"GatherIndices": GatherIndices,
},
)
model2.set_weights(model_weights)
# Test deserialized model
actual = model2.predict(ng)
expected = np.ones((G.number_of_nodes(), self.layer_sizes[-1])) * (
1.0 / G.number_of_nodes()
)
np.testing.assert_allclose(expected, actual[0])
def test_kernel_and_bias_defaults(self):
graph = example_graph(feature_size=self.F_in)
gen = FullBatchNodeGenerator(graph, sparse=self.sparse, method=self.method)
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=self.attn_heads,
generator=gen,
)
for layer in gat._layers:
if isinstance(layer, GraphAttention):
assert isinstance(
layer.kernel_initializer, tf.initializers.GlorotUniform
)
assert isinstance(layer.bias_initializer, tf.initializers.Zeros)
assert isinstance(
layer.attn_kernel_initializer, tf.initializers.GlorotUniform
)
assert layer.kernel_regularizer is None
assert layer.bias_regularizer is None
assert layer.attn_kernel_regularizer is None
assert layer.kernel_constraint is None
assert layer.bias_constraint is None
assert layer.attn_kernel_constraint is None
def test_save_load(self, tmpdir):
graph = example_graph(feature_size=self.F_in)
gen = FullBatchNodeGenerator(graph, sparse=self.sparse, method=self.method)
gat = GAT(
layer_sizes=self.layer_sizes,
activations=self.activations,
attn_heads=self.attn_heads,
generator=gen,
)
test_utils.model_save_load(tmpdir, gat)
class TestGATsparse(Test_GAT):
sparse = True
method = "gat"
| [
"scipy.sparse.eye",
"tensorflow.keras.backend.int_shape",
"tensorflow.keras.Model",
"tensorflow.keras.models.model_from_json",
"tensorflow.keras.activations.get",
"tensorflow.keras.layers.Input"
] | tests/layer/test_graph_attention.py | [(111, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'x_inp', 'outputs': 'x_out'}), False, 'from tensorflow import keras\n'), (138, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'x_inp', 'outputs': 'x_out'}), False, 'from tensorflow import keras\n'), (181, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'x_inp', 'outputs': 'x_out_origin'}), False, 'from tensorflow import keras\n'), (182, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'x_inp', 'outputs': 'x_out_saliency'}), False, 'from tensorflow import keras\n'), (249, 'scipy.sparse.eye', 'sps.eye', (['self.N'], {'format': '"""lil"""'}), True, 'import scipy.sparse as sps\n'), (277, 'stellargraph.mapper.FullBatchNodeGenerator', 'FullBatchNodeGenerator', (['G'], {'sparse': 'self.sparse', 'method': 'self.method'}), False, 'from stellargraph.mapper import FullBatchNodeGenerator, FullBatchLinkGenerator, GraphSAGENodeGenerator\n'), (414, 'stellargraph.mapper.FullBatchNodeGenerator', 'FullBatchNodeGenerator', (['G'], {'sparse': 'self.sparse', 'method': 'self.method'}), False, 'from stellargraph.mapper import FullBatchNodeGenerator, FullBatchLinkGenerator, GraphSAGENodeGenerator\n'), (432, 'stellargraph.mapper.FullBatchLinkGenerator', 'FullBatchLinkGenerator', (['G'], {'sparse': 'self.sparse', 'method': 'self.method'}), False, 'from stellargraph.mapper import FullBatchNodeGenerator, FullBatchLinkGenerator, GraphSAGENodeGenerator\n'), (467, 'stellargraph.mapper.GraphSAGENodeGenerator', 'GraphSAGENodeGenerator', (['G', 'self.N', '[5, 10]'], {}), False, 'from stellargraph.mapper import FullBatchNodeGenerator, FullBatchLinkGenerator, GraphSAGENodeGenerator\n'), (481, 'stellargraph.mapper.FullBatchNodeGenerator', 'FullBatchNodeGenerator', (['G'], {'sparse': 'self.sparse', 'method': 'self.method'}), False, 'from stellargraph.mapper import FullBatchNodeGenerator, FullBatchLinkGenerator, GraphSAGENodeGenerator\n'), (495, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'x_in', 'outputs': 'x_out'}), False, 'from tensorflow import keras\n'), (507, 'stellargraph.mapper.FullBatchNodeGenerator', 'FullBatchNodeGenerator', (['G'], {'sparse': 'self.sparse', 'method': 'self.method'}), False, 'from stellargraph.mapper import FullBatchNodeGenerator, FullBatchLinkGenerator, GraphSAGENodeGenerator\n'), (521, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'x_in', 'outputs': 'x_out'}), False, 'from tensorflow import keras\n'), (536, 'stellargraph.mapper.FullBatchNodeGenerator', 'FullBatchNodeGenerator', (['G'], {}), False, 'from stellargraph.mapper import FullBatchNodeGenerator, FullBatchLinkGenerator, GraphSAGENodeGenerator\n'), (549, 'stellargraph.mapper.FullBatchNodeGenerator', 'FullBatchNodeGenerator', (['G'], {'sparse': 'self.sparse', 'method': 'self.method'}), False, 'from stellargraph.mapper import FullBatchNodeGenerator, FullBatchLinkGenerator, GraphSAGENodeGenerator\n'), (560, 'tensorflow.keras.Model', 'keras.Model', ([], {'inputs': 'x_in', 'outputs': 'x_out'}), False, 'from tensorflow import keras\n'), (571, 'tensorflow.keras.models.model_from_json', 'keras.models.model_from_json', (['model_json'], {'custom_objects': "{'GraphAttention': GraphAttention, 'GatherIndices': GatherIndices}"}), False, 'from tensorflow import keras\n'), (589, 'stellargraph.mapper.FullBatchNodeGenerator', 'FullBatchNodeGenerator', (['graph'], {'sparse': 'self.sparse', 'method': 'self.method'}), False, 'from stellargraph.mapper import FullBatchNodeGenerator, FullBatchLinkGenerator, GraphSAGENodeGenerator\n'), (614, 'stellargraph.mapper.FullBatchNodeGenerator', 'FullBatchNodeGenerator', (['graph'], {'sparse': 'self.sparse', 'method': 'self.method'}), False, 'from stellargraph.mapper import FullBatchNodeGenerator, FullBatchLinkGenerator, GraphSAGENodeGenerator\n'), (53, 'tensorflow.keras.layers.Input', 'Input', ([], {'batch_shape': '(1, self.N, self.F_in)'}), False, 'from tensorflow.keras.layers import Input\n'), (54, 'tensorflow.keras.layers.Input', 'Input', ([], {'batch_shape': '(1, self.N, self.N)'}), False, 'from tensorflow.keras.layers import Input\n'), (78, 'tensorflow.keras.activations.get', 'keras.activations.get', (['self.activation'], {}), False, 'from tensorflow import keras\n'), (90, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'), (235, 'tensorflow.keras.layers.Input', 'Input', ([], {'batch_shape': '(1, self.N, self.F_in)'}), False, 'from tensorflow.keras.layers import Input\n'), (236, 'tensorflow.keras.layers.Input', 'Input', ([], {'batch_shape': '(1, None, 2)', 'dtype': '"""int64"""'}), False, 'from tensorflow.keras.layers import Input\n'), (237, 'tensorflow.keras.layers.Input', 'Input', ([], {'batch_shape': '(1, None)', 'dtype': '"""float32"""'}), False, 'from tensorflow.keras.layers import Input\n'), (283, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'), (287, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'), (291, 'pytest.raises', 'pytest.raises', (['TypeError'], {}), False, 'import pytest\n'), (301, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'), (311, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'), (321, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'), (331, 'pytest.raises', 'pytest.raises', (['TypeError'], {}), False, 'import pytest\n'), (341, 'pytest.raises', 'pytest.raises', (['TypeError'], {}), False, 'import pytest\n'), (350, 'pytest.raises', 'pytest.raises', (['TypeError'], {}), False, 'import pytest\n'), (359, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'), (368, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'), (379, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n'), (427, 'tensorflow.keras.backend.int_shape', 'K.int_shape', (['x_in[-1]'], {}), True, 'from tensorflow.keras import backend as K\n'), (470, 'pytest.raises', 'pytest.raises', (['TypeError'], {}), False, 'import pytest\n'), (537, 'pytest.raises', 'pytest.raises', (['ValueError'], {}), False, 'import pytest\n')]
hjkim-haga/TF-OD-API | 22ac477ff4dfb93fe7a32c94b5f0b1e74330902b | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""RetinaNet task definition."""
from typing import Any, Optional, List, Tuple, Mapping
from absl import logging
import tensorflow as tf
from official.common import dataset_fn
from official.core import base_task
from official.core import task_factory
from official.vision import keras_cv
from official.vision.beta.configs import retinanet as exp_cfg
from official.vision.beta.dataloaders import input_reader_factory
from official.vision.beta.dataloaders import retinanet_input
from official.vision.beta.dataloaders import tf_example_decoder
from official.vision.beta.dataloaders import tfds_factory
from official.vision.beta.dataloaders import tf_example_label_map_decoder
from official.vision.beta.evaluation import coco_evaluator
from official.vision.beta.modeling import factory
@task_factory.register_task_cls(exp_cfg.RetinaNetTask)
class RetinaNetTask(base_task.Task):
"""A single-replica view of training procedure.
  RetinaNet task provides artifacts for training/evaluation procedures, including
loading/iterating over Datasets, initializing the model, calculating the loss,
post-processing, and customized metrics with reduction.
"""
def build_model(self):
"""Build RetinaNet model."""
input_specs = tf.keras.layers.InputSpec(
shape=[None] + self.task_config.model.input_size)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = factory.build_retinanet(
input_specs=input_specs,
model_config=self.task_config.model,
l2_regularizer=l2_regularizer)
return model
def initialize(self, model: tf.keras.Model):
"""Loading pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.restore(ckpt_dir_or_file)
status.assert_consumed()
elif self.task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.restore(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
raise ValueError(
"Only 'all' or 'backbone' can be used to initialize the model.")
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def build_inputs(self,
params: exp_cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None):
"""Build input dataset."""
if params.tfds_name:
decoder = tfds_factory.get_detection_decoder(params.tfds_name)
else:
decoder_cfg = params.decoder.get()
if params.decoder.type == 'simple_decoder':
decoder = tf_example_decoder.TfExampleDecoder(
regenerate_source_id=decoder_cfg.regenerate_source_id)
elif params.decoder.type == 'label_map_decoder':
decoder = tf_example_label_map_decoder.TfExampleDecoderLabelMap(
label_map=decoder_cfg.label_map,
regenerate_source_id=decoder_cfg.regenerate_source_id)
else:
raise ValueError('Unknown decoder type: {}!'.format(
params.decoder.type))
parser = retinanet_input.Parser(
output_size=self.task_config.model.input_size[:2],
min_level=self.task_config.model.min_level,
max_level=self.task_config.model.max_level,
num_scales=self.task_config.model.anchor.num_scales,
aspect_ratios=self.task_config.model.anchor.aspect_ratios,
anchor_size=self.task_config.model.anchor.anchor_size,
dtype=params.dtype,
match_threshold=params.parser.match_threshold,
unmatched_threshold=params.parser.unmatched_threshold,
aug_rand_hflip=params.parser.aug_rand_hflip,
aug_scale_min=params.parser.aug_scale_min,
aug_scale_max=params.parser.aug_scale_max,
skip_crowd_during_training=params.parser.skip_crowd_during_training,
max_num_instances=params.parser.max_num_instances)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=dataset_fn.pick_dataset_fn(params.file_type),
decoder_fn=decoder.decode,
parser_fn=parser.parse_fn(params.is_training))
dataset = reader.read(input_context=input_context)
return dataset
def build_attribute_loss(self,
attribute_heads: List[exp_cfg.AttributeHead],
outputs: Mapping[str, Any],
labels: Mapping[str, Any],
box_sample_weight: tf.Tensor) -> float:
"""Computes attribute loss.
Args:
attribute_heads: a list of attribute head configs.
outputs: RetinaNet model outputs.
labels: RetinaNet labels.
box_sample_weight: normalized bounding box sample weights.
Returns:
Attribute loss of all attribute heads.
"""
attribute_loss = 0.0
for head in attribute_heads:
if head.name not in labels['attribute_targets']:
raise ValueError(f'Attribute {head.name} not found in label targets.')
if head.name not in outputs['attribute_outputs']:
raise ValueError(f'Attribute {head.name} not found in model outputs.')
y_true_att = keras_cv.losses.multi_level_flatten(
labels['attribute_targets'][head.name], last_dim=head.size)
y_pred_att = keras_cv.losses.multi_level_flatten(
outputs['attribute_outputs'][head.name], last_dim=head.size)
if head.type == 'regression':
att_loss_fn = tf.keras.losses.Huber(
1.0, reduction=tf.keras.losses.Reduction.SUM)
att_loss = att_loss_fn(
y_true=y_true_att,
y_pred=y_pred_att,
sample_weight=box_sample_weight)
else:
raise ValueError(f'Attribute type {head.type} not supported.')
attribute_loss += att_loss
return attribute_loss
def build_losses(self,
outputs: Mapping[str, Any],
labels: Mapping[str, Any],
aux_losses: Optional[Any] = None):
"""Build RetinaNet losses."""
params = self.task_config
attribute_heads = self.task_config.model.head.attribute_heads
cls_loss_fn = keras_cv.losses.FocalLoss(
alpha=params.losses.focal_loss_alpha,
gamma=params.losses.focal_loss_gamma,
reduction=tf.keras.losses.Reduction.SUM)
box_loss_fn = tf.keras.losses.Huber(
params.losses.huber_loss_delta, reduction=tf.keras.losses.Reduction.SUM)
# Sums all positives in a batch for normalization and avoids zero
# num_positives_sum, which would lead to inf loss during training
cls_sample_weight = labels['cls_weights']
box_sample_weight = labels['box_weights']
num_positives = tf.reduce_sum(box_sample_weight) + 1.0
cls_sample_weight = cls_sample_weight / num_positives
box_sample_weight = box_sample_weight / num_positives
y_true_cls = keras_cv.losses.multi_level_flatten(
labels['cls_targets'], last_dim=None)
y_true_cls = tf.one_hot(y_true_cls, params.model.num_classes)
y_pred_cls = keras_cv.losses.multi_level_flatten(
outputs['cls_outputs'], last_dim=params.model.num_classes)
y_true_box = keras_cv.losses.multi_level_flatten(
labels['box_targets'], last_dim=4)
y_pred_box = keras_cv.losses.multi_level_flatten(
outputs['box_outputs'], last_dim=4)
cls_loss = cls_loss_fn(
y_true=y_true_cls, y_pred=y_pred_cls, sample_weight=cls_sample_weight)
box_loss = box_loss_fn(
y_true=y_true_box, y_pred=y_pred_box, sample_weight=box_sample_weight)
model_loss = cls_loss + params.losses.box_loss_weight * box_loss
if attribute_heads:
model_loss += self.build_attribute_loss(attribute_heads, outputs, labels,
box_sample_weight)
total_loss = model_loss
if aux_losses:
reg_loss = tf.reduce_sum(aux_losses)
total_loss = model_loss + reg_loss
return total_loss, cls_loss, box_loss, model_loss
def build_metrics(self, training: bool = True):
"""Build detection metrics."""
metrics = []
metric_names = ['total_loss', 'cls_loss', 'box_loss', 'model_loss']
for name in metric_names:
metrics.append(tf.keras.metrics.Mean(name, dtype=tf.float32))
if not training:
if self.task_config.validation_data.tfds_name and self.task_config.annotation_file:
raise ValueError(
"Can't evaluate using annotation file when TFDS is used.")
self.coco_metric = coco_evaluator.COCOEvaluator(
annotation_file=self.task_config.annotation_file,
include_mask=False,
per_category_metrics=self.task_config.per_category_metrics)
return metrics
def train_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None):
"""Does forward and backward.
Args:
inputs: a dictionary of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
outputs = tf.nest.map_structure(
lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
loss, cls_loss, box_loss, model_loss = self.build_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient when LossScaleOptimizer is used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = {self.loss: loss}
all_losses = {
'total_loss': loss,
'cls_loss': cls_loss,
'box_loss': box_loss,
'model_loss': model_loss,
}
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
logs.update({m.name: m.result()})
return logs
def validation_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[Any]] = None):
"""Validatation step.
Args:
inputs: a dictionary of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
outputs = model(features, anchor_boxes=labels['anchor_boxes'],
image_shape=labels['image_info'][:, 1, :],
training=False)
loss, cls_loss, box_loss, model_loss = self.build_losses(
outputs=outputs, labels=labels, aux_losses=model.losses)
logs = {self.loss: loss}
all_losses = {
'total_loss': loss,
'cls_loss': cls_loss,
'box_loss': box_loss,
'model_loss': model_loss,
}
coco_model_outputs = {
'detection_boxes': outputs['detection_boxes'],
'detection_scores': outputs['detection_scores'],
'detection_classes': outputs['detection_classes'],
'num_detections': outputs['num_detections'],
'source_id': labels['groundtruths']['source_id'],
'image_info': labels['image_info']
}
logs.update({self.coco_metric.name: (labels['groundtruths'],
coco_model_outputs)})
if metrics:
for m in metrics:
m.update_state(all_losses[m.name])
logs.update({m.name: m.result()})
return logs
def aggregate_logs(self, state=None, step_outputs=None):
if state is None:
self.coco_metric.reset_states()
state = self.coco_metric
self.coco_metric.update_state(step_outputs[self.coco_metric.name][0],
step_outputs[self.coco_metric.name][1])
return state
def reduce_aggregated_logs(self, aggregated_logs, global_step=None):
return self.coco_metric.result()
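# --- Editor's sketch (illustrative only, not part of the task definition):
# a numeric check of the l2_weight_decay / 2.0 convention in build_model
# above. tf.nn.l2_loss(w) computes sum(w**2) / 2, while
# tf.keras.regularizers.l2(l)(w) computes l * sum(w**2), so halving the decay
# makes the two formulations agree. `_l2_halving_demo` is a made-up name.
def _l2_halving_demo(weight_decay: float = 1e-4) -> None:
  w = tf.constant([3.0, 4.0])  # sum(w**2) = 25, so tf.nn.l2_loss(w) = 12.5
  ref = weight_decay * tf.nn.l2_loss(w)
  reg = tf.keras.regularizers.l2(weight_decay / 2.0)(w)
  tf.debugging.assert_near(ref, reg)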
| [
"tensorflow.io.gfile.isdir",
"tensorflow.train.latest_checkpoint",
"tensorflow.train.Checkpoint",
"tensorflow.keras.regularizers.l2",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.keras.losses.Huber",
"tensorflow.one_hot",
"tensorflow.distribute.get_strategy",
"tensorflow.keras.layers.InputSpec",
"tensorflow.keras.metrics.Mean",
"tensorflow.GradientTape"
] | official/vision/beta/tasks/retinanet.py | [(34, 'official.core.task_factory.register_task_cls', 'task_factory.register_task_cls', (['exp_cfg.RetinaNetTask'], {}), False, 'from official.core import task_factory\n'), (46, 'tensorflow.keras.layers.InputSpec', 'tf.keras.layers.InputSpec', ([], {'shape': '([None] + self.task_config.model.input_size)'}), True, 'import tensorflow as tf\n'), (56, 'official.vision.beta.modeling.factory.build_retinanet', 'factory.build_retinanet', ([], {'input_specs': 'input_specs', 'model_config': 'self.task_config.model', 'l2_regularizer': 'l2_regularizer'}), False, 'from official.vision.beta.modeling import factory\n'), (68, 'tensorflow.io.gfile.isdir', 'tf.io.gfile.isdir', (['ckpt_dir_or_file'], {}), True, 'import tensorflow as tf\n'), (84, 'absl.logging.info', 'logging.info', (['"""Finished loading pretrained checkpoint from %s"""', 'ckpt_dir_or_file'], {}), False, 'from absl import logging\n'), (107, 'official.vision.beta.dataloaders.retinanet_input.Parser', 'retinanet_input.Parser', ([], {'output_size': 'self.task_config.model.input_size[:2]', 'min_level': 'self.task_config.model.min_level', 'max_level': 'self.task_config.model.max_level', 'num_scales': 'self.task_config.model.anchor.num_scales', 'aspect_ratios': 'self.task_config.model.anchor.aspect_ratios', 'anchor_size': 'self.task_config.model.anchor.anchor_size', 'dtype': 'params.dtype', 'match_threshold': 'params.parser.match_threshold', 'unmatched_threshold': 'params.parser.unmatched_threshold', 'aug_rand_hflip': 'params.parser.aug_rand_hflip', 'aug_scale_min': 'params.parser.aug_scale_min', 'aug_scale_max': 'params.parser.aug_scale_max', 'skip_crowd_during_training': 'params.parser.skip_crowd_during_training', 'max_num_instances': 'params.parser.max_num_instances'}), False, 'from official.vision.beta.dataloaders import retinanet_input\n'), (180, 'official.vision.keras_cv.losses.FocalLoss', 'keras_cv.losses.FocalLoss', ([], {'alpha': 'params.losses.focal_loss_alpha', 'gamma': 'params.losses.focal_loss_gamma', 'reduction': 'tf.keras.losses.Reduction.SUM'}), False, 'from official.vision import keras_cv\n'), (184, 'tensorflow.keras.losses.Huber', 'tf.keras.losses.Huber', (['params.losses.huber_loss_delta'], {'reduction': 'tf.keras.losses.Reduction.SUM'}), True, 'import tensorflow as tf\n'), (194, 'official.vision.keras_cv.losses.multi_level_flatten', 'keras_cv.losses.multi_level_flatten', (["labels['cls_targets']"], {'last_dim': 'None'}), False, 'from official.vision import keras_cv\n'), (196, 'tensorflow.one_hot', 'tf.one_hot', (['y_true_cls', 'params.model.num_classes'], {}), True, 'import tensorflow as tf\n'), (197, 'official.vision.keras_cv.losses.multi_level_flatten', 'keras_cv.losses.multi_level_flatten', (["outputs['cls_outputs']"], {'last_dim': 'params.model.num_classes'}), False, 'from official.vision import keras_cv\n'), (199, 'official.vision.keras_cv.losses.multi_level_flatten', 'keras_cv.losses.multi_level_flatten', (["labels['box_targets']"], {'last_dim': '(4)'}), False, 'from official.vision import keras_cv\n'), (201, 'official.vision.keras_cv.losses.multi_level_flatten', 'keras_cv.losses.multi_level_flatten', (["outputs['box_outputs']"], {'last_dim': '(4)'}), False, 'from official.vision import keras_cv\n'), (53, 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(l2_weight_decay / 2.0)'], {}), True, 'import tensorflow as tf\n'), (69, 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['ckpt_dir_or_file'], {}), True, 'import tensorflow as tf\n'), (73, 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {}), True, 'import tensorflow as tf\n'), (93, 'official.vision.beta.dataloaders.tfds_factory.get_detection_decoder', 'tfds_factory.get_detection_decoder', (['params.tfds_name'], {}), False, 'from official.vision.beta.dataloaders import tfds_factory\n'), (155, 'official.vision.keras_cv.losses.multi_level_flatten', 'keras_cv.losses.multi_level_flatten', (["labels['attribute_targets'][head.name]"], {'last_dim': 'head.size'}), False, 'from official.vision import keras_cv\n'), (157, 'official.vision.keras_cv.losses.multi_level_flatten', 'keras_cv.losses.multi_level_flatten', (["outputs['attribute_outputs'][head.name]"], {'last_dim': 'head.size'}), False, 'from official.vision import keras_cv\n'), (191, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['box_sample_weight'], {}), True, 'import tensorflow as tf\n'), (217, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['aux_losses'], {}), True, 'import tensorflow as tf\n'), (233, 'official.vision.beta.evaluation.coco_evaluator.COCOEvaluator', 'coco_evaluator.COCOEvaluator', ([], {'annotation_file': 'self.task_config.annotation_file', 'include_mask': '(False)', 'per_category_metrics': 'self.task_config.per_category_metrics'}), False, 'from official.vision.beta.evaluation import coco_evaluator\n'), (257, 'tensorflow.distribute.get_strategy', 'tf.distribute.get_strategy', ([], {}), True, 'import tensorflow as tf\n'), (258, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'backbone': 'model.backbone'}), True, 'import tensorflow as tf\n'), (97, 'official.vision.beta.dataloaders.tf_example_decoder.TfExampleDecoder', 'tf_example_decoder.TfExampleDecoder', ([], {'regenerate_source_id': 'decoder_cfg.regenerate_source_id'}), False, 'from official.vision.beta.dataloaders import tf_example_decoder\n'), (125, 'official.common.dataset_fn.pick_dataset_fn', 'dataset_fn.pick_dataset_fn', (['params.file_type'], {}), False, 'from official.common import dataset_fn\n'), (160, 'tensorflow.keras.losses.Huber', 'tf.keras.losses.Huber', (['(1.0)'], {'reduction': 'tf.keras.losses.Reduction.SUM'}), True, 'import tensorflow as tf\n'), (227, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', (['name'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (100, 'official.vision.beta.dataloaders.tf_example_label_map_decoder.TfExampleDecoderLabelMap', 'tf_example_label_map_decoder.TfExampleDecoderLabelMap', ([], {'label_map': 'decoder_cfg.label_map', 'regenerate_source_id': 'decoder_cfg.regenerate_source_id'}), False, 'from official.vision.beta.dataloaders import tf_example_label_map_decoder\n'), (261, 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), True, 'import tensorflow as tf\n')]
iascchen/ai_study_notes | 03f46c5e37670c10bd99000d979940db8878f36c | import os
import numpy as np
import tensorflow.keras as keras
from tensorflow.keras.preprocessing import image
from tensorflow.keras import layers
latent_dim = 32
height = 32
width = 32
channels = 3
# =========================================
generator_input = keras.Input(shape=(latent_dim,))
# First, transform the input into a 16x16 128-channels feature map
x = layers.Dense(128 * 16 * 16)(generator_input)
x = layers.LeakyReLU()(x)
x = layers.Reshape((16, 16, 128))(x)
# Then, add a convolution layer
x = layers.Conv2D(256, 5, padding='same')(x)
x = layers.LeakyReLU()(x)
# Upsample to 32x32
x = layers.Conv2DTranspose(256, 4, strides=2, padding='same')(x)
x = layers.LeakyReLU()(x)
# Few more conv layers
x = layers.Conv2D(256, 5, padding='same')(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(256, 5, padding='same')(x)
x = layers.LeakyReLU()(x)
# Produce a 32x32 1-channel feature map
x = layers.Conv2D(channels, 7, activation='tanh', padding='same')(x)
generator = keras.models.Model(generator_input, x)
generator.summary()
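# (Shape walk-through of the generator, for reference: (latent_dim,) -> Dense
# -> (16*16*128,) -> Reshape -> (16, 16, 128) -> Conv2DTranspose(strides=2)
# -> (32, 32, 256) -> final Conv2D -> (32, 32, channels).)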
# =========================================
discriminator_input = layers.Input(shape=(height, width, channels))
x = layers.Conv2D(128, 3)(discriminator_input)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128, 4, strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128, 4, strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Conv2D(128, 4, strides=2)(x)
x = layers.LeakyReLU()(x)
x = layers.Flatten()(x)
# One dropout layer - important trick!
x = layers.Dropout(0.4)(x)
# Classification layer
x = layers.Dense(1, activation='sigmoid')(x)
discriminator = keras.models.Model(discriminator_input, x)
discriminator.summary()
# To stabilize training, we use learning rate decay
# and gradient clipping (by value) in the optimizer.
discriminator_optimizer = keras.optimizers.RMSprop(lr=0.0008, clipvalue=1.0, decay=1e-8)
discriminator.compile(optimizer=discriminator_optimizer, loss='binary_crossentropy')
# =========================================
# Set discriminator weights to non-trainable
# (will only apply to the `gan` model)
discriminator.trainable = False
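# (Keras snapshots `trainable` when a model is compiled: the discriminator was
# compiled above while still trainable, so it keeps learning when trained
# directly; only the copy wired into `gan`, compiled below, stays frozen.)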
gan_input = keras.Input(shape=(latent_dim,))
gan_output = discriminator(generator(gan_input))
gan = keras.models.Model(gan_input, gan_output)
gan_optimizer = keras.optimizers.RMSprop(lr=0.0004, clipvalue=1.0, decay=1e-8)
gan.compile(optimizer=gan_optimizer, loss='binary_crossentropy')
gan.summary()
# =========================================
# Load CIFAR10 data
(x_train, y_train), (_, _) = keras.datasets.cifar10.load_data()
# Select frog images (class 6)
x_train = x_train[y_train.flatten() == 6]
# Normalize data
x_train = x_train.reshape(
(x_train.shape[0],) + (height, width, channels)).astype('float32') / 255.
iterations = 10000
batch_size = 20
save_dir = './gan_images'
# Start training loop
start = 0
for step in range(iterations):
# Sample random points in the latent space
random_latent_vectors = np.random.normal(size=(batch_size, latent_dim))
# Decode them to fake images
generated_images = generator.predict(random_latent_vectors)
# Combine them with real images
stop = start + batch_size
real_images = x_train[start: stop]
combined_images = np.concatenate([generated_images, real_images])
# Assemble labels discriminating real from fake images
labels = np.concatenate([np.ones((batch_size, 1)),
np.zeros((batch_size, 1))])
# Add random noise to the labels - important trick!
labels += 0.05 * np.random.random(labels.shape)
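    # (A common reading of this trick: the label noise acts like label
    # smoothing, keeping the discriminator from becoming overconfident and
    # starving the generator of gradient signal.)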
# Train the discriminator
d_loss = discriminator.train_on_batch(combined_images, labels)
# sample random points in the latent space
random_latent_vectors = np.random.normal(size=(batch_size, latent_dim))
# Assemble labels that say "all real images"
misleading_targets = np.zeros((batch_size, 1))
# Train the generator (via the gan model,
# where the discriminator weights are frozen)
a_loss = gan.train_on_batch(random_latent_vectors, misleading_targets)
start += batch_size
if start > len(x_train) - batch_size:
start = 0
# Occasionally save / plot
if step % 100 == 0:
# Save model weights
gan.save_weights('gan.h5')
# Print metrics
print('discriminator loss at step %s: %s' % (step, d_loss))
print('adversarial loss at step %s: %s' % (step, a_loss))
# Save one generated image
img = image.array_to_img(generated_images[0] * 255., scale=False)
img.save(os.path.join(save_dir, 'generated_frog' + str(step) + '.png'))
# Save one real image, for comparison
img = image.array_to_img(real_images[0] * 255., scale=False)
img.save(os.path.join(save_dir, 'real_frog' + str(step) + '.png'))
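# --- Editor's sketch (hypothetical extension, not in the original script):
# after training, fresh samples come from the generator alone; the
# discriminator is only needed during training. The file name is illustrative.
final_latent = np.random.normal(size=(1, latent_dim))
final_img = image.array_to_img(generator.predict(final_latent)[0] * 255., scale=False)
final_img.save(os.path.join(save_dir, 'generated_frog_final.png'))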
| [
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.Input",
"numpy.random.random",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.optimizers.RMSprop",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.datasets.cifar10.load_data",
"numpy.ones",
"numpy.concatenate",
"numpy.random.normal",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.preprocessing.image.array_to_img",
"tensorflow.keras.layers.Flatten",
"numpy.zeros",
"tensorflow.keras.layers.Input"
] | src/study_keras/6_hello_gan/hello_gan_cifar10.py | [(15, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(latent_dim,)'}), True, 'import tensorflow.keras as keras\n'), (38, 'tensorflow.keras.models.Model', 'keras.models.Model', (['generator_input', 'x'], {}), True, 'import tensorflow.keras as keras\n'), (43, 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': '(height, width, channels)'}), False, 'from tensorflow.keras import layers\n'), (60, 'tensorflow.keras.models.Model', 'keras.models.Model', (['discriminator_input', 'x'], {}), True, 'import tensorflow.keras as keras\n'), (65, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {'lr': '(0.0008)', 'clipvalue': '(1.0)', 'decay': '(1e-08)'}), True, 'import tensorflow.keras as keras\n'), (74, 'tensorflow.keras.Input', 'keras.Input', ([], {'shape': '(latent_dim,)'}), True, 'import tensorflow.keras as keras\n'), (76, 'tensorflow.keras.models.Model', 'keras.models.Model', (['gan_input', 'gan_output'], {}), True, 'import tensorflow.keras as keras\n'), (78, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {'lr': '(0.0004)', 'clipvalue': '(1.0)', 'decay': '(1e-08)'}), True, 'import tensorflow.keras as keras\n'), (86, 'tensorflow.keras.datasets.cifar10.load_data', 'keras.datasets.cifar10.load_data', ([], {}), True, 'import tensorflow.keras as keras\n'), (18, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(128 * 16 * 16)'], {}), False, 'from tensorflow.keras import layers\n'), (19, 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), False, 'from tensorflow.keras import layers\n'), (20, 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(16, 16, 128)'], {}), False, 'from tensorflow.keras import layers\n'), (23, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(256)', '(5)'], {'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (24, 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), False, 'from tensorflow.keras import layers\n'), (27, 'tensorflow.keras.layers.Conv2DTranspose', 'layers.Conv2DTranspose', (['(256)', '(4)'], {'strides': '(2)', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (28, 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), False, 'from tensorflow.keras import layers\n'), (31, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(256)', '(5)'], {'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (32, 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), False, 'from tensorflow.keras import layers\n'), (33, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(256)', '(5)'], {'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (34, 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), False, 'from tensorflow.keras import layers\n'), (37, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['channels', '(7)'], {'activation': '"""tanh"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (44, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(128)', '(3)'], {}), False, 'from tensorflow.keras import layers\n'), (45, 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), False, 'from tensorflow.keras import layers\n'), (46, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(128)', '(4)'], {'strides': '(2)'}), False, 'from tensorflow.keras import layers\n'), (47, 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), False, 'from tensorflow.keras import layers\n'), (48, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(128)', '(4)'], {'strides': '(2)'}), False, 'from tensorflow.keras import layers\n'), (49, 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), False, 'from tensorflow.keras import layers\n'), (50, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['(128)', '(4)'], {'strides': '(2)'}), False, 'from tensorflow.keras import layers\n'), (51, 'tensorflow.keras.layers.LeakyReLU', 'layers.LeakyReLU', ([], {}), False, 'from tensorflow.keras import layers\n'), (52, 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), False, 'from tensorflow.keras import layers\n'), (55, 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['(0.4)'], {}), False, 'from tensorflow.keras import layers\n'), (58, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""sigmoid"""'}), False, 'from tensorflow.keras import layers\n'), (103, 'numpy.random.normal', 'np.random.normal', ([], {'size': '(batch_size, latent_dim)'}), True, 'import numpy as np\n'), (111, 'numpy.concatenate', 'np.concatenate', (['[generated_images, real_images]'], {}), True, 'import numpy as np\n'), (123, 'numpy.random.normal', 'np.random.normal', ([], {'size': '(batch_size, latent_dim)'}), True, 'import numpy as np\n'), (126, 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), True, 'import numpy as np\n'), (117, 'numpy.random.random', 'np.random.random', (['labels.shape'], {}), True, 'import numpy as np\n'), (146, 'tensorflow.keras.preprocessing.image.array_to_img', 'image.array_to_img', (['(generated_images[0] * 255.0)'], {'scale': '(False)'}), False, 'from tensorflow.keras.preprocessing import image\n'), (150, 'tensorflow.keras.preprocessing.image.array_to_img', 'image.array_to_img', (['(real_images[0] * 255.0)'], {'scale': '(False)'}), False, 'from tensorflow.keras.preprocessing import image\n'), (114, 'numpy.ones', 'np.ones', (['(batch_size, 1)'], {}), True, 'import numpy as np\n'), (115, 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), True, 'import numpy as np\n')]
garyxcheng/federated | ba7133ead6127af71ea9356e26bfd05c02f8324a | # Copyright 2021, Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import itertools
import os.path
from absl.testing import parameterized
import pandas as pd
import tensorflow as tf
from generalization.utils import centralized_training_loop
from generalization.utils import metric_utils
def create_dataset():
# Create a dataset with 4 examples:
dataset = tf.data.Dataset.from_tensor_slices((
[
[1.0, 2.0],
[3.0, 4.0],
],
[
[5.0],
[6.0],
],
))
  # Repeat the 2-example dataset 3 times and batch in twos,
  # producing 3 minibatches of 2 examples each.
return dataset.repeat(3).batch(2)
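# --- Editor's sketch (hypothetical helper, unused by the tests): iterating
# the dataset confirms the batching arithmetic described above: 2 examples
# repeated 3 times gives 6 rows, batched in twos -> 3 minibatches.
def _count_minibatches():
  return sum(1 for _ in create_dataset())  # evaluates to 3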
def create_sequential_model(input_dims=2):
dense_layer = tf.keras.layers.Dense(
1,
kernel_initializer='zeros',
bias_initializer='zeros',
input_shape=(input_dims,),
name='dense') # specify names to facilitate testing.
return tf.keras.Sequential([dense_layer], name='sequential')
def compiled_keras_model(input_dims=2,
optimizer=tf.keras.optimizers.SGD(learning_rate=0.01)):
model = create_sequential_model(input_dims)
model.compile(
loss=tf.keras.losses.MeanSquaredError(),
optimizer=optimizer,
metrics=[tf.keras.metrics.MeanSquaredError()])
return model
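# --- Editor's note (observation, no behavior change): the SGD default
# argument above is constructed once, at function-definition time, so calls
# relying on the default share a single optimizer instance. Callers needing
# isolated optimizer state should pass a fresh optimizer explicitly, as
# test_lr_callback below does.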
class CentralizedTrainingLoopTest(tf.test.TestCase, parameterized.TestCase):
def assertMetricDecreases(self, metric, expected_len):
self.assertLen(metric, expected_len)
self.assertLess(metric[-1], metric[0])
@parameterized.named_parameters(
('train_train_eval={},train_val={},val={}'.format(*eval_fn_bools),
*eval_fn_bools)
for eval_fn_bools in itertools.product([False, True], repeat=3))
def test_training_reduces_loss(self, use_part_train_eval_fn, use_part_val_fn,
use_unpart_fn):
keras_model = compiled_keras_model()
dataset = create_dataset()
eval_fn = lambda model: model.evaluate(dataset, return_dict=True, verbose=0)
part_train_eval_fn = eval_fn if use_part_train_eval_fn else None
part_val_fn = eval_fn if use_part_val_fn else None
unpart_fn = eval_fn if use_unpart_fn else None
history = centralized_training_loop.run(
keras_model=keras_model,
train_dataset=dataset,
part_train_eval_fn=part_train_eval_fn,
part_val_fn=part_val_fn,
unpart_fn=unpart_fn,
num_epochs=5)
expected_metrics = ['loss', 'mean_squared_error',
'epoch_time_in_seconds'] # running training metrics
for eval_fn, prefix in ((part_train_eval_fn,
metric_utils.PART_TRAIN_EVAL_METRICS_PREFIX),
(part_val_fn, metric_utils.PART_VAL_METRICS_PREFIX),
(unpart_fn, metric_utils.UNPART_METRICS_PREFIX)):
if eval_fn is not None:
for metric in ('loss', 'mean_squared_error'):
prefixed_metric = prefix + metric
self.assertIn(prefixed_metric, history.history.keys())
self.assertMetricDecreases(
history.history[prefixed_metric], expected_len=5)
expected_metrics.append(prefixed_metric)
expected_metrics.append(prefix + metric_utils.TIME_KEY)
self.assertCountEqual(history.history.keys(), expected_metrics)
def test_lr_callback(self):
optimizer = tf.keras.optimizers.SGD(learning_rate=0.1)
keras_model = compiled_keras_model(optimizer=optimizer)
dataset = create_dataset()
history = centralized_training_loop.run(
keras_model=keras_model,
train_dataset=dataset,
num_epochs=10,
decay_epochs=8,
lr_decay=0.5)
self.assertCountEqual(
history.history.keys(),
['loss', 'mean_squared_error', 'epoch_time_in_seconds', 'lr'])
self.assertAllClose(history.history['lr'], [0.1] * 8 + [0.05] * 2)
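# --- Editor's sketch (assumption about the loop's internals): the schedule
# asserted above matches plain step decay, lr * lr_decay ** (epoch //
# decay_epochs); whether centralized_training_loop implements it exactly this
# way is not verified here.
def _step_decay(epoch, lr=0.1, decay_epochs=8, lr_decay=0.5):
  return lr * lr_decay**(epoch // decay_epochs)
# [_step_decay(e) for e in range(10)] == [0.1] * 8 + [0.05] * 2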
class CentralizedTrainingLoopWithDefaultCallbacksTest(tf.test.TestCase,
parameterized.TestCase):
"""Integrated test with `metric_utils.configure_default_callbacks()`."""
def test_checkpoint_callback_can_restore(self):
keras_model = compiled_keras_model()
dataset = create_dataset()
exp_name = 'test_ckpt'
root_output_dir = self.get_temp_dir()
    checkpoint_callback, _ = metric_utils.configure_default_callbacks(
root_output_dir=root_output_dir,
experiment_name=exp_name,
epochs_per_checkpoint=1)
centralized_training_loop.run(
keras_model=keras_model,
train_dataset=dataset,
num_epochs=2,
        checkpoint_callback=checkpoint_callback)
self.assertTrue(tf.io.gfile.exists(root_output_dir))
ckpt_dir = os.path.join(root_output_dir, 'checkpoints', exp_name)
self.assertTrue(tf.io.gfile.exists(ckpt_dir))
restored_model = compiled_keras_model()
restored_model.load_weights(ckpt_dir)
self.assertAllEqual(
keras_model.get_config(),
restored_model.get_config(),
)
@parameterized.named_parameters(
('train_train_eval={},train_val={},val={},test={}'.format(*eval_fn_bools),
*eval_fn_bools)
for eval_fn_bools in itertools.product([False, True], repeat=4))
def test_writing_with_various_evaluation_combination_to_csv(
self, use_part_train_eval_fn, use_part_val_fn, use_unpart_fn,
use_test_fn):
keras_model = compiled_keras_model()
dataset = create_dataset()
exp_name = 'write_eval_metrics'
root_output_dir = self.get_temp_dir()
eval_fn = lambda model: model.evaluate(dataset, return_dict=True, verbose=0)
_, metrics_callbacks = metric_utils.configure_default_callbacks(
root_output_dir=root_output_dir,
experiment_name=exp_name,
epochs_per_checkpoint=1)
part_train_eval_fn = eval_fn if use_part_train_eval_fn else None
part_val_fn = eval_fn if use_part_val_fn else None
unpart_fn = eval_fn if use_unpart_fn else None
test_fn = eval_fn if use_test_fn else None
centralized_training_loop.run(
keras_model=keras_model,
train_dataset=dataset,
num_epochs=2,
part_train_eval_fn=part_train_eval_fn,
part_val_fn=part_val_fn,
unpart_fn=unpart_fn,
test_fn=test_fn,
metrics_callbacks=metrics_callbacks)
log_dir = os.path.join(root_output_dir, 'logdir', exp_name)
self.assertTrue(tf.io.gfile.exists(log_dir))
results_dir = os.path.join(root_output_dir, 'results', exp_name)
self.assertTrue(tf.io.gfile.exists(results_dir))
metrics_file = os.path.join(results_dir, 'experiment.metrics.csv')
self.assertTrue(tf.io.gfile.exists(metrics_file))
metrics_csv = pd.read_csv(metrics_file, index_col=0)
# Build expected columns.
expected_columns = ['loss', 'mean_squared_error',
'epoch_time_in_seconds'] # running training metrics
for eval_fn, prefix in ((part_train_eval_fn,
metric_utils.PART_TRAIN_EVAL_METRICS_PREFIX),
(part_val_fn, metric_utils.PART_VAL_METRICS_PREFIX),
(unpart_fn, metric_utils.UNPART_METRICS_PREFIX),
(test_fn, metric_utils.TEST_METRICS_PREFIX)):
if eval_fn is not None:
expected_columns.extend([
prefix + metric
for metric in ('loss', 'mean_squared_error', metric_utils.TIME_KEY)
])
expected_num_rows = 2 if test_fn is None else 3
self.assertEqual(metrics_csv.shape,
(expected_num_rows, len(expected_columns)))
self.assertCountEqual(metrics_csv.columns, expected_columns)
if __name__ == '__main__':
tf.test.main()
| [
"pandas.read_csv",
"tensorflow.io.gfile.exists",
"tensorflow.keras.layers.Dense",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.keras.Sequential",
"tensorflow.test.main",
"tensorflow.keras.metrics.MeanSquaredError",
"tensorflow.keras.optimizers.SGD"
] | generalization/utils/centralized_training_loop_test.py | [(28, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['([[1.0, 2.0], [3.0, 4.0]], [[5.0], [6.0]])'], {}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {'kernel_initializer': '"""zeros"""', 'bias_initializer': '"""zeros"""', 'input_shape': '(input_dims,)', 'name': '"""dense"""'}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['[dense_layer]'], {'name': '"""sequential"""'}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': '(0.01)'}), True, 'import tensorflow as tf\n'), (223, 'tensorflow.test.main', 'tf.test.main', ([], {}), True, 'import tensorflow as tf\n'), (83, 'generalization.utils.centralized_training_loop.run', 'centralized_training_loop.run', ([], {'keras_model': 'keras_model', 'train_dataset': 'dataset', 'part_train_eval_fn': 'part_train_eval_fn', 'part_val_fn': 'part_val_fn', 'unpart_fn': 'unpart_fn', 'num_epochs': '(5)'}), False, 'from generalization.utils import centralized_training_loop\n'), (111, 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': '(0.1)'}), True, 'import tensorflow as tf\n'), (114, 'generalization.utils.centralized_training_loop.run', 'centralized_training_loop.run', ([], {'keras_model': 'keras_model', 'train_dataset': 'dataset', 'num_epochs': '(10)', 'decay_epochs': '(8)', 'lr_decay': '(0.5)'}), False, 'from generalization.utils import centralized_training_loop\n'), (137, 'generalization.utils.metric_utils.configure_default_callbacks', 'metric_utils.configure_default_callbacks', ([], {'root_output_dir': 'root_output_dir', 'experiment_name': 'exp_name', 'epochs_per_checkpoint': '(1)'}), False, 'from generalization.utils import metric_utils\n'), (142, 'generalization.utils.centralized_training_loop.run', 'centralized_training_loop.run', ([], {'keras_model': 'keras_model', 'train_dataset': 'dataset', 'num_epochs': '(2)', 'checkpoint_callback': 'checkpoiont_callback'}), False, 'from generalization.utils import centralized_training_loop\n'), (172, 'generalization.utils.metric_utils.configure_default_callbacks', 'metric_utils.configure_default_callbacks', ([], {'root_output_dir': 'root_output_dir', 'experiment_name': 'exp_name', 'epochs_per_checkpoint': '(1)'}), False, 'from generalization.utils import metric_utils\n'), (182, 'generalization.utils.centralized_training_loop.run', 'centralized_training_loop.run', ([], {'keras_model': 'keras_model', 'train_dataset': 'dataset', 'num_epochs': '(2)', 'part_train_eval_fn': 'part_train_eval_fn', 'part_val_fn': 'part_val_fn', 'unpart_fn': 'unpart_fn', 'test_fn': 'test_fn', 'metrics_callbacks': 'metrics_callbacks'}), False, 'from generalization.utils import centralized_training_loop\n'), (199, 'pandas.read_csv', 'pd.read_csv', (['metrics_file'], {'index_col': '(0)'}), True, 'import pandas as pd\n'), (57, 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), True, 'import tensorflow as tf\n'), (148, 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['root_output_dir'], {}), True, 'import tensorflow as tf\n'), (150, 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['ckpt_dir'], {}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['log_dir'], {}), True, 'import tensorflow as tf\n'), (196, 'tensorflow.io.gfile.exists', 
'tf.io.gfile.exists', (['results_dir'], {}), True, 'import tensorflow as tf\n'), (198, 'tensorflow.io.gfile.exists', 'tf.io.gfile.exists', (['metrics_file'], {}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.keras.metrics.MeanSquaredError', 'tf.keras.metrics.MeanSquaredError', ([], {}), True, 'import tensorflow as tf\n'), (72, 'itertools.product', 'itertools.product', (['[False, True]'], {'repeat': '(3)'}), False, 'import itertools\n'), (162, 'itertools.product', 'itertools.product', (['[False, True]'], {'repeat': '(4)'}), False, 'import itertools\n')] |
BlackHC/uncertainty-baselines | 1a28be3e41e14d8ab74dfa1e3eed15f113718f03 | # coding=utf-8
# Copyright 2022 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Deterministic ViT."""
import functools
import itertools
import multiprocessing
import os
from absl import app
from absl import flags
from absl import logging
from clu import metric_writers
from clu import parameter_overview
from clu import periodic_actions
from clu import preprocess_spec
import flax
import jax
import jax.numpy as jnp
import ml_collections.config_flags
import numpy as np
import robustness_metrics as rm
import tensorflow as tf
import uncertainty_baselines as ub
import checkpoint_utils # local file import from baselines.jft
import data_uncertainty_utils # local file import from baselines.jft
import input_utils # local file import from baselines.jft
import ood_utils # local file import from baselines.jft
import preprocess_utils # local file import from baselines.jft
import train_utils # local file import from baselines.jft
# TODO(dusenberrymw): Open-source remaining imports.
fewshot = None
ml_collections.config_flags.DEFINE_config_file(
'config', None, 'Training configuration.', lock_config=True)
flags.DEFINE_string('output_dir', default=None, help='Work unit directory.')
flags.DEFINE_integer(
'num_cores', default=None, help='Unused. How many devices being used.')
flags.DEFINE_boolean(
'use_gpu', default=None, help='Unused. Whether or not running on GPU.')
flags.DEFINE_string('tpu', None,
'Unused. Name of the TPU. Only used if use_gpu is False.')
FLAGS = flags.FLAGS
def main(config, output_dir):
seed = config.get('seed', 0)
rng = jax.random.PRNGKey(seed)
tf.random.set_seed(seed)
if config.get('data_dir'):
logging.info('data_dir=%s', config.data_dir)
logging.info('Output dir: %s', output_dir)
tf.io.gfile.makedirs(output_dir)
save_checkpoint_path = None
if config.get('checkpoint_steps'):
save_checkpoint_path = os.path.join(output_dir, 'checkpoint.npz')
# Create an asynchronous multi-metric writer.
writer = metric_writers.create_default_writer(
output_dir, just_logging=jax.process_index() > 0)
  # The pool is used to perform misc operations such as logging in an async way.
pool = multiprocessing.pool.ThreadPool()
def write_note(note):
if jax.process_index() == 0:
logging.info('NOTE: %s', note)
write_note('Initializing...')
# Verify settings to make sure no checkpoints are accidentally missed.
if config.get('keep_checkpoint_steps'):
assert config.get('checkpoint_steps'), 'Specify `checkpoint_steps`.'
    assert config.keep_checkpoint_steps % config.checkpoint_steps == 0, (
        f'`keep_checkpoint_steps` ({config.keep_checkpoint_steps}) should be '
        f'divisible by `checkpoint_steps` ({config.checkpoint_steps}).')
batch_size = config.batch_size
batch_size_eval = config.get('batch_size_eval', batch_size)
if (batch_size % jax.device_count() != 0 or
batch_size_eval % jax.device_count() != 0):
raise ValueError(f'Batch sizes ({batch_size} and {batch_size_eval}) must '
f'be divisible by device number ({jax.device_count()})')
local_batch_size = batch_size // jax.process_count()
local_batch_size_eval = batch_size_eval // jax.process_count()
logging.info(
'Global batch size %d on %d hosts results in %d local batch size. '
'With %d devices per host (%d devices total), that\'s a %d per-device '
'batch size.', batch_size, jax.process_count(), local_batch_size,
jax.local_device_count(), jax.device_count(),
local_batch_size // jax.local_device_count())
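  # Worked example with hypothetical numbers: batch_size=4096 on 8 hosts
  # yields local_batch_size=512; with 8 local devices per host (64 devices
  # total) that is a per-device batch size of 64.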
write_note('Initializing train dataset...')
rng, train_ds_rng = jax.random.split(rng)
train_ds_rng = jax.random.fold_in(train_ds_rng, jax.process_index())
train_ds = input_utils.get_data(
dataset=config.dataset,
split=config.train_split,
rng=train_ds_rng,
process_batch_size=local_batch_size,
preprocess_fn=preprocess_spec.parse(
spec=config.pp_train, available_ops=preprocess_utils.all_ops()),
shuffle_buffer_size=config.shuffle_buffer_size,
prefetch_size=config.get('prefetch_to_host', 2),
data_dir=config.get('data_dir'))
# Start prefetching already.
train_iter = input_utils.start_input_pipeline(
train_ds, config.get('prefetch_to_device', 1))
write_note('Initializing val dataset(s)...')
def _get_val_split(dataset, split, pp_eval, data_dir=None):
# We do ceil rounding such that we include the last incomplete batch.
nval_img = input_utils.get_num_examples(
dataset,
split=split,
process_batch_size=local_batch_size_eval,
drop_remainder=False,
data_dir=data_dir)
val_steps = int(np.ceil(nval_img / batch_size_eval))
logging.info('Running validation for %d steps for %s, %s', val_steps,
dataset, split)
if isinstance(pp_eval, str):
pp_eval = preprocess_spec.parse(
spec=pp_eval, available_ops=preprocess_utils.all_ops())
val_ds = input_utils.get_data(
dataset=dataset,
split=split,
rng=None,
process_batch_size=local_batch_size_eval,
preprocess_fn=pp_eval,
cache=config.get('val_cache', 'batched'),
num_epochs=1,
repeat_after_batching=True,
shuffle=False,
prefetch_size=config.get('prefetch_to_host', 2),
drop_remainder=False,
data_dir=data_dir)
return val_ds
val_ds_splits = {
'val':
_get_val_split(
config.dataset,
split=config.val_split,
pp_eval=config.pp_eval,
data_dir=config.get('data_dir'))
}
if config.get('test_split'):
val_ds_splits.update({
'test':
_get_val_split(
config.dataset,
split=config.test_split,
pp_eval=config.pp_eval,
data_dir=config.get('data_dir'))
})
if config.get('eval_on_cifar_10h'):
cifar10_to_cifar10h_fn = data_uncertainty_utils.create_cifar10_to_cifar10h_fn(
config.get('data_dir', None))
preprocess_fn = preprocess_spec.parse(
spec=config.pp_eval_cifar_10h, available_ops=preprocess_utils.all_ops())
pp_eval = lambda ex: preprocess_fn(cifar10_to_cifar10h_fn(ex))
val_ds_splits['cifar_10h'] = _get_val_split(
'cifar10',
split=config.get('cifar_10h_split') or 'test',
pp_eval=pp_eval,
data_dir=config.get('data_dir'))
elif config.get('eval_on_imagenet_real'):
imagenet_to_real_fn = data_uncertainty_utils.create_imagenet_to_real_fn()
preprocess_fn = preprocess_spec.parse(
spec=config.pp_eval_imagenet_real,
available_ops=preprocess_utils.all_ops())
pp_eval = lambda ex: preprocess_fn(imagenet_to_real_fn(ex))
val_ds_splits['imagenet_real'] = _get_val_split(
'imagenet2012_real',
split=config.get('imagenet_real_split') or 'validation',
pp_eval=pp_eval,
data_dir=config.get('data_dir'))
ood_ds = {}
if config.get('ood_datasets') and config.get('ood_methods'):
    if config.get('ood_methods'):  # config.ood_methods is not an empty list
logging.info('loading OOD dataset = %s', config.get('ood_datasets'))
ood_ds, ood_ds_names = ood_utils.load_ood_datasets(
config.dataset,
config.ood_datasets,
config.ood_split,
config.pp_eval,
config.pp_eval_ood,
config.ood_methods,
config.train_split,
config.get('data_dir'),
_get_val_split,
)
ntrain_img = input_utils.get_num_examples(
config.dataset,
split=config.train_split,
process_batch_size=local_batch_size,
data_dir=config.get('data_dir'))
steps_per_epoch = ntrain_img // batch_size
if config.get('num_epochs'):
total_steps = int(config.num_epochs * steps_per_epoch)
assert not config.get('total_steps'), 'Set either num_epochs or total_steps'
else:
total_steps = config.total_steps
logging.info('Total train data points: %d', ntrain_img)
logging.info(
'Running for %d steps, that means %f epochs and %d steps per epoch',
total_steps, total_steps * batch_size / ntrain_img, steps_per_epoch)
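  # For instance (hypothetical numbers): ntrain_img=100_000 with
  # batch_size=1_000 gives steps_per_epoch=100, so num_epochs=90 implies
  # total_steps=9_000.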
write_note('Initializing model...')
logging.info('config.model = %s', config.model)
model = ub.models.vision_transformer(
num_classes=config.num_classes, **config.model)
  # We want all parameters to be created in host RAM, not on any device; they
  # will be sent to devices later as needed. Otherwise we risk allocating them
  # twice, which we have already run into in two separate situations.
@functools.partial(jax.jit, backend='cpu')
def init(rng):
image_size = tuple(train_ds.element_spec['image'].shape[2:])
logging.info('image_size = %s', image_size)
dummy_input = jnp.zeros((local_batch_size,) + image_size, jnp.float32)
params = flax.core.unfreeze(model.init(rng, dummy_input,
train=False))['params']
# Set bias in the head to a low value, such that loss is small initially.
params['head']['bias'] = jnp.full_like(params['head']['bias'],
config.get('init_head_bias', 0))
# init head kernel to all zeros for fine-tuning
if config.get('model_init'):
params['head']['kernel'] = jnp.full_like(params['head']['kernel'], 0)
return params
rng, rng_init = jax.random.split(rng)
params_cpu = init(rng_init)
if jax.process_index() == 0:
num_params = sum(p.size for p in jax.tree_flatten(params_cpu)[0])
parameter_overview.log_parameter_overview(params_cpu)
writer.write_scalars(step=0, scalars={'num_params': num_params})
@functools.partial(jax.pmap, axis_name='batch')
def evaluation_fn(params, images, labels, mask):
"""Copy to deterministic_utils.py whenever changes are made!"""
# Ignore the entries with all zero labels for evaluation.
mask *= labels.max(axis=1)
logits, out = model.apply({'params': flax.core.freeze(params)},
images,
train=False)
label_indices = config.get('label_indices')
logging.info('!!! mask %s, label_indices %s', mask, label_indices)
if label_indices:
logits = logits[:, label_indices]
    # Note that logits and labels are usually of the shape [batch, num_classes].
    # But for OOD data, when num_classes_ood > num_classes_ind, we need to
    # adjust labels to labels[:, :config.num_classes] to match the shape of
    # logits. That is just to avoid a shape mismatch; the output losses have no
    # meaning for OOD data, because OOD examples do not belong to any IND class.
losses = getattr(train_utils, config.get('loss', 'sigmoid_xent'))(
logits=logits,
labels=labels[:, :(len(label_indices) if label_indices
else config.num_classes)], reduction=False)
loss = jax.lax.psum(losses * mask, axis_name='batch')
top1_idx = jnp.argmax(logits, axis=1)
# Extracts the label at the highest logit index for each image.
top1_correct = jnp.take_along_axis(labels, top1_idx[:, None], axis=1)[:, 0]
ncorrect = jax.lax.psum(top1_correct * mask, axis_name='batch')
n = jax.lax.psum(mask, axis_name='batch')
metric_args = jax.lax.all_gather([logits, labels, out['pre_logits'], mask],
axis_name='batch')
return ncorrect, loss, n, metric_args
@functools.partial(jax.pmap, axis_name='batch')
def cifar_10h_evaluation_fn(params, images, labels, mask):
logits, out = model.apply({'params': flax.core.freeze(params)},
images,
train=False)
label_indices = config.get('label_indices')
if label_indices:
logits = logits[:, label_indices]
losses = getattr(train_utils, config.get('loss', 'softmax_xent'))(
logits=logits, labels=labels, reduction=False)
loss = jax.lax.psum(losses, axis_name='batch')
top1_idx = jnp.argmax(logits, axis=1)
# Extracts the label at the highest logit index for each image.
one_hot_labels = jnp.eye(10)[jnp.argmax(labels, axis=1)]
top1_correct = jnp.take_along_axis(
one_hot_labels, top1_idx[:, None], axis=1)[:, 0]
ncorrect = jax.lax.psum(top1_correct, axis_name='batch')
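    # Note: unlike `evaluation_fn` above, the counts here are not masked, and
    # psum only sums elementwise across devices; the host-side eval loop
    # reduces the returned arrays with np.sum.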
n = jax.lax.psum(one_hot_labels, axis_name='batch')
metric_args = jax.lax.all_gather([logits, labels, out['pre_logits'], mask],
axis_name='batch')
return ncorrect, loss, n, metric_args
# Setup function for computing representation.
@functools.partial(jax.pmap, axis_name='batch')
def representation_fn(params, images, labels, mask):
_, outputs = model.apply({'params': flax.core.freeze(params)},
images,
train=False)
representation = outputs[config.fewshot.representation_layer]
representation = jax.lax.all_gather(representation, 'batch')
labels = jax.lax.all_gather(labels, 'batch')
mask = jax.lax.all_gather(mask, 'batch')
return representation, labels, mask
# Load the optimizer from flax.
opt_name = config.get('optim_name')
write_note(f'Initializing {opt_name} optimizer...')
opt_def = getattr(flax.optim, opt_name)(**config.get('optim', {}))
# We jit this, such that the arrays that are created are created on the same
# device as the input is, in this case the CPU. Else they'd be on device[0].
opt_cpu = jax.jit(opt_def.create)(params_cpu)
weight_decay_rules = config.get('weight_decay', []) or []
rescale_value = config.lr.base if config.get('weight_decay_decouple') else 1.
weight_decay_fn = train_utils.get_weight_decay_fn(
weight_decay_rules=weight_decay_rules, rescale_value=rescale_value)
@functools.partial(jax.pmap, axis_name='batch', donate_argnums=(0,))
def update_fn(opt, lr, images, labels, rng):
"""Update step. Copy to deterministic_utils.py whenever changes are made!"""
measurements = {}
# Split rng and return next_rng for the following step.
rng, next_rng = jax.random.split(rng, 2)
rng_local = jax.random.fold_in(rng, jax.lax.axis_index('batch'))
def loss_fn(params, images, labels):
logits, _ = model.apply(
{'params': flax.core.freeze(params)}, images,
train=True, rngs={'dropout': rng_local})
label_indices = config.get('label_indices')
if label_indices:
logits = logits[:, label_indices]
loss = getattr(train_utils, config.get('loss', 'sigmoid_xent'))(
logits=logits, labels=labels)
return loss, logits
# Implementation considerations compared and summarized at
# https://docs.google.com/document/d/1g3kMEvqu1DOawaflKNyUsIoQ4yIVEoyE5ZlIPkIl4Lc/edit?hl=en#
(l, logits), g = train_utils.accumulate_gradient(
jax.value_and_grad(loss_fn, has_aux=True), opt.target, images, labels,
config.get('grad_accum_steps'))
l, g = jax.lax.pmean((l, g), axis_name='batch')
measurements['training_loss'] = l
# Log the gradient norm only if we need to compute it anyways (clipping)
# or if we don't use grad_accum_steps, as they interact badly.
if config.get('grad_accum_steps', 1) == 1 or config.get('grad_clip_norm'):
grads, _ = jax.tree_flatten(g)
l2_g = jnp.sqrt(sum([jnp.vdot(p, p) for p in grads]))
measurements['l2_grads'] = l2_g
# Optionally resize the global gradient to a maximum norm. We found this
# useful in some cases across optimizers, hence it's in the main loop.
if config.get('grad_clip_norm'):
g_factor = jnp.minimum(1.0, config.grad_clip_norm / l2_g)
g = jax.tree_util.tree_map(lambda p: g_factor * p, g)
opt = opt.apply_gradient(g, learning_rate=lr)
opt = opt.replace(target=weight_decay_fn(opt.target, lr))
params, _ = jax.tree_flatten(opt.target)
measurements['l2_params'] = jnp.sqrt(sum([jnp.vdot(p, p) for p in params]))
top1_idx = jnp.argmax(logits, axis=1)
top1_correct = jnp.take_along_axis(labels, top1_idx[:, None], axis=1)[:, 0]
prec1 = jax.lax.psum(jnp.sum(top1_correct), axis_name='batch') / batch_size
measurements['training_prec@1'] = prec1
measurements['learning_rate'] = lr
return opt, next_rng, measurements
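  # For orientation (a sketch of the helper's contract, not its actual code):
  # train_utils.accumulate_gradient(value_and_grad_fn, params, images, labels,
  # k) is expected to split the batch into k chunks, evaluate (loss, grad) on
  # each chunk, and average the results, trading step time for peak memory.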
reint_params = ('head/kernel', 'head/bias')
if config.get('only_eval', False) or not config.get('reint_head', True):
reint_params = []
checkpoint_data = checkpoint_utils.maybe_load_checkpoint(
train_loop_rngs=rng,
save_checkpoint_path=save_checkpoint_path,
init_optimizer=opt_cpu,
init_params=params_cpu,
init_fixed_model_states=None,
default_reinit_params=reint_params,
config=config)
train_loop_rngs = checkpoint_data.train_loop_rngs
opt_cpu = checkpoint_data.optimizer
accumulated_train_time = checkpoint_data.accumulated_train_time
write_note('Kicking off misc stuff...')
first_step = int(opt_cpu.state.step) # Might be a DeviceArray type.
if first_step == 0 and jax.process_index() == 0:
writer.write_hparams(dict(config))
chrono = train_utils.Chrono(
first_step, total_steps, batch_size, accumulated_train_time)
# Note: switch to ProfileAllHosts() if you need to profile all hosts.
# (Xprof data become much larger and take longer to load for analysis)
profiler = periodic_actions.Profile(
# Create profile after every restart to analyze pre-emption related
# problems and assure we get similar performance in every run.
logdir=output_dir, first_profile=first_step + 10)
# Prepare the learning-rate and pre-fetch it to device to avoid delays.
lr_fn = train_utils.create_learning_rate_schedule(total_steps,
**config.get('lr', {}))
# TODO(dusenberrymw): According to flax docs, prefetching shouldn't be
# necessary for TPUs.
lr_iter = train_utils.prefetch_scalar(
map(lr_fn, range(total_steps)), config.get('prefetch_to_device', 1))
write_note(f'Replicating...\n{chrono.note}')
opt_repl = flax.jax_utils.replicate(opt_cpu)
write_note(f'Initializing few-shotters...\n{chrono.note}')
fewshotter = None
if 'fewshot' in config and fewshot is not None:
fewshotter = fewshot.FewShotEvaluator(
representation_fn, config.fewshot,
config.fewshot.get('batch_size') or batch_size_eval)
checkpoint_writer = None
# Note: we return the train loss, val loss, and fewshot best l2s for use in
# reproducibility unit tests.
train_loss = -jnp.inf
val_loss = {val_name: -jnp.inf for val_name, _ in val_ds_splits.items()}
fewshot_results = {'dummy': {(0, 1): -jnp.inf}}
write_note(f'First step compilations...\n{chrono.note}')
logging.info('first_step = %s', first_step)
# Advance the iterators if we are restarting from an earlier checkpoint.
# TODO(dusenberrymw): Look into checkpointing dataset state instead.
if first_step > 0:
write_note('Advancing iterators after resuming from a checkpoint...')
lr_iter = itertools.islice(lr_iter, first_step, None)
train_iter = itertools.islice(train_iter, first_step, None)
# Using a python integer for step here, because opt.state.step is allocated
# on TPU during replication.
for step, train_batch, lr_repl in zip(
range(first_step + 1, total_steps + 1), train_iter, lr_iter):
with jax.profiler.TraceAnnotation('train_step', step_num=step, _r=1):
if not config.get('only_eval', False):
opt_repl, train_loop_rngs, extra_measurements = update_fn(
opt_repl,
lr_repl,
train_batch['image'],
train_batch['labels'],
rng=train_loop_rngs)
if jax.process_index() == 0:
profiler(step)
# Checkpoint saving
if not config.get('only_eval', False) and train_utils.itstime(
step, config.get('checkpoint_steps'), total_steps, process=0):
write_note('Checkpointing...')
chrono.pause()
train_utils.checkpointing_timeout(checkpoint_writer,
config.get('checkpoint_timeout', 1))
accumulated_train_time = chrono.accum_train_time
# We need to transfer the weights over now or else we risk keeping them
# alive while they'll be updated in a future step, creating hard to debug
# memory errors (see b/160593526). Also, takes device 0's params only.
opt_cpu = jax.tree_util.tree_map(lambda x: np.array(x[0]), opt_repl)
# Check whether we want to keep a copy of the current checkpoint.
copy_step = None
if train_utils.itstime(step, config.get('keep_checkpoint_steps'),
total_steps):
write_note('Keeping a checkpoint copy...')
copy_step = step
      # Checkpoint should be a nested dictionary or Flax dataclasses from
      # `flax.struct`. Both can be present in a checkpoint.
checkpoint_data = checkpoint_utils.CheckpointData(
train_loop_rngs=train_loop_rngs,
optimizer=opt_cpu,
accumulated_train_time=accumulated_train_time)
checkpoint_writer = pool.apply_async(
checkpoint_utils.checkpoint_trained_model,
(checkpoint_data, save_checkpoint_path, copy_step))
chrono.resume()
# Report training progress
if not config.get('only_eval', False) and train_utils.itstime(
step, config.log_training_steps, total_steps, process=0):
write_note('Reporting training progress...')
timing_measurements, note = chrono.tick(step)
write_note(note)
train_measurements = {}
train_measurements.update(flax.jax_utils.unreplicate(extra_measurements))
train_measurements.update(timing_measurements)
writer.write_scalars(step, train_measurements)
# Keep train_loss to return for reproducibility tests.
train_loss = train_measurements['training_loss']
# Report validation performance
if config.get('only_eval', False) or train_utils.itstime(
step, config.log_eval_steps, total_steps):
write_note('Evaluating on the validation set...')
chrono.pause()
for val_name, val_ds in val_ds_splits.items():
# Sets up evaluation metrics.
ece_num_bins = config.get('ece_num_bins', 15)
auc_num_bins = config.get('auc_num_bins', 1000)
ece = rm.metrics.ExpectedCalibrationError(num_bins=ece_num_bins)
calib_auc = rm.metrics.CalibrationAUC(correct_pred_as_pos_label=False)
oc_auc_0_5 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.005,
num_bins=auc_num_bins)
oc_auc_1 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.01,
num_bins=auc_num_bins)
oc_auc_2 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.02,
num_bins=auc_num_bins)
oc_auc_5 = rm.metrics.OracleCollaborativeAUC(oracle_fraction=0.05,
num_bins=auc_num_bins)
label_diversity = tf.keras.metrics.Mean()
sample_diversity = tf.keras.metrics.Mean()
ged = tf.keras.metrics.Mean()
# Runs evaluation loop.
val_iter = input_utils.start_input_pipeline(
val_ds, config.get('prefetch_to_device', 1))
ncorrect, loss, nseen = 0, 0, 0
for batch in val_iter:
if val_name == 'cifar_10h':
batch_ncorrect, batch_losses, batch_n, batch_metric_args = (
cifar_10h_evaluation_fn(opt_repl.target, batch['image'],
batch['labels'], batch['mask']))
else:
batch_ncorrect, batch_losses, batch_n, batch_metric_args = (
evaluation_fn(opt_repl.target, batch['image'],
batch['labels'], batch['mask']))
# All results are a replicated array shaped as follows:
# (local_devices, per_device_batch_size, elem_shape...)
# with each local device's entry being identical as they got psum'd.
# So let's just take the first one to the host as numpy.
ncorrect += np.sum(np.array(batch_ncorrect[0]))
loss += np.sum(np.array(batch_losses[0]))
nseen += np.sum(np.array(batch_n[0]))
if config.get('loss', 'sigmoid_xent') != 'sigmoid_xent':
# Here we parse batch_metric_args to compute uncertainty metrics.
# (e.g., ECE or Calibration AUC).
logits, labels, _, masks = batch_metric_args
            masks = np.array(masks[0], dtype=bool)  # np.bool was removed in NumPy 1.24.
logits = np.array(logits[0])
probs = jax.nn.softmax(logits)
# From one-hot to integer labels, as required by ECE.
int_labels = np.argmax(np.array(labels[0]), axis=-1)
int_preds = np.argmax(logits, axis=-1)
confidence = np.max(probs, axis=-1)
for p, c, l, d, m, label in zip(probs, confidence, int_labels,
int_preds, masks, labels[0]):
ece.add_batch(p[m, :], label=l[m])
calib_auc.add_batch(d[m], label=l[m], confidence=c[m])
# TODO(jereliu): Extend to support soft multi-class probabilities.
oc_auc_0_5.add_batch(d[m], label=l[m], custom_binning_score=c[m])
oc_auc_1.add_batch(d[m], label=l[m], custom_binning_score=c[m])
oc_auc_2.add_batch(d[m], label=l[m], custom_binning_score=c[m])
oc_auc_5.add_batch(d[m], label=l[m], custom_binning_score=c[m])
if val_name == 'cifar_10h' or val_name == 'imagenet_real':
num_classes = config.num_classes
if config.get('label_indices'):
num_classes = len(config.get('label_indices'))
batch_label_diversity, batch_sample_diversity, batch_ged = data_uncertainty_utils.generalized_energy_distance(
label[m], p[m, :], num_classes)
label_diversity.update_state(batch_label_diversity)
sample_diversity.update_state(batch_sample_diversity)
ged.update_state(batch_ged)
val_loss[val_name] = loss / nseen # Keep for reproducibility tests.
val_measurements = {
f'{val_name}_prec@1': ncorrect / nseen,
f'{val_name}_loss': val_loss[val_name],
}
if config.get('loss', 'sigmoid_xent') != 'sigmoid_xent':
val_measurements[f'{val_name}_ece'] = ece.result()['ece']
val_measurements[f'{val_name}_calib_auc'] = calib_auc.result()[
'calibration_auc']
val_measurements[f'{val_name}_oc_auc_0.5%'] = oc_auc_0_5.result()[
'collaborative_auc']
val_measurements[f'{val_name}_oc_auc_1%'] = oc_auc_1.result()[
'collaborative_auc']
val_measurements[f'{val_name}_oc_auc_2%'] = oc_auc_2.result()[
'collaborative_auc']
val_measurements[f'{val_name}_oc_auc_5%'] = oc_auc_5.result()[
'collaborative_auc']
writer.write_scalars(step, val_measurements)
if val_name == 'cifar_10h' or val_name == 'imagenet_real':
cifar_10h_measurements = {
f'{val_name}_label_diversity': label_diversity.result(),
f'{val_name}_sample_diversity': sample_diversity.result(),
f'{val_name}_ged': ged.result(),
}
writer.write_scalars(step, cifar_10h_measurements)
# OOD eval
# Entries in the ood_ds dict include:
# (ind_dataset, ood_dataset1, ood_dataset2, ...).
# OOD metrics are computed using ind_dataset paired with each of the
      # ood_dataset. When the Mahalanobis distance method is applied,
      # train_ind_ds is also included in the ood_ds.
if ood_ds and config.ood_methods:
ood_measurements = ood_utils.eval_ood_metrics(
ood_ds,
ood_ds_names,
config.ood_methods,
evaluation_fn,
opt_repl.target,
n_prefetch=config.get('prefetch_to_device', 1))
writer.write_scalars(step, ood_measurements)
chrono.resume()
if 'fewshot' in config and fewshotter is not None:
# Compute few-shot on-the-fly evaluation.
if config.get('only_eval', False) or train_utils.itstime(
step, config.fewshot.log_steps, total_steps):
chrono.pause()
write_note(f'Few-shot evaluation...\n{chrono.note}')
# Keep `results` to return for reproducibility tests.
fewshot_results, best_l2 = fewshotter.run_all(opt_repl.target,
config.fewshot.datasets)
# TODO(dusenberrymw): Remove this once fewshot.py is updated.
def make_writer_measure_fn(step):
def writer_measure(name, value):
writer.write_scalars(step, {name: value})
return writer_measure
fewshotter.walk_results(
make_writer_measure_fn(step), fewshot_results, best_l2)
chrono.resume()
if config.get('only_eval', False):
break
elif config.get('testing_failure_step'):
# Break early to simulate infra failures in test cases.
if config.testing_failure_step == step:
break
write_note(f'Done!\n{chrono.note}')
pool.close()
pool.join()
writer.close()
# Return final training loss, validation loss, and fewshot results for
# reproducibility test cases.
return train_loss, val_loss, fewshot_results
if __name__ == '__main__':
# Adds jax flags to the program.
jax.config.config_with_absl()
# TODO(dusenberrymw): Refactor `main` such that there is a `train_eval`
# function that returns values for tests and does not directly access flags,
# and then have `main` return None.
def _main(argv):
del argv
config = FLAGS.config
output_dir = FLAGS.output_dir
main(config, output_dir)
app.run(_main) # Ignore the returned values from `main`.
| [
"tensorflow.random.set_seed",
"tensorflow.io.gfile.makedirs",
"numpy.ceil",
"numpy.max",
"numpy.argmax",
"numpy.array",
"tensorflow.keras.metrics.Mean"
] | baselines/jft/deterministic.py | [(51, 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output_dir"""'], {'default': 'None', 'help': '"""Work unit directory."""'}), False, 'from absl import flags\n'), (52, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_cores"""'], {'default': 'None', 'help': '"""Unused. How many devices being used."""'}), False, 'from absl import flags\n'), (54, 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""use_gpu"""'], {'default': 'None', 'help': '"""Unused. Whether or not running on GPU."""'}), False, 'from absl import flags\n'), (56, 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""tpu"""', 'None', '"""Unused. Name of the TPU. Only used if use_gpu is False."""'], {}), False, 'from absl import flags\n'), (65, 'jax.random.PRNGKey', 'jax.random.PRNGKey', (['seed'], {}), False, 'import jax\n'), (66, 'tensorflow.random.set_seed', 'tf.random.set_seed', (['seed'], {}), True, 'import tensorflow as tf\n'), (70, 'absl.logging.info', 'logging.info', (['"""Output dir: %s"""', 'output_dir'], {}), False, 'from absl import logging\n'), (71, 'tensorflow.io.gfile.makedirs', 'tf.io.gfile.makedirs', (['output_dir'], {}), True, 'import tensorflow as tf\n'), (82, 'multiprocessing.pool.ThreadPool', 'multiprocessing.pool.ThreadPool', ([], {}), False, 'import multiprocessing\n'), (114, 'jax.random.split', 'jax.random.split', (['rng'], {}), False, 'import jax\n'), (236, 'absl.logging.info', 'logging.info', (['"""Total train data points: %d"""', 'ntrain_img'], {}), False, 'from absl import logging\n'), (237, 'absl.logging.info', 'logging.info', (['"""Running for %d steps, that means %f epochs and %d steps per epoch"""', 'total_steps', '(total_steps * batch_size / ntrain_img)', 'steps_per_epoch'], {}), False, 'from absl import logging\n'), (242, 'absl.logging.info', 'logging.info', (['"""config.model = %s"""', 'config.model'], {}), False, 'from absl import logging\n'), (243, 'uncertainty_baselines.models.vision_transformer', 'ub.models.vision_transformer', ([], {'num_classes': 'config.num_classes'}), True, 'import uncertainty_baselines as ub\n'), (249, 'functools.partial', 'functools.partial', (['jax.jit'], {'backend': '"""cpu"""'}), False, 'import functools\n'), (267, 'jax.random.split', 'jax.random.split', (['rng'], {}), False, 'import jax\n'), (275, 'functools.partial', 'functools.partial', (['jax.pmap'], {'axis_name': '"""batch"""'}), False, 'import functools\n'), (309, 'functools.partial', 'functools.partial', (['jax.pmap'], {'axis_name': '"""batch"""'}), False, 'import functools\n'), (336, 'functools.partial', 'functools.partial', (['jax.pmap'], {'axis_name': '"""batch"""'}), False, 'import functools\n'), (358, 'train_utils.get_weight_decay_fn', 'train_utils.get_weight_decay_fn', ([], {'weight_decay_rules': 'weight_decay_rules', 'rescale_value': 'rescale_value'}), False, 'import train_utils\n'), (361, 'functools.partial', 'functools.partial', (['jax.pmap'], {'axis_name': '"""batch"""', 'donate_argnums': '(0,)'}), False, 'import functools\n'), (417, 'checkpoint_utils.maybe_load_checkpoint', 'checkpoint_utils.maybe_load_checkpoint', ([], {'train_loop_rngs': 'rng', 'save_checkpoint_path': 'save_checkpoint_path', 'init_optimizer': 'opt_cpu', 'init_params': 'params_cpu', 'init_fixed_model_states': 'None', 'default_reinit_params': 'reint_params', 'config': 'config'}), False, 'import checkpoint_utils\n'), (433, 'train_utils.Chrono', 'train_utils.Chrono', (['first_step', 'total_steps', 'batch_size', 'accumulated_train_time'], {}), False, 'import 
train_utils\n'), (437, 'clu.periodic_actions.Profile', 'periodic_actions.Profile', ([], {'logdir': 'output_dir', 'first_profile': '(first_step + 10)'}), False, 'from clu import periodic_actions\n'), (451, 'flax.jax_utils.replicate', 'flax.jax_utils.replicate', (['opt_cpu'], {}), False, 'import flax\n'), (469, 'absl.logging.info', 'logging.info', (['"""first_step = %s"""', 'first_step'], {}), False, 'from absl import logging\n'), (698, 'jax.config.config_with_absl', 'jax.config.config_with_absl', ([], {}), False, 'import jax\n'), (710, 'absl.app.run', 'app.run', (['_main'], {}), False, 'from absl import app\n'), (69, 'absl.logging.info', 'logging.info', (['"""data_dir=%s"""', 'config.data_dir'], {}), False, 'from absl import logging\n'), (75, 'os.path.join', 'os.path.join', (['output_dir', '"""checkpoint.npz"""'], {}), False, 'import os\n'), (104, 'jax.process_count', 'jax.process_count', ([], {}), False, 'import jax\n'), (105, 'jax.process_count', 'jax.process_count', ([], {}), False, 'import jax\n'), (109, 'jax.process_count', 'jax.process_count', ([], {}), False, 'import jax\n'), (110, 'jax.local_device_count', 'jax.local_device_count', ([], {}), False, 'import jax\n'), (110, 'jax.device_count', 'jax.device_count', ([], {}), False, 'import jax\n'), (115, 'jax.process_index', 'jax.process_index', ([], {}), False, 'import jax\n'), (135, 'input_utils.get_num_examples', 'input_utils.get_num_examples', (['dataset'], {'split': 'split', 'process_batch_size': 'local_batch_size_eval', 'drop_remainder': '(False)', 'data_dir': 'data_dir'}), False, 'import input_utils\n'), (142, 'absl.logging.info', 'logging.info', (['"""Running validation for %d steps for %s, %s"""', 'val_steps', 'dataset', 'split'], {}), False, 'from absl import logging\n'), (252, 'absl.logging.info', 'logging.info', (['"""image_size = %s"""', 'image_size'], {}), False, 'from absl import logging\n'), (253, 'jax.numpy.zeros', 'jnp.zeros', (['((local_batch_size,) + image_size)', 'jnp.float32'], {}), True, 'import jax.numpy as jnp\n'), (270, 'jax.process_index', 'jax.process_index', ([], {}), False, 'import jax\n'), (272, 'clu.parameter_overview.log_parameter_overview', 'parameter_overview.log_parameter_overview', (['params_cpu'], {}), False, 'from clu import parameter_overview\n'), (284, 'absl.logging.info', 'logging.info', (['"""!!! 
mask %s, label_indices %s"""', 'mask', 'label_indices'], {}), False, 'from absl import logging\n'), (297, 'jax.lax.psum', 'jax.lax.psum', (['(losses * mask)'], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (299, 'jax.numpy.argmax', 'jnp.argmax', (['logits'], {'axis': '(1)'}), True, 'import jax.numpy as jnp\n'), (302, 'jax.lax.psum', 'jax.lax.psum', (['(top1_correct * mask)'], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (303, 'jax.lax.psum', 'jax.lax.psum', (['mask'], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (305, 'jax.lax.all_gather', 'jax.lax.all_gather', (["[logits, labels, out['pre_logits'], mask]"], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (320, 'jax.lax.psum', 'jax.lax.psum', (['losses'], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (322, 'jax.numpy.argmax', 'jnp.argmax', (['logits'], {'axis': '(1)'}), True, 'import jax.numpy as jnp\n'), (328, 'jax.lax.psum', 'jax.lax.psum', (['top1_correct'], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (329, 'jax.lax.psum', 'jax.lax.psum', (['one_hot_labels'], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (331, 'jax.lax.all_gather', 'jax.lax.all_gather', (["[logits, labels, out['pre_logits'], mask]"], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (342, 'jax.lax.all_gather', 'jax.lax.all_gather', (['representation', '"""batch"""'], {}), False, 'import jax\n'), (343, 'jax.lax.all_gather', 'jax.lax.all_gather', (['labels', '"""batch"""'], {}), False, 'import jax\n'), (344, 'jax.lax.all_gather', 'jax.lax.all_gather', (['mask', '"""batch"""'], {}), False, 'import jax\n'), (354, 'jax.jit', 'jax.jit', (['opt_def.create'], {}), False, 'import jax\n'), (367, 'jax.random.split', 'jax.random.split', (['rng', '(2)'], {}), False, 'import jax\n'), (385, 'jax.lax.pmean', 'jax.lax.pmean', (['(l, g)'], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (404, 'jax.tree_flatten', 'jax.tree_flatten', (['opt.target'], {}), False, 'import jax\n'), (407, 'jax.numpy.argmax', 'jnp.argmax', (['logits'], {'axis': '(1)'}), True, 'import jax.numpy as jnp\n'), (474, 'itertools.islice', 'itertools.islice', (['lr_iter', 'first_step', 'None'], {}), False, 'import itertools\n'), (475, 'itertools.islice', 'itertools.islice', (['train_iter', 'first_step', 'None'], {}), False, 'import itertools\n'), (85, 'jax.process_index', 'jax.process_index', ([], {}), False, 'import jax\n'), (86, 'absl.logging.info', 'logging.info', (['"""NOTE: %s"""', 'note'], {}), False, 'from absl import logging\n'), (111, 'jax.local_device_count', 'jax.local_device_count', ([], {}), False, 'import jax\n'), (141, 'numpy.ceil', 'np.ceil', (['(nval_img / batch_size_eval)'], {}), True, 'import numpy as np\n'), (196, 'data_uncertainty_utils.create_imagenet_to_real_fn', 'data_uncertainty_utils.create_imagenet_to_real_fn', ([], {}), False, 'import data_uncertainty_utils\n'), (263, 'jax.numpy.full_like', 'jnp.full_like', (["params['head']['kernel']", '(0)'], {}), True, 'import jax.numpy as jnp\n'), (301, 'jax.numpy.take_along_axis', 'jnp.take_along_axis', (['labels', 'top1_idx[:, (None)]'], {'axis': '(1)'}), True, 'import jax.numpy as jnp\n'), (324, 'jax.numpy.eye', 'jnp.eye', (['(10)'], {}), True, 'import jax.numpy as jnp\n'), (326, 'jax.numpy.take_along_axis', 'jnp.take_along_axis', (['one_hot_labels', 'top1_idx[:, (None)]'], {'axis': '(1)'}), True, 'import jax.numpy as jnp\n'), (368, 'jax.lax.axis_index', 'jax.lax.axis_index', (['"""batch"""'], {}), False, 'import jax\n'), (383, 'jax.value_and_grad', 'jax.value_and_grad', 
(['loss_fn'], {'has_aux': '(True)'}), False, 'import jax\n'), (391, 'jax.tree_flatten', 'jax.tree_flatten', (['g'], {}), False, 'import jax\n'), (398, 'jax.numpy.minimum', 'jnp.minimum', (['(1.0)', '(config.grad_clip_norm / l2_g)'], {}), True, 'import jax.numpy as jnp\n'), (399, 'jax.tree_util.tree_map', 'jax.tree_util.tree_map', (['(lambda p: g_factor * p)', 'g'], {}), False, 'import jax\n'), (408, 'jax.numpy.take_along_axis', 'jnp.take_along_axis', (['labels', 'top1_idx[:, (None)]'], {'axis': '(1)'}), True, 'import jax.numpy as jnp\n'), (431, 'jax.process_index', 'jax.process_index', ([], {}), False, 'import jax\n'), (482, 'jax.profiler.TraceAnnotation', 'jax.profiler.TraceAnnotation', (['"""train_step"""'], {'step_num': 'step', '_r': '(1)'}), False, 'import jax\n'), (491, 'jax.process_index', 'jax.process_index', ([], {}), False, 'import jax\n'), (516, 'checkpoint_utils.CheckpointData', 'checkpoint_utils.CheckpointData', ([], {'train_loop_rngs': 'train_loop_rngs', 'optimizer': 'opt_cpu', 'accumulated_train_time': 'accumulated_train_time'}), False, 'import checkpoint_utils\n'), (527, 'train_utils.itstime', 'train_utils.itstime', (['step', 'config.log_training_steps', 'total_steps'], {'process': '(0)'}), False, 'import train_utils\n'), (540, 'train_utils.itstime', 'train_utils.itstime', (['step', 'config.log_eval_steps', 'total_steps'], {}), False, 'import train_utils\n'), (79, 'jax.process_index', 'jax.process_index', ([], {}), False, 'import jax\n'), (99, 'jax.device_count', 'jax.device_count', ([], {}), False, 'import jax\n'), (100, 'jax.device_count', 'jax.device_count', ([], {}), False, 'import jax\n'), (188, 'preprocess_utils.all_ops', 'preprocess_utils.all_ops', ([], {}), False, 'import preprocess_utils\n'), (280, 'flax.core.freeze', 'flax.core.freeze', (['params'], {}), False, 'import flax\n'), (311, 'flax.core.freeze', 'flax.core.freeze', (['params'], {}), False, 'import flax\n'), (324, 'jax.numpy.argmax', 'jnp.argmax', (['labels'], {'axis': '(1)'}), True, 'import jax.numpy as jnp\n'), (338, 'flax.core.freeze', 'flax.core.freeze', (['params'], {}), False, 'import flax\n'), (409, 'jax.numpy.sum', 'jnp.sum', (['top1_correct'], {}), True, 'import jax.numpy as jnp\n'), (533, 'flax.jax_utils.unreplicate', 'flax.jax_utils.unreplicate', (['extra_measurements'], {}), False, 'import flax\n'), (548, 'robustness_metrics.metrics.ExpectedCalibrationError', 'rm.metrics.ExpectedCalibrationError', ([], {'num_bins': 'ece_num_bins'}), True, 'import robustness_metrics as rm\n'), (549, 'robustness_metrics.metrics.CalibrationAUC', 'rm.metrics.CalibrationAUC', ([], {'correct_pred_as_pos_label': '(False)'}), True, 'import robustness_metrics as rm\n'), (550, 'robustness_metrics.metrics.OracleCollaborativeAUC', 'rm.metrics.OracleCollaborativeAUC', ([], {'oracle_fraction': '(0.005)', 'num_bins': 'auc_num_bins'}), True, 'import robustness_metrics as rm\n'), (552, 'robustness_metrics.metrics.OracleCollaborativeAUC', 'rm.metrics.OracleCollaborativeAUC', ([], {'oracle_fraction': '(0.01)', 'num_bins': 'auc_num_bins'}), True, 'import robustness_metrics as rm\n'), (554, 'robustness_metrics.metrics.OracleCollaborativeAUC', 'rm.metrics.OracleCollaborativeAUC', ([], {'oracle_fraction': '(0.02)', 'num_bins': 'auc_num_bins'}), True, 'import robustness_metrics as rm\n'), (556, 'robustness_metrics.metrics.OracleCollaborativeAUC', 'rm.metrics.OracleCollaborativeAUC', ([], {'oracle_fraction': '(0.05)', 'num_bins': 'auc_num_bins'}), True, 'import robustness_metrics as rm\n'), (558, 'tensorflow.keras.metrics.Mean', 
'tf.keras.metrics.Mean', ([], {}), True, 'import tensorflow as tf\n'), (559, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), True, 'import tensorflow as tf\n'), (560, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), True, 'import tensorflow as tf\n'), (659, 'train_utils.itstime', 'train_utils.itstime', (['step', 'config.fewshot.log_steps', 'total_steps'], {}), False, 'import train_utils\n'), (101, 'jax.device_count', 'jax.device_count', ([], {}), False, 'import jax\n'), (122, 'preprocess_utils.all_ops', 'preprocess_utils.all_ops', ([], {}), False, 'import preprocess_utils\n'), (147, 'preprocess_utils.all_ops', 'preprocess_utils.all_ops', ([], {}), False, 'import preprocess_utils\n'), (199, 'preprocess_utils.all_ops', 'preprocess_utils.all_ops', ([], {}), False, 'import preprocess_utils\n'), (372, 'flax.core.freeze', 'flax.core.freeze', (['params'], {}), False, 'import flax\n'), (405, 'jax.numpy.vdot', 'jnp.vdot', (['p', 'p'], {}), True, 'import jax.numpy as jnp\n'), (505, 'numpy.array', 'np.array', (['x[0]'], {}), True, 'import numpy as np\n'), (271, 'jax.tree_flatten', 'jax.tree_flatten', (['params_cpu'], {}), False, 'import jax\n'), (392, 'jax.numpy.vdot', 'jnp.vdot', (['p', 'p'], {}), True, 'import jax.numpy as jnp\n'), (579, 'numpy.array', 'np.array', (['batch_ncorrect[0]'], {}), True, 'import numpy as np\n'), (580, 'numpy.array', 'np.array', (['batch_losses[0]'], {}), True, 'import numpy as np\n'), (581, 'numpy.array', 'np.array', (['batch_n[0]'], {}), True, 'import numpy as np\n'), (586, 'numpy.array', 'np.array', (['masks[0]'], {'dtype': 'np.bool'}), True, 'import numpy as np\n'), (587, 'numpy.array', 'np.array', (['logits[0]'], {}), True, 'import numpy as np\n'), (588, 'jax.nn.softmax', 'jax.nn.softmax', (['logits'], {}), False, 'import jax\n'), (591, 'numpy.argmax', 'np.argmax', (['logits'], {'axis': '(-1)'}), True, 'import numpy as np\n'), (592, 'numpy.max', 'np.max', (['probs'], {'axis': '(-1)'}), True, 'import numpy as np\n'), (590, 'numpy.array', 'np.array', (['labels[0]'], {}), True, 'import numpy as np\n'), (607, 'data_uncertainty_utils.generalized_energy_distance', 'data_uncertainty_utils.generalized_energy_distance', (['label[m]', 'p[(m), :]', 'num_classes'], {}), False, 'import data_uncertainty_utils\n')] |
xchange11/bundesterminator | d7e92bfa8ffda54821364c74ac33a48ffa5f51b9 | import os
import pickle
from google.cloud import storage
from bundestag import data, utils
from bundestag.bundes_w2v import BundesW2V
import pandas as pd
import numpy as np
from tensorflow import keras
from tensorflow.keras import Sequential, layers
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.preprocessing.sequence import pad_sequences
from tensorflow.keras.utils import to_categorical
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
import mlflow
from mlflow.tracking import MlflowClient
from memoized_property import memoized_property
# Google Cloud Platform Data
GCP_BUCKET_NAME = "" # NEEDS TO BE PROVIDED HERE IN CODE
GCP_BUCKET_DATA_FOLDER = 'trained'
# MLFLOW server address
MLFLOW_URL = "" # NEEDS TO BE PROVIDED HERE IN CODE
class Bundestrainer():
model = None
loss = None
optimizer = None
metrics = None
lstm_nodes = None
keras_dense_layers = None
last_layer_nodes = None
batch_size = None
patience = None
epochs = None
validation_split = None
X = None
y = None
X_train = None
X_test = None
y_train = None
y_test = None
speech_data = None
bio_data = None
balance_treshold = None
w2v_model = None
pad_len = None
party_mapping = None
def __init__(self,
loss="categorical_crossentropy",
optimizer="adam",
metrics=['accuracy'],
lstm_nodes=20,
keras_dense_layers={15: 'relu'},
last_layer_nodes=5,
batch_size=32,
patience=3,
epochs=10,
validation_split=0.3,
balance_treshold=500_000,
pad_len=300,
experiment_name=""): # NEEDS TO BE PROVIDED HERE IN CODE
self.loss = loss
self.optimizer = optimizer
self.metrics = metrics
self.lstm_nodes = lstm_nodes
self.keras_dense_layers = keras_dense_layers
self.last_layer_nodes = last_layer_nodes
self.batch_size = batch_size
self.patience = patience
self.epochs = epochs
self.validation_split = validation_split
self.balance_treshold = balance_treshold
self.pad_len = pad_len
self.experiment_name = experiment_name
def get_data(self):
all_data = data.get_data()
self.speech_data = all_data['speech_segments'][["text", "party",
"speech_id",
"speaker_id"]]
self.bio_data = all_data['bio_data']
def preprocess_dataframe(self):
self.speech_data = utils.impute_party(self.speech_data, self.bio_data)
self.speech_data = utils.remove_non_party(self.speech_data)
self.speech_data = self.speech_data.dropna()
self.speech_data["text"] = self.speech_data["text"].map(utils.basic_preprocess)
self.speech_data = self.speech_data.dropna()
self.speech_data = utils.balance(self.speech_data,
self.balance_treshold)
def prepare_data_for_training(self):
self.X = self.speech_data["text"]
self.y = self.speech_data["party"]
self.encode_target() # labeling and cat generation
self.split() # train-test-split and assign X_train etc. to instance prop
self.init_w2v() # create w2v dict with X_train
        # Prepare X
self.X_train = self.preprocess_text(self.X_train)
self.X_test = self.preprocess_text(self.X_test)
def preprocess_text(self, document_series):
documents = document_series.to_list()
documents = self.w2v_model.embedding(documents)
documents = pad_sequences(documents,
dtype='float32',
padding='post',
maxlen=self.pad_len)
return documents
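        # Shape note: each document becomes a (pad_len, embedding_dim) float32
        # array; shorter documents are zero-padded at the end and longer ones
        # truncated, and the Masking layer in init_model skips the zero
        # timesteps during training.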
def encode_target(self):
party_df = pd.DataFrame()
party_df["party"] = self.y
party_df["party_encoded"] = LabelEncoder().fit_transform(
party_df["party"])
party_mapping = party_df.groupby("party_encoded").first()
party_mapping = list(party_mapping["party"])
self.party_mapping = party_mapping
self.y = to_categorical(party_df["party_encoded"],
num_classes=len(party_mapping),
dtype="int32")
def split(self):
self.X_train, self.X_test, self.y_train, self.y_test = \
train_test_split(self.X, self.y, test_size=0.3, random_state=42)
def init_w2v(self):
self.w2v_model = BundesW2V()
self.w2v_model.init_model(self.X_train)
def init_model(self):
self.model = Sequential()
self.model.add(layers.Masking())
self.model.add(layers.LSTM(self.lstm_nodes, activation='tanh'))
# Create dense layers based on user input.
        # Custom numbers of neurons and different activation functions are possible.
for nodes, act in self.keras_dense_layers.items():
self.model.add(layers.Dense(nodes, activation=act))
        # Try grabbing the correct number of last-layer nodes
        # from the number of parties present in the data.
try:
self.model.add(
layers.Dense(len(self.party_mapping), activation='softmax'))
        except Exception:
self.model.add(
layers.Dense(self.last_layer_nodes, activation='softmax'))
self.model.compile(loss=self.loss,
optimizer=self.optimizer,
metrics=self.metrics)
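        # With the constructor defaults the resulting stack is:
        # Masking -> LSTM(20, tanh) -> Dense(15, relu)
        # -> Dense(n_parties, softmax) (or last_layer_nodes if no party
        # mapping has been created yet).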
def fit_model(self):
es = EarlyStopping(patience=self.patience, restore_best_weights=True)
self.model.fit(self.X_train,
self.y_train,
batch_size=self.batch_size,
epochs=self.epochs,
validation_split=self.validation_split,
callbacks=[es])
# MLFLOW Logging
# Log the parameters
self.mlflow_log_param('loss', self.loss)
self.mlflow_log_param('optimizer', self.optimizer)
self.mlflow_log_param('lstm_nodes', self.lstm_nodes)
# Log the dense layers of the model
for i, (nodes, act) in enumerate(self.keras_dense_layers.items(), 1):
self.mlflow_log_param(f'dense_{i}_nodes', nodes)
self.mlflow_log_param(f'dense_{i}_activation', act)
self.mlflow_log_param('last_layer_nodes', self.last_layer_nodes)
self.mlflow_log_param('batch_size', self.batch_size)
self.mlflow_log_param('patience', self.patience)
self.mlflow_log_param('epochs', self.epochs)
self.mlflow_log_param('validation_split', self.validation_split)
self.mlflow_log_param('balance_treshold', self.balance_treshold)
self.mlflow_log_param('pad_len', self.pad_len)
# Evaluate and log the metrics
evaluation = self.model.evaluate(self.X_test, self.y_test, verbose=0)
for metric, value in zip(self.model.metrics_names, evaluation):
try:
self.mlflow_log_metric(metric, value)
            except Exception:
                print(f"Metric '{metric}' can't be logged. Does it even exist?")
def get_init_fit(self):
self.get_data()
self.preprocess_dataframe()
self.prepare_data_for_training()
self.init_model()
self.fit_model()
def predict_party_by_string(self, text_string):
processed_string = utils.basic_preprocess(text_string)
processed_string_as_list = [processed_string]
processed_string_as_series = pd.Series(processed_string_as_list)
vectorized_list = self.preprocess_text(processed_string_as_series)
        # Note: Sequential.predict_classes was removed in TF 2.6; take the
        # argmax of the softmax output instead.
        predicted_probas = self.model.predict(vectorized_list)
        predicted_party_as_class = int(np.argmax(predicted_probas, axis=-1)[0])
predicted_party_as_string = self.party_mapping[predicted_party_as_class]
return predicted_party_as_string
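    # Example usage (a sketch; the file names are placeholders):
    #   trainer = Bundestrainer()
    #   trainer.load_w2c('model2.w2v')
    #   trainer.load_model('model2.tf')
    #   trainer.load_party_mapping('model2.pm')
    #   trainer.predict_party_by_string('Ein Beispieltext ...')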
def save_model(self, name):
'''Save the trained model and upload to Google Cloud Platform'''
filename = os.path.join(name)
self.model.save(filename)
self.upload_file_to_gcp(filename)
def load_model(self, path):
self.model = keras.models.load_model(path)
def upload_file_to_gcp(self, location):
'''Upload a file to the Google Cloud Platform'''
client = storage.Client()
bucket = client.bucket(GCP_BUCKET_NAME)
blob = bucket.blob(location)
blob.upload_from_filename(location)
def save_w2v(self, name):
        '''Save the Word2Vec model and also upload it to Google Cloud'''
filename = os.path.join(name)
self.w2v_model.save(filename)
self.upload_file_to_gcp(filename)
def load_w2c(self, path):
self.w2v_model = BundesW2V()
self.w2v_model.load(path)
def save_party_mapping(self, path):
with open(path, "wb") as f:
pickle.dump(self.party_mapping, f)
self.upload_file_to_gcp(path)
def load_party_mapping(self, path):
with open(path, "rb") as f:
self.party_mapping = pickle.load(f)
def save_speech_data(self, path):
with open(path, "wb") as f:
pickle.dump(self.speech_data, f)
def load_speech_data(self, path):
with open(path, 'rb') as f:
self.speech_data = pickle.load(f)
# MLFLOW
@memoized_property
def mlflow_client(self):
mlflow.set_tracking_uri(MLFLOW_URL)
return MlflowClient()
@memoized_property
def mlflow_experiment_id(self):
try:
return self.mlflow_client.create_experiment(self.experiment_name)
except BaseException:
return self.mlflow_client.get_experiment_by_name(self.experiment_name).experiment_id
@memoized_property
def mlflow_run(self):
return self.mlflow_client.create_run(self.mlflow_experiment_id)
def mlflow_log_param(self, key, value):
self.mlflow_client.log_param(self.mlflow_run.info.run_id, key, value)
def mlflow_log_metric(self, key, value):
self.mlflow_client.log_metric(self.mlflow_run.info.run_id, key, value)
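    # Note: `memoized_property` caches each value after the first access, so a
    # single MLflow run is created per trainer instance and every subsequent
    # log_param/log_metric call attaches to that same run.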
if __name__ == '__main__':
# Hire the trainer
trainer = Bundestrainer()
    # Train the model
trainer.get_init_fit()
# Save the result
trainer.save_w2v('model2.w2v')
trainer.save_model('model2.tf')
trainer.save_party_mapping('model2.pm')
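# A minimal reload sketch, assuming only the Bundestrainer methods defined
# above; the file names mirror the save calls and the input string is a
# placeholder.
def _example_reload_and_predict():
    predictor = Bundestrainer()
    predictor.load_w2c('model2.w2v')
    predictor.load_model('model2.tf')
    predictor.load_party_mapping('model2.pm')
    return predictor.predict_party_by_string('Wir brauchen mehr Klimaschutz.')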
| [
"tensorflow.keras.models.load_model",
"pandas.Series",
"tensorflow.keras.layers.Masking",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Sequential",
"sklearn.model_selection.train_test_split",
"pandas.DataFrame",
"sklearn.preprocessing.LabelEncoder",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.preprocessing.sequence.pad_sequences"
] | bundestag/bundestrainer.py | [(88, 'bundestag.data.get_data', 'data.get_data', ([], {}), False, 'from bundestag import data, utils\n'), (95, 'bundestag.utils.impute_party', 'utils.impute_party', (['self.speech_data', 'self.bio_data'], {}), False, 'from bundestag import data, utils\n'), (96, 'bundestag.utils.remove_non_party', 'utils.remove_non_party', (['self.speech_data'], {}), False, 'from bundestag import data, utils\n'), (100, 'bundestag.utils.balance', 'utils.balance', (['self.speech_data', 'self.balance_treshold'], {}), False, 'from bundestag import data, utils\n'), (119, 'tensorflow.keras.preprocessing.sequence.pad_sequences', 'pad_sequences', (['documents'], {'dtype': '"""float32"""', 'padding': '"""post"""', 'maxlen': 'self.pad_len'}), False, 'from tensorflow.keras.preprocessing.sequence import pad_sequences\n'), (126, 'pandas.DataFrame', 'pd.DataFrame', ([], {}), True, 'import pandas as pd\n'), (139, 'sklearn.model_selection.train_test_split', 'train_test_split', (['self.X', 'self.y'], {'test_size': '(0.3)', 'random_state': '(42)'}), False, 'from sklearn.model_selection import train_test_split\n'), (142, 'bundestag.bundes_w2v.BundesW2V', 'BundesW2V', ([], {}), False, 'from bundestag.bundes_w2v import BundesW2V\n'), (146, 'tensorflow.keras.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras import Sequential, layers\n'), (169, 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'patience': 'self.patience', 'restore_best_weights': '(True)'}), False, 'from tensorflow.keras.callbacks import EarlyStopping\n'), (212, 'bundestag.utils.basic_preprocess', 'utils.basic_preprocess', (['text_string'], {}), False, 'from bundestag import data, utils\n'), (214, 'pandas.Series', 'pd.Series', (['processed_string_as_list'], {}), True, 'import pandas as pd\n'), (223, 'os.path.join', 'os.path.join', (['name'], {}), False, 'import os\n'), (228, 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['path'], {}), False, 'from tensorflow import keras\n'), (232, 'google.cloud.storage.Client', 'storage.Client', ([], {}), False, 'from google.cloud import storage\n'), (239, 'os.path.join', 'os.path.join', (['name'], {}), False, 'import os\n'), (244, 'bundestag.bundes_w2v.BundesW2V', 'BundesW2V', ([], {}), False, 'from bundestag.bundes_w2v import BundesW2V\n'), (267, 'mlflow.set_tracking_uri', 'mlflow.set_tracking_uri', (['MLFLOW_URL'], {}), False, 'import mlflow\n'), (268, 'mlflow.tracking.MlflowClient', 'MlflowClient', ([], {}), False, 'from mlflow.tracking import MlflowClient\n'), (147, 'tensorflow.keras.layers.Masking', 'layers.Masking', ([], {}), False, 'from tensorflow.keras import Sequential, layers\n'), (148, 'tensorflow.keras.layers.LSTM', 'layers.LSTM', (['self.lstm_nodes'], {'activation': '"""tanh"""'}), False, 'from tensorflow.keras import Sequential, layers\n'), (249, 'pickle.dump', 'pickle.dump', (['self.party_mapping', 'f'], {}), False, 'import pickle\n'), (254, 'pickle.load', 'pickle.load', (['f'], {}), False, 'import pickle\n'), (258, 'pickle.dump', 'pickle.dump', (['self.speech_data', 'f'], {}), False, 'import pickle\n'), (262, 'pickle.load', 'pickle.load', (['f'], {}), False, 'import pickle\n'), (128, 'sklearn.preprocessing.LabelEncoder', 'LabelEncoder', ([], {}), False, 'from sklearn.preprocessing import LabelEncoder\n'), (153, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['nodes'], {'activation': 'act'}), False, 'from tensorflow.keras import Sequential, layers\n'), (162, 'tensorflow.keras.layers.Dense', 'layers.Dense', 
(['self.last_layer_nodes'], {'activation': '"""softmax"""'}), False, 'from tensorflow.keras import Sequential, layers\n')] |
powerbi1/keras-tuner | cfc6e20956cb8554ee29ef2a1ba4635da7d0228b | # Copyright 2019 The Keras Tuner Authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras import layers
from kerastuner.engine import hypermodel
class HyperXception(hypermodel.HyperModel):
"""An Xception HyperModel."""
def __init__(self,
include_top=True,
input_shape=None,
input_tensor=None,
classes=None):
super(HyperXception, self).__init__()
if include_top and classes is None:
raise ValueError('You must specify `classes` when '
'`include_top=True`')
if input_shape is None and input_tensor is None:
raise ValueError('You must specify either `input_shape` '
'or `input_tensor`.')
self.include_top = include_top
self.input_shape = input_shape
self.input_tensor = input_tensor
self.classes = classes
def build(self, hp):
activation = hp.Choice('activation', ['relu', 'selu'])
# Model definition.
if self.input_tensor is not None:
inputs = tf.keras.utils.get_source_inputs(self.input_tensor)
x = self.input_tensor
else:
inputs = layers.Input(shape=self.input_shape)
x = inputs
# Initial conv2d.
conv2d_num_filters = hp.Choice(
'conv2d_num_filters', [32, 64, 128], default=64)
kernel_size = hp.Choice('kernel_size', [3, 5])
initial_strides = hp.Choice('initial_strides', [2])
x = conv(x,
conv2d_num_filters,
kernel_size=kernel_size,
activation=activation,
strides=initial_strides)
# Separable convs.
sep_num_filters = hp.Range(
'sep_num_filters', 128, 768, step=128, default=256)
num_residual_blocks = hp.Range('num_residual_blocks', 2, 8, default=4)
for _ in range(num_residual_blocks):
x = residual(x,
sep_num_filters,
activation=activation,
max_pooling=False)
# Exit flow.
x = residual(x,
2*sep_num_filters,
activation=activation,
max_pooling=True)
pooling = hp.Choice('pooling', ['avg', 'flatten', 'max'])
if pooling == 'flatten':
x = layers.Flatten()(x)
elif pooling == 'avg':
x = layers.GlobalAveragePooling2D()(x)
else:
x = layers.GlobalMaxPooling2D()(x)
if self.include_top:
# Dense
num_dense_layers = hp.Range('num_dense_layers', 1, 3)
dropout_rate = hp.Linear(
'dropout_rate', 0.0, 0.6, resolution=0.1, default=0.5)
dense_use_bn = hp.Choice('dense_use_bn', [True, False])
for _ in range(num_dense_layers):
x = dense(x,
self.classes,
activation=activation,
batchnorm=dense_use_bn,
dropout_rate=dropout_rate)
output = layers.Dense(self.classes, activation='softmax')(x)
model = keras.Model(inputs, output, name='Xception')
model.compile(
optimizer=keras.optimizers.Adam(
hp.Choice('learning_rate', [1e-3, 1e-4, 1e-5])),
loss='categorical_crossentropy',
metrics=['accuracy'])
return model
else:
return keras.Model(inputs, x, name='Xception')
def sep_conv(x, num_filters, kernel_size=(3, 3), activation='relu'):
if activation == 'selu':
x = layers.SeparableConv2D(num_filters, kernel_size,
activation='selu',
padding='same',
kernel_initializer='lecun_normal')(x)
elif activation == 'relu':
x = layers.SeparableConv2D(num_filters, kernel_size,
padding='same',
use_bias=False)(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
else:
        raise ValueError('Unknown activation function: %s' % (activation,))
return x
def residual(x, num_filters,
kernel_size=(3, 3),
activation='relu',
pool_strides=(2, 2),
max_pooling=True):
"Residual block."
if max_pooling:
res = layers.Conv2D(num_filters, kernel_size=(
1, 1), strides=pool_strides, padding='same')(x)
elif num_filters != keras.backend.int_shape(x)[-1]:
res = layers.Conv2D(num_filters, kernel_size=(1, 1), padding='same')(x)
else:
res = x
x = sep_conv(x, num_filters, kernel_size, activation)
x = sep_conv(x, num_filters, kernel_size, activation)
if max_pooling:
x = layers.MaxPooling2D(
kernel_size, strides=pool_strides, padding='same')(x)
x = layers.add([x, res])
return x
def conv(x, num_filters,
kernel_size=(3, 3), activation='relu', strides=(2, 2)):
"2d convolution block."
if activation == 'selu':
x = layers.Conv2D(num_filters, kernel_size,
strides=strides, activation='selu',
padding='same', kernel_initializer='lecun_normal',
bias_initializer='zeros')(x)
elif activation == 'relu':
x = layers.Conv2D(num_filters, kernel_size,
strides=strides, padding='same', use_bias=False)(x)
x = layers.BatchNormalization()(x)
x = layers.Activation('relu')(x)
else:
        msg = 'Unknown activation function: %s' % activation
        raise ValueError(msg)
return x
def dense(x, dims, activation='relu', batchnorm=True, dropout_rate=0):
if activation == 'selu':
x = layers.Dense(dims, activation='selu',
kernel_initializer='lecun_normal',
bias_initializer='zeros')(x)
if dropout_rate:
x = layers.AlphaDropout(dropout_rate)(x)
elif activation == 'relu':
x = layers.Dense(dims, activation='relu')(x)
if batchnorm:
x = layers.BatchNormalization()(x)
if dropout_rate:
x = layers.Dropout(dropout_rate)(x)
else:
        msg = 'Unknown activation function: %s' % activation
        raise ValueError(msg)
return x
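# A minimal tuning sketch, assuming the same-era kerastuner search API; the
# dataset arguments, input shape, class count and trial budget are
# placeholders, not values from this file.
def _example_tune(x_train, y_train, x_val, y_val):
    from kerastuner.tuners import RandomSearch
    hypermodel = HyperXception(input_shape=(128, 128, 3), classes=10)
    tuner = RandomSearch(hypermodel, objective='val_accuracy', max_trials=5)
    tuner.search(x_train, y_train, validation_data=(x_val, y_val))
    return tuner.get_best_models(num_models=1)[0]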
| [
"tensorflow.keras.utils.get_source_inputs",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.backend.int_shape",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Model",
"tensorflow.keras.layers.SeparableConv2D",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.GlobalMaxPooling2D",
"tensorflow.keras.layers.add",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.AlphaDropout",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Input"
] | kerastuner/applications/xception.py | [(151, 'tensorflow.keras.layers.add', 'layers.add', (['[x, res]'], {}), False, 'from tensorflow.keras import layers\n'), (49, 'tensorflow.keras.utils.get_source_inputs', 'tf.keras.utils.get_source_inputs', (['self.input_tensor'], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'self.input_shape'}), False, 'from tensorflow.keras import layers\n'), (102, 'tensorflow.keras.Model', 'keras.Model', (['inputs', 'output'], {'name': '"""Xception"""'}), True, 'import tensorflow.keras as keras\n'), (111, 'tensorflow.keras.Model', 'keras.Model', (['inputs', 'x'], {'name': '"""Xception"""'}), True, 'import tensorflow.keras as keras\n'), (116, 'tensorflow.keras.layers.SeparableConv2D', 'layers.SeparableConv2D', (['num_filters', 'kernel_size'], {'activation': '"""selu"""', 'padding': '"""same"""', 'kernel_initializer': '"""lecun_normal"""'}), False, 'from tensorflow.keras import layers\n'), (138, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['num_filters'], {'kernel_size': '(1, 1)', 'strides': 'pool_strides', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (148, 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', (['kernel_size'], {'strides': 'pool_strides', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (159, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['num_filters', 'kernel_size'], {'strides': 'strides', 'activation': '"""selu"""', 'padding': '"""same"""', 'kernel_initializer': '"""lecun_normal"""', 'bias_initializer': '"""zeros"""'}), False, 'from tensorflow.keras import layers\n'), (176, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['dims'], {'activation': '"""selu"""', 'kernel_initializer': '"""lecun_normal"""', 'bias_initializer': '"""zeros"""'}), False, 'from tensorflow.keras import layers\n'), (83, 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), False, 'from tensorflow.keras import layers\n'), (101, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['self.classes'], {'activation': '"""softmax"""'}), False, 'from tensorflow.keras import layers\n'), (121, 'tensorflow.keras.layers.SeparableConv2D', 'layers.SeparableConv2D', (['num_filters', 'kernel_size'], {'padding': '"""same"""', 'use_bias': '(False)'}), False, 'from tensorflow.keras import layers\n'), (124, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), False, 'from tensorflow.keras import layers\n'), (125, 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras import layers\n'), (140, 'tensorflow.keras.backend.int_shape', 'keras.backend.int_shape', (['x'], {}), True, 'import tensorflow.keras as keras\n'), (141, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['num_filters'], {'kernel_size': '(1, 1)', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (164, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', (['num_filters', 'kernel_size'], {'strides': 'strides', 'padding': '"""same"""', 'use_bias': '(False)'}), False, 'from tensorflow.keras import layers\n'), (166, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), False, 'from tensorflow.keras import layers\n'), (167, 'tensorflow.keras.layers.Activation', 'layers.Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras import layers\n'), (180, 'tensorflow.keras.layers.AlphaDropout', 'layers.AlphaDropout', (['dropout_rate'], {}), 
False, 'from tensorflow.keras import layers\n'), (182, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['dims'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras import layers\n'), (85, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'layers.GlobalAveragePooling2D', ([], {}), False, 'from tensorflow.keras import layers\n'), (87, 'tensorflow.keras.layers.GlobalMaxPooling2D', 'layers.GlobalMaxPooling2D', ([], {}), False, 'from tensorflow.keras import layers\n'), (184, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), False, 'from tensorflow.keras import layers\n'), (186, 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['dropout_rate'], {}), False, 'from tensorflow.keras import layers\n')] |
ishine/neurst | 2ba322393fcfed4261b33f4a657e12bbe321baaa | # Copyright 2020 ByteDance Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import pickle
import tensorflow as tf
import tensorflow.keras.backend as K
from absl import logging
from neurst.exps import register_exp
from neurst.exps.sequence_generator import SequenceGenerator
from neurst.utils import compat
from neurst.utils.flags_core import Flag
@register_exp(["mask_predict", "mask_generation"])
class MaskSequenceGenerator(SequenceGenerator):
""" Entry for sequence generation. """
def __init__(self, args, **kwargs):
""" Initializes a util class for sequence generation. """
self._loaded_mask = None
if args["mask_pkl"]:
logging.info(f"Loading mask from {args['mask_pkl']}")
with tf.io.gfile.GFile(args["mask_pkl"], 'rb') as f:
self._loaded_mask = pickle.load(f)
super(MaskSequenceGenerator, self).__init__(args, **kwargs)
@staticmethod
def class_or_method_args():
this_flags = super(MaskSequenceGenerator, MaskSequenceGenerator).class_or_method_args()
this_flags.append(Flag("mask_pkl", dtype=Flag.TYPE.STRING, default=None,
help="The path to the mask pkl file."), )
return this_flags
@staticmethod
def build_generation_model(task, model, search_layer, output_sequence_only=True):
""" Build keras model for generation.
Args:
task: The task object.
model: An instance of neurst.models.model.BaseModel
search_layer: A sequence search object.
output_sequence_only: Only generated sequences will output if True.
Returns: the generation model.
"""
if search_layer is None:
raise ValueError(
"The parameters for generation method must be provided: "
"search_method, search_method.params, ...")
inps = task.create_inputs(compat.ModeKeys.INFER)
formatted_inps = task.example_to_input(inps, compat.ModeKeys.INFER)
search_layer.set_model(model)
generation_ops = search_layer(formatted_inps)
if output_sequence_only:
generation_ops = generation_ops[0]
keras_model = tf.keras.Model(inps, generation_ops)
return keras_model
def apply_mask(self, model, masks):
tuples = []
for (weight, mask) in list(zip(model.trainable_weights, masks)):
masked_weight = weight * tf.cast(mask, weight.dtype.base_dtype)
tuples.append((weight, masked_weight))
K.batch_set_value(tuples)
def _build_and_restore_model(self):
""" Build a single model or ensemble model. """
model = super(MaskSequenceGenerator, self)._build_and_restore_model()
if self._loaded_mask is not None:
self.apply_mask(model, self._loaded_mask)
return model
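    # A minimal sketch, assuming `model` is a built Keras model: writes an
    # all-ones mask pkl compatible with the `mask_pkl` flag above (every
    # weight is kept; a real prune-tune mask would contain zeros instead).
    @staticmethod
    def example_write_trivial_mask(model, path):
        import numpy as np
        masks = [np.ones(w.shape.as_list(), dtype=np.float32)
                 for w in model.trainable_weights]
        with tf.io.gfile.GFile(path, "wb") as f:
            pickle.dump(masks, f)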
| [
"tensorflow.io.gfile.GFile",
"tensorflow.cast",
"tensorflow.keras.backend.batch_set_value",
"tensorflow.keras.Model"
] | examples/prune_tune/src/mask_sequence_generator.py | [(26, 'neurst.exps.register_exp', 'register_exp', (["['mask_predict', 'mask_generation']"], {}), False, 'from neurst.exps import register_exp\n'), (68, 'tensorflow.keras.Model', 'tf.keras.Model', (['inps', 'generation_ops'], {}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.keras.backend.batch_set_value', 'K.batch_set_value', (['tuples'], {}), True, 'import tensorflow.keras.backend as K\n'), (34, 'absl.logging.info', 'logging.info', (['f"""Loading mask from {args[\'mask_pkl\']}"""'], {}), False, 'from absl import logging\n'), (42, 'neurst.utils.flags_core.Flag', 'Flag', (['"""mask_pkl"""'], {'dtype': 'Flag.TYPE.STRING', 'default': 'None', 'help': '"""The path to the mask pkl file."""'}), False, 'from neurst.utils.flags_core import Flag\n'), (35, 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (["args['mask_pkl']", '"""rb"""'], {}), True, 'import tensorflow as tf\n'), (36, 'pickle.load', 'pickle.load', (['f'], {}), False, 'import pickle\n'), (74, 'tensorflow.cast', 'tf.cast', (['mask', 'weight.dtype.base_dtype'], {}), True, 'import tensorflow as tf\n')] |
mediocretech/patchwork | ad21c81611f74569e93f563d765cba2259b1d4b3 | # -*- coding: utf-8 -*-
import param
import tensorflow as tf
from patchwork._layers import _next_layer
class GlobalPooling(param.Parameterized):
"""
Just a single pooling layer.
"""
pooling_type = param.ObjectSelector(default="max pool", objects=["max pool", "average pool", "flatten"])
description = """
A single pooling or flattening layer to map outputs of a feature extractor to a dense vector. No trainable parameters.
"""
def build(self, feature_shape):
#inpt = tf.keras.layers.Input((None, None, inpt_channels))
inpt = tf.keras.layers.Input(feature_shape)
if self.pooling_type == "max pool":
pool = tf.keras.layers.GlobalMaxPool2D()
elif self.pooling_type == "average pool":
pool = tf.keras.layers.GlobalAvgPool2D()
else:
pool = tf.keras.layers.Flatten()
# store a reference to the model in case we need it later
self._model = tf.keras.Model(inpt, pool(inpt))
return self._model
def model_params(self):
return {"fine_tuning_type":"GlobalPooling",
"pooling_type":self.pooling_type,
"num_params":self._model.count_params(),
"num_layers":len(self._model.layers)}
class ConvNet(param.Parameterized):
"""
Convolutional network
"""
layers = param.String(default="128,p,d,128", doc="Comma-separated list of filters")
kernel_size = param.ObjectSelector(default=1, objects=[1,3,5], doc="Spatial size of filters")
batchnorm = param.Boolean(False, doc="Whether to use batch normalization in convolutional layers")
separable_convolutions = param.Boolean(False, doc="Whether to use depthwise separable convolutions")
dropout_rate = param.Number(0.5, bounds=(0.05,0.95), doc="Spatial dropout rate.")
pooling_type = param.ObjectSelector(default="max pool", objects=["max pool", "average pool", "flatten"],
doc="Whether to use global mean or max pooling.")
_description = """
Convolutional network with global pooling at the end. Set dropout to 0 to disable.
"""
description = """
Convolutional network with global pooling at the end. \n\n
Use a comma-separated list to define layers: integer for a convolution, `p` for 2x2 max pooling, `d` for 2D spatial dropout, and `r` for a residual block.
"""
def build(self, feature_shape):
inpt = tf.keras.layers.Input(feature_shape)
net = inpt
for l in self.layers.split(","):
l = l.strip()
net = _next_layer(net, l, kernel_size=self.kernel_size,
dropout_rate=self.dropout_rate,
separable=self.separable_convolutions,
batchnorm=self.batchnorm)
if self.pooling_type == "max pool":
net = tf.keras.layers.GlobalMaxPool2D()(net)
elif self.pooling_type == "average pool":
net = tf.keras.layers.GlobalAvgPool2D()(net)
else:
net = tf.keras.layers.Flatten()(net)
# store reference to model in case we need it later
self._model = tf.keras.Model(inpt, net)
return self._model
def model_params(self):
return {"fine_tuning_type":"ConvNet",
"pooling_type":self.pooling_type,
"num_params":self._model.count_params(),
"num_layers":len(self._model.layers),
"kernel_size":self.kernel_size,
"separable":self.separable_convolutions,
"structure":self.layers}
| [
"tensorflow.keras.layers.GlobalMaxPool2D",
"tensorflow.keras.layers.GlobalAvgPool2D",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Input"
] | patchwork/_fine_tuning_models.py | [(12, 'param.ObjectSelector', 'param.ObjectSelector', ([], {'default': '"""max pool"""', 'objects': "['max pool', 'average pool', 'flatten']"}), False, 'import param\n'), (42, 'param.String', 'param.String', ([], {'default': '"""128,p,d,128"""', 'doc': '"""Comma-separated list of filters"""'}), False, 'import param\n'), (43, 'param.ObjectSelector', 'param.ObjectSelector', ([], {'default': '(1)', 'objects': '[1, 3, 5]', 'doc': '"""Spatial size of filters"""'}), False, 'import param\n'), (44, 'param.Boolean', 'param.Boolean', (['(False)'], {'doc': '"""Whether to use batch normalization in convolutional layers"""'}), False, 'import param\n'), (45, 'param.Boolean', 'param.Boolean', (['(False)'], {'doc': '"""Whether to use depthwise separable convolutions"""'}), False, 'import param\n'), (46, 'param.Number', 'param.Number', (['(0.5)'], {'bounds': '(0.05, 0.95)', 'doc': '"""Spatial dropout rate."""'}), False, 'import param\n'), (47, 'param.ObjectSelector', 'param.ObjectSelector', ([], {'default': '"""max pool"""', 'objects': "['max pool', 'average pool', 'flatten']", 'doc': '"""Whether to use global mean or max pooling."""'}), False, 'import param\n'), (20, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['feature_shape'], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['feature_shape'], {}), True, 'import tensorflow as tf\n'), (76, 'tensorflow.keras.Model', 'tf.keras.Model', (['inpt', 'net'], {}), True, 'import tensorflow as tf\n'), (22, 'tensorflow.keras.layers.GlobalMaxPool2D', 'tf.keras.layers.GlobalMaxPool2D', ([], {}), True, 'import tensorflow as tf\n'), (64, 'patchwork._layers._next_layer', '_next_layer', (['net', 'l'], {'kernel_size': 'self.kernel_size', 'dropout_rate': 'self.dropout_rate', 'separable': 'self.separable_convolutions', 'batchnorm': 'self.batchnorm'}), False, 'from patchwork._layers import _next_layer\n'), (24, 'tensorflow.keras.layers.GlobalAvgPool2D', 'tf.keras.layers.GlobalAvgPool2D', ([], {}), True, 'import tensorflow as tf\n'), (26, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.keras.layers.GlobalMaxPool2D', 'tf.keras.layers.GlobalMaxPool2D', ([], {}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.keras.layers.GlobalAvgPool2D', 'tf.keras.layers.GlobalAvgPool2D', ([], {}), True, 'import tensorflow as tf\n'), (74, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), True, 'import tensorflow as tf\n')] |
jemiar/surgery-gesture-recog | 83b98c2ccd937c898eb731ccdf28c9248ce3df8d | import cv2
import os
import numpy as np
import tensorflow as tf
from tensorflow import keras
import tensorflow.keras.layers as layers
from data_generator import DataGenerator
base_directory = './'
# function used to read video data and save normal or transit blocks to folder
# `fromarray` is a list of dicts, each holding a video 'file' name and its
# gesture 'transcription' (pairs of start/end frame indices)
def save_data(fromarray, height=240, width=320, folder='data_001/', idnumber=1):
# array to store ids of normal blocks
normals = []
# array to store ids of transit blocks
transits = []
# dictionary to store target y value (class) of each block
labels = {}
os.chdir(base_directory + 'Suturing/video/')
# for each element in fromarray (store video file names)
for arr in fromarray:
# use CV2 to capture the video file
cap = cv2.VideoCapture(arr['file'][:-4] + '_capture1.avi')
i = 1
# Initialize numpy array to store frames of Red, Green, Blue channels of video
red_frames = np.empty((0, height, width))
green_frames = np.empty((0, height, width))
blue_frames = np.empty((0, height, width))
# while reading the capture, only store 1 frame for every 3 frames
while cap.isOpened():
ret, frame = cap.read()
if ret == False:
break
if i%3 == 1:
# Resize the frame to reduce the computation during training
frame = cv2.resize(frame, (width, height), interpolation=cv2.INTER_AREA)
# Cast the frame as a numpy array
f = np.asarray(frame)
# Update the color of frame from BGR to RGB
f = cv2.cvtColor(f, cv2.COLOR_BGR2RGB)
                # Append frame to its appropriate channel
red_frames = np.append(red_frames, np.expand_dims(f[:,:,0], axis=0), axis=0)
green_frames = np.append(green_frames, np.expand_dims(f[:,:,1], axis=0), axis=0)
blue_frames = np.append(blue_frames, np.expand_dims(f[:,:,2], axis=0), axis=0)
i += 1
# Release the capture when finishing reading
cap.release()
# Normalize the value of each element to range [0, 1]
red_frames = red_frames / 255.0
green_frames = green_frames / 255.0
blue_frames = blue_frames / 255.0
        # For each transcription (records where each gesture starts and ends)
for k, t in enumerate(arr['transcription']):
# Save the normal block
# Calculate the left most frame of 1 gesture
left = (t[0] + 1) // 3
# Calculate the right most frame of 1 gesture
right = (t[1] - 1) // 3
# Calculate the number of normal blocks in a gesture
num_blocks = (right - left + 1) // 10
for index in range(num_blocks):
# Each block has shape (10, height, width, 3)
block = np.expand_dims(red_frames[left+index*10:left+(index+1)*10,:,:], axis=3)
block = np.append(block, np.expand_dims(green_frames[left+index*10:left+(index+1)*10,:,:], axis=3), axis=3)
block = np.append(block, np.expand_dims(blue_frames[left+index*10:left+(index+1)*10,:,:], axis=3), axis=3)
# Store normal block
npy_name = 'id_' + str(idnumber)
temp_obj = {'id': npy_name, 'file': arr['file'], 'label': 0}
normals.append(temp_obj)
labels[npy_name] = 0
np.save(base_directory + folder + npy_name + '.npy', block)
idnumber += 1
# Save transit blocks
if k < (len(arr['transcription']) - 1):
# Each transit block has the last 5 frames of 1 gesture and the 1st 5 frames of the next gesture
# Calculate the left most frame of a transit block
ind = (t[1] - 1) // 3 - 4
block = np.expand_dims(red_frames[ind:ind+10,:,:], axis=3)
block = np.append(block, np.expand_dims(green_frames[ind:ind+10,:,:], axis=3), axis=3)
block = np.append(block, np.expand_dims(blue_frames[ind:ind+10,:,:], axis=3), axis=3)
# Store transit block
npy_name = 'id_' + str(idnumber)
temp_obj = {'id': npy_name, 'file': arr['file'], 'label': 1}
transits.append(temp_obj)
labels[npy_name] = 1
np.save(base_directory + folder + npy_name + '.npy', block)
idnumber += 1
return normals, transits, labels
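# Shape invariant established above: every saved block, normal or transit, is
# a float array of shape (10, height, width, 3) with values in [0, 1], and
# `labels` maps each block id to 0 (normal) or 1 (transit).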
# function to create 3D CNN model to classify normal and transit blocks
def create_model(height=240, width=320):
# shape of input: 1 block has 10 frames x height x width x 3 channels (RGB)
input = tf.keras.Input((10, height, width, 3))
# 1st Conv3D block includes Conv3D with 8 filters, MaxPool3D and BatchNormalization
x = layers.Conv3D(filters=8, kernel_size=(3,3,3), activation='relu')(input)
x = layers.MaxPool3D(pool_size=(2,2,2))(x)
x = layers.BatchNormalization()(x)
# 2nd Conv3D block includes Conv3D with 16 filters, MaxPool3D and BatchNormalization
x = layers.Conv3D(filters=16, kernel_size=(3,3,3), activation='relu')(x)
x = layers.MaxPool3D(pool_size=(2,2,2))(x)
x = layers.BatchNormalization()(x)
# 3rd Conv3D block includes Conv3D with 32 filters, MaxPool3D and BatchNormalization
    x = layers.Conv3D(filters=32, kernel_size=(3,3,3), activation='relu')(x)
x = layers.MaxPool3D(pool_size=(1,2,2))(x)
x = layers.BatchNormalization()(x)
# Fully-connected block includes GlobalAveragePooling3D, Fully-Connected layer with 512 units and DropOut for Regularization
x = layers.GlobalAveragePooling3D()(x)
x = layers.Dense(units=512, activation='relu')(x)
    x = layers.Dropout(0.7)(x)
# output shape (1,) produces value between [0, 1]
output = layers.Dense(units=1, activation='sigmoid')(x)
model = tf.keras.Model(input, output, name='3DCNN')
return model
# Create model
model = create_model(240, 320)
# Create data generator for training and validation
params = {
'dim': (10, 240, 320),
'batch_size': 16,
'n_classes': 2,
'n_channels': 3,
'folder': 'data_001/',
'shuffle': True
}
train_generator = DataGenerator(training_ids, labels, **params)
val_generator = DataGenerator(validation_ids, labels, **params)
learning_rate = 0.001
metrics = [keras.metrics.Accuracy(), keras.metrics.Precision(), keras.metrics.Recall()]
# Compile model, using binary cross-entropy for loss
model.compile(loss='binary_crossentropy', optimizer=keras.optimizers.Adam(learning_rate=learning_rate), metrics=metrics)
# Train model in 100 epochs
model.fit_generator(generator=train_generator, validation_data=val_generator, epochs=100, shuffle=True)
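# A minimal follow-up sketch (the file name and block id are placeholders):
# persist the trained classifier and sanity-check one block loaded from disk.
def _example_save_and_check(block_id='id_1'):
    model.save('transit_classifier.h5')
    block = np.load(base_directory + 'data_001/' + block_id + '.npy')
    assert block.shape == (10, 240, 320, 3)
    return model.predict(np.expand_dims(block, axis=0))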
| [
"numpy.expand_dims",
"tensorflow.keras.Input",
"tensorflow.keras.layers.MaxPool3D",
"numpy.asarray",
"tensorflow.keras.layers.GlobalAveragePooling3D",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.DropOut",
"tensorflow.keras.layers.Conv3D",
"tensorflow.keras.Model",
"numpy.save",
"tensorflow.keras.metrics.Accuracy",
"tensorflow.keras.metrics.Precision",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.metrics.Recall",
"numpy.empty"
] | transit_classification.py | [(136, 'data_generator.DataGenerator', 'DataGenerator', (['training_ids', 'labels'], {}), False, 'from data_generator import DataGenerator\n'), (137, 'data_generator.DataGenerator', 'DataGenerator', (['validation_ids', 'labels'], {}), False, 'from data_generator import DataGenerator\n'), (19, 'os.chdir', 'os.chdir', (["(base_directory + 'Suturing/video/')"], {}), False, 'import os\n'), (96, 'tensorflow.keras.Input', 'tf.keras.Input', (['(10, height, width, 3)'], {}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.keras.Model', 'tf.keras.Model', (['input', 'output'], {'name': '"""3DCNN"""'}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.keras.metrics.Accuracy', 'keras.metrics.Accuracy', ([], {}), False, 'from tensorflow import keras\n'), (140, 'tensorflow.keras.metrics.Precision', 'keras.metrics.Precision', ([], {}), False, 'from tensorflow import keras\n'), (140, 'tensorflow.keras.metrics.Recall', 'keras.metrics.Recall', ([], {}), False, 'from tensorflow import keras\n'), (23, 'cv2.VideoCapture', 'cv2.VideoCapture', (["(arr['file'][:-4] + '_capture1.avi')"], {}), False, 'import cv2\n'), (26, 'numpy.empty', 'np.empty', (['(0, height, width)'], {}), True, 'import numpy as np\n'), (27, 'numpy.empty', 'np.empty', (['(0, height, width)'], {}), True, 'import numpy as np\n'), (28, 'numpy.empty', 'np.empty', (['(0, height, width)'], {}), True, 'import numpy as np\n'), (99, 'tensorflow.keras.layers.Conv3D', 'layers.Conv3D', ([], {'filters': '(8)', 'kernel_size': '(3, 3, 3)', 'activation': '"""relu"""'}), True, 'import tensorflow.keras.layers as layers\n'), (100, 'tensorflow.keras.layers.MaxPool3D', 'layers.MaxPool3D', ([], {'pool_size': '(2, 2, 2)'}), True, 'import tensorflow.keras.layers as layers\n'), (101, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), True, 'import tensorflow.keras.layers as layers\n'), (104, 'tensorflow.keras.layers.Conv3D', 'layers.Conv3D', ([], {'filters': '(16)', 'kernel_size': '(3, 3, 3)', 'activation': '"""relu"""'}), True, 'import tensorflow.keras.layers as layers\n'), (105, 'tensorflow.keras.layers.MaxPool3D', 'layers.MaxPool3D', ([], {'pool_size': '(2, 2, 2)'}), True, 'import tensorflow.keras.layers as layers\n'), (106, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), True, 'import tensorflow.keras.layers as layers\n'), (109, 'tensorflow.keras.layers.Conv3D', 'layers.Conv3D', ([], {'filters': '(32)', 'kernel_size': '(3, 3, 3)', 'activation': '"""relu"""'}), True, 'import tensorflow.keras.layers as layers\n'), (110, 'tensorflow.keras.layers.MaxPool3D', 'layers.MaxPool3D', ([], {'pool_size': '(1, 2, 2)'}), True, 'import tensorflow.keras.layers as layers\n'), (111, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), True, 'import tensorflow.keras.layers as layers\n'), (114, 'tensorflow.keras.layers.GlobalAveragePooling3D', 'layers.GlobalAveragePooling3D', ([], {}), True, 'import tensorflow.keras.layers as layers\n'), (115, 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': '(512)', 'activation': '"""relu"""'}), True, 'import tensorflow.keras.layers as layers\n'), (116, 'tensorflow.keras.layers.DropOut', 'layers.DropOut', (['(0.7)'], {}), True, 'import tensorflow.keras.layers as layers\n'), (119, 'tensorflow.keras.layers.Dense', 'layers.Dense', ([], {'units': '(1)', 'activation': '"""sigmoid"""'}), True, 'import tensorflow.keras.layers as layers\n'), (142, 'tensorflow.keras.optimizers.Adam', 
'keras.optimizers.Adam', ([], {'learning_rate': 'learning_rate'}), False, 'from tensorflow import keras\n'), (36, 'cv2.resize', 'cv2.resize', (['frame', '(width, height)'], {'interpolation': 'cv2.INTER_AREA'}), False, 'import cv2\n'), (38, 'numpy.asarray', 'np.asarray', (['frame'], {}), True, 'import numpy as np\n'), (40, 'cv2.cvtColor', 'cv2.cvtColor', (['f', 'cv2.COLOR_BGR2RGB'], {}), False, 'import cv2\n'), (64, 'numpy.expand_dims', 'np.expand_dims', (['red_frames[left + index * 10:left + (index + 1) * 10, :, :]'], {'axis': '(3)'}), True, 'import numpy as np\n'), (72, 'numpy.save', 'np.save', (["(base_directory + folder + npy_name + '.npy')", 'block'], {}), True, 'import numpy as np\n'), (80, 'numpy.expand_dims', 'np.expand_dims', (['red_frames[ind:ind + 10, :, :]'], {'axis': '(3)'}), True, 'import numpy as np\n'), (88, 'numpy.save', 'np.save', (["(base_directory + folder + npy_name + '.npy')", 'block'], {}), True, 'import numpy as np\n'), (42, 'numpy.expand_dims', 'np.expand_dims', (['f[:, :, (0)]'], {'axis': '(0)'}), True, 'import numpy as np\n'), (43, 'numpy.expand_dims', 'np.expand_dims', (['f[:, :, (1)]'], {'axis': '(0)'}), True, 'import numpy as np\n'), (44, 'numpy.expand_dims', 'np.expand_dims', (['f[:, :, (2)]'], {'axis': '(0)'}), True, 'import numpy as np\n'), (65, 'numpy.expand_dims', 'np.expand_dims', (['green_frames[left + index * 10:left + (index + 1) * 10, :, :]'], {'axis': '(3)'}), True, 'import numpy as np\n'), (66, 'numpy.expand_dims', 'np.expand_dims', (['blue_frames[left + index * 10:left + (index + 1) * 10, :, :]'], {'axis': '(3)'}), True, 'import numpy as np\n'), (81, 'numpy.expand_dims', 'np.expand_dims', (['green_frames[ind:ind + 10, :, :]'], {'axis': '(3)'}), True, 'import numpy as np\n'), (82, 'numpy.expand_dims', 'np.expand_dims', (['blue_frames[ind:ind + 10, :, :]'], {'axis': '(3)'}), True, 'import numpy as np\n')] |
zcemjjw/MPHY0041_Segmentation | 5840eb6979bb9e3ad19898cd66e0ede8129e3680 | import tensorflow as tf
import numpy as np
import os
import matplotlib.pyplot as plt
from keras.optimizers import Adam, SGD
from tqdm import tqdm
import random
from metrics import dice_coef, dice_coef_loss
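# `metrics.py` itself is not shown; a standard soft-Dice pair matching the
# imported names would look like the sketch below (kept as comments so the
# imports above are not shadowed):
# def dice_coef(y_true, y_pred, smooth=1.0):
#     intersection = tf.reduce_sum(y_true * y_pred)
#     union = tf.reduce_sum(y_true) + tf.reduce_sum(y_pred)
#     return (2.0 * intersection + smooth) / (union + smooth)
# def dice_coef_loss(y_true, y_pred):
#     return 1.0 - dice_coef(y_true, y_pred)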
img_width = 128
img_height = 128
img_channels = 1
path_to_data = './data/datasets-promise12'
path_to_save = './result'
# Load Training Data
N = 50
idx_slice = 15
X_train = np.zeros((N, img_height, img_width, img_channels), dtype=np.float32)
Y_train = np.zeros((N, img_height, img_width, 1), dtype=np.float32)
print('','','')
print('','','')
print('Loading Training Data')
for n in tqdm(range(N)):
image = np.load(os.path.join(path_to_data, "image_train%02d.npy" % n))
label = np.load(os.path.join(path_to_data, "label_train%02d.npy" % n))
X_train[n] = image[idx_slice,:,:, np.newaxis]
Y_train[n] = label[idx_slice,:,:, np.newaxis]
print('Loaded Training Data')
# Load Testing Data
N_test = 30
X_test = np.zeros((N_test, img_height, img_width, img_channels), dtype=np.float32)
print('','','')
print('','','')
print('Loading Testing Data')
for n in tqdm(range(N_test)):
image = np.load(os.path.join(path_to_data, "image_test%02d.npy" % n))
X_test[n] = image[idx_slice,:,:, np.newaxis]
print('Loaded Testing Data')
print('','','')
print('','','')
# Randomly Show an Image
# image_x = random.randint(0, N-1)
# fig = plt.figure()
# ax1 = fig.add_subplot(121)
# plt.imshow(np.squeeze(X_train[image_x]), cmap='gray')
# ax2 = fig.add_subplot(122)
# ax1.title.set_text('Clinical Image')
# plt.imshow(np.squeeze(Y_train[image_x]), cmap='gray')
# ax2.title.set_text('Real Mask')
# plt.show()
# UNet Model
inputs = tf.keras.layers.Input((img_width, img_height, img_channels))
# Convert integers in image matrix to floating point
s = tf.keras.layers.Lambda(lambda x: x / 255)(inputs)
# Encoding
c1 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(s)
c1 = tf.keras.layers.Dropout(0.2)(c1)
c1 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu',
                            kernel_initializer='he_normal', padding='same')(c1)
p1 = tf.keras.layers.MaxPooling2D((2,2))(c1)
c2 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(p1)
c2 = tf.keras.layers.Dropout(0.2)(c2)
c2 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(c2)
p2 = tf.keras.layers.MaxPooling2D((2,2))(c2)
c3 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(p2)
c3 = tf.keras.layers.Dropout(0.2)(c3)
c3 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(c3)
p3 = tf.keras.layers.MaxPooling2D((2,2))(c3)
c4 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(p3)
c4 = tf.keras.layers.Dropout(0.2)(c4)
c4 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(c4)
p4 = tf.keras.layers.MaxPooling2D((2,2))(c4)
c5 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(p4)
c5 = tf.keras.layers.Dropout(0.2)(c5)
c5 = tf.keras.layers.Conv2D(256, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(c5)
p5 = tf.keras.layers.MaxPooling2D((2,2))(c5)
# Decoding Layers
u6 = tf.keras.layers.Conv2DTranspose(128, (2, 2), strides=(2, 2), padding='same',)(c5)
u6 = tf.keras.layers.concatenate([u6, c4])
c6 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(u6)
c6 = tf.keras.layers.Dropout(0.2)(c6)
c6 = tf.keras.layers.Conv2D(128, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(c6)
u7 = tf.keras.layers.Conv2DTranspose(64, (2, 2), strides=(2, 2), padding='same',)(c6)
u7 = tf.keras.layers.concatenate([u7, c3])
c7 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(u7)
c7 = tf.keras.layers.Dropout(0.2)(c7)
c7 = tf.keras.layers.Conv2D(64, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(c7)
u8 = tf.keras.layers.Conv2DTranspose(32, (2, 2), strides=(2, 2), padding='same',)(c7)
u8 = tf.keras.layers.concatenate([u8, c2])
c8 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(u8)
c8 = tf.keras.layers.Dropout(0.2)(c8)
c8 = tf.keras.layers.Conv2D(32, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(c8)
u9 = tf.keras.layers.Conv2DTranspose(16, (2, 2), strides=(2, 2), padding='same',)(c8)
u9 = tf.keras.layers.concatenate([u9, c1])
c9 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(u9)
c9 = tf.keras.layers.Dropout(0.2)(c9)
c9 = tf.keras.layers.Conv2D(16, (3, 3), activation='relu',
kernel_initializer='he_normal', padding='same')(c9)
outputs = tf.keras.layers.Conv2D(1, (1, 1), activation='sigmoid')(c9)
model = tf.keras.Model(inputs=[inputs], outputs=[outputs])
model.compile(optimizer=tf.optimizers.Adam(1e-3), loss=dice_coef_loss, metrics=[dice_coef])
#model.summary()
# Checkpoints and Callbacks
checkpointer = tf.keras.callbacks.ModelCheckpoint('model_pros_segmentation.h5',
verbose=1, save_best_only=True)
callbacks = [
    checkpointer,
    tf.keras.callbacks.EarlyStopping(patience=20, monitor='val_loss'),
    tf.keras.callbacks.TensorBoard(log_dir='logs')]
results = model.fit(X_train, Y_train, validation_split=0.1, batch_size=15,
epochs=1000, callbacks=callbacks)
# Plot a Random Image when Running
idx = random.randint(0, N)
preds_train = model.predict(X_train[:int(X_train.shape[0]*0.9)], verbose=1)
preds_val = model.predict(X_train[int(X_train.shape[0]*0.9):], verbose=1)
preds_test = model.predict(X_test, verbose=1)
preds_train_t = (preds_train > 0.5).astype(np.uint8)
preds_val_t = (preds_val > 0.5).astype(np.uint8)
preds_test_t = (preds_test > 0.5).astype(np.uint8)
print('','','')
print('','','')
print('Saving 2D Segmentation Training Masks')
for ix in tqdm(range(len(preds_train))):
fig = plt.figure()
fig.suptitle(f'2D Segmentation Training Masks (ix={ix+1})', fontsize=12)
ax1 = fig.add_subplot(131)
plt.imshow(np.squeeze(X_train[ix]))
ax2 = fig.add_subplot(132)
plt.imshow(np.squeeze(Y_train[ix]))
ax3 = fig.add_subplot(133)
plt.imshow(np.squeeze(preds_train_t[ix]))
ax1.title.set_text('Clinical Image')
ax2.title.set_text('Real Mask')
ax3.title.set_text('Predicted Mask')
plt.savefig(f'plots_training/Training_Masks_ix_{ix+1}.png')
plt.close()
print('Finished Saving')
print('','','')
print('','','')
print('Saving 2D Segmentation Testing Masks')
for ix in tqdm(range(len(preds_test))):
fig = plt.figure()
fig.suptitle(f'2D Segmentation Testing Masks (ix={ix+1})', fontsize=12)
ax1 = fig.add_subplot(121)
plt.imshow(np.squeeze(X_test[ix]))
ax3 = fig.add_subplot(122)
plt.imshow(np.squeeze(preds_test_t[ix]))
ax1.title.set_text('Clinical Image')
ax3.title.set_text('Predicted Mask')
plt.savefig(f'plots_testing/Testing_Masks_ix_{ix+1}.png')
plt.close()
print('Finished Saving')
print('','','')
print('','','')
print('Training Script has successfully completed') | [
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.layers.Lambda",
"matplotlib.pyplot.figure",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.layers.Conv2D",
"tensorflow.optimizers.Adam",
"tensorflow.keras.layers.concatenate",
"tensorflow.keras.Model",
"tensorflow.keras.callbacks.EarlyStopping",
"matplotlib.pyplot.savefig",
"numpy.squeeze",
"matplotlib.pyplot.close",
"tensorflow.keras.callbacks.TensorBoard",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.MaxPooling2D",
"numpy.zeros",
"tensorflow.keras.layers.Input"
] | UNet_2D/training.py | [(20, 'numpy.zeros', 'np.zeros', (['(N, img_height, img_width, img_channels)'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (21, 'numpy.zeros', 'np.zeros', (['(N, img_height, img_width, 1)'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (37, 'numpy.zeros', 'np.zeros', (['(N_test, img_height, img_width, img_channels)'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (64, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['(img_width, img_height, img_channels)'], {}), True, 'import tensorflow as tf\n'), (111, 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[u6, c4]'], {}), True, 'import tensorflow as tf\n'), (120, 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[u7, c3]'], {}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[u8, c2]'], {}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.keras.layers.concatenate', 'tf.keras.layers.concatenate', (['[u9, c1]'], {}), True, 'import tensorflow as tf\n'), (148, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[inputs]', 'outputs': '[outputs]'}), True, 'import tensorflow as tf\n'), (153, 'tensorflow.keras.callbacks.ModelCheckpoint', 'tf.keras.callbacks.ModelCheckpoint', (['"""model_pros_segmentation.h5"""'], {'verbose': '(1)', 'save_best_only': '(True)'}), True, 'import tensorflow as tf\n'), (163, 'random.randint', 'random.randint', (['(0)', 'N'], {}), False, 'import random\n'), (67, 'tensorflow.keras.layers.Lambda', 'tf.keras.layers.Lambda', (['(lambda x: x / 255)'], {}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(16)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (72, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (73, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(16)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (75, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2, 2)'], {}), True, 'import tensorflow as tf\n'), (78, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (83, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2, 2)'], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 
'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (91, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2, 2)'], {}), True, 'import tensorflow as tf\n'), (94, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (97, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2, 2)'], {}), True, 'import tensorflow as tf\n'), (102, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (104, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (105, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(256)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (107, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2, 2)'], {}), True, 'import tensorflow as tf\n'), (110, 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['(128)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (112, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (114, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (115, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['(64)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (121, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (123, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['(32)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (132, 'tensorflow.keras.layers.Dropout', 
'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (137, 'tensorflow.keras.layers.Conv2DTranspose', 'tf.keras.layers.Conv2DTranspose', (['(16)', '(2, 2)'], {'strides': '(2, 2)', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (139, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(16)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (141, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(16)', '(3, 3)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (146, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(1)', '(1, 1)'], {'activation': '"""sigmoid"""'}), True, 'import tensorflow as tf\n'), (156, 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'patience': '(20)', 'monitor': '"""val_loss"""'}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.keras.callbacks.TensorBoard', 'tf.keras.callbacks.TensorBoard', ([], {'log_dir': '"""logs"""'}), True, 'import tensorflow as tf\n'), (178, 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (189, 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""plots_training/Training_Masks_ix_{ix + 1}.png"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (190, 'matplotlib.pyplot.close', 'plt.close', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (198, 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (207, 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""plots_testing/Testing_Masks_ix_{ix + 1}.png"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (208, 'matplotlib.pyplot.close', 'plt.close', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (28, 'os.path.join', 'os.path.join', (['path_to_data', "('image_train%02d.npy' % n)"], {}), False, 'import os\n'), (29, 'os.path.join', 'os.path.join', (['path_to_data', "('label_train%02d.npy' % n)"], {}), False, 'import os\n'), (44, 'os.path.join', 'os.path.join', (['path_to_data', "('image_test%02d.npy' % n)"], {}), False, 'import os\n'), (149, 'tensorflow.optimizers.Adam', 'tf.optimizers.Adam', (['(0.001)'], {}), True, 'import tensorflow as tf\n'), (181, 'numpy.squeeze', 'np.squeeze', (['X_train[ix]'], {}), True, 'import numpy as np\n'), (183, 'numpy.squeeze', 'np.squeeze', (['Y_train[ix]'], {}), True, 'import numpy as np\n'), (185, 'numpy.squeeze', 'np.squeeze', (['preds_train_t[ix]'], {}), True, 'import numpy as np\n'), (201, 'numpy.squeeze', 'np.squeeze', (['X_test[ix]'], {}), True, 'import numpy as np\n'), (203, 'numpy.squeeze', 'np.squeeze', (['preds_test_t[ix]'], {}), True, 'import numpy as np\n')] |
workingloong/elasticdl | 474146e4c347bab53c5f157441a6008dd204575c | import tensorflow as tf
from elasticdl.python.elasticdl.callbacks import LearningRateScheduler
from elasticdl_preprocessing.layers import SparseEmbedding
from elasticdl_preprocessing.layers.concatenate_with_offset import (
ConcatenateWithOffset,
)
from elasticdl_preprocessing.layers.discretization import Discretization
from elasticdl_preprocessing.layers.hashing import Hashing
from elasticdl_preprocessing.layers.index_lookup import IndexLookup
from elasticdl_preprocessing.layers.to_sparse import ToSparse
from model_zoo.census_model_sqlflow.wide_and_deep.feature_configs import (
FEATURE_TRANSFORM_INFO_EXECUTE_ARRAY,
INPUT_SCHEMAS,
TRANSFORM_OUTPUTS,
age_bucketize,
capital_gain_bucketize,
capital_loss_bucketize,
education_hash,
group1,
group1_embedding_deep,
group1_embedding_wide,
group2,
group2_embedding_deep,
group2_embedding_wide,
group3,
group3_embedding_deep,
hours_per_week_bucketize,
marital_status_lookup,
native_country_hash,
occupation_hash,
race_lookup,
relationship_lookup,
sex_lookup,
workclass_lookup,
)
from model_zoo.census_model_sqlflow.wide_and_deep.transform_ops import (
TransformOpType,
)
# The model definition from model zoo. It's functional style.
# Input Params:
# input_layers: The input layers dict of feature inputs
# wide_embeddings: The embedding list for the wide part
# deep_embeddings: The embedding list for the deep part
def wide_and_deep_classifier(input_layers, wide_embeddings, deep_embeddings):
# Wide Part
wide = tf.keras.layers.Concatenate()(wide_embeddings) # shape = (None, 3)
# Deep Part
dnn_input = tf.reshape(deep_embeddings, shape=(-1, 3 * 8))
for i in [16, 8, 4]:
dnn_input = tf.keras.layers.Dense(i)(dnn_input)
# Output Part
concat_input = tf.concat([wide, dnn_input], 1)
logits = tf.reduce_sum(concat_input, 1, keepdims=True)
probs = tf.reshape(tf.sigmoid(logits), shape=(-1,))
return tf.keras.Model(
inputs=input_layers,
outputs={"logits": logits, "probs": probs},
name="wide_deep",
)
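# A minimal assembly sketch, assuming the helpers in this file; the exact
# unpacking of the transform outputs depends on TRANSFORM_OUTPUTS in
# feature_configs, so it is kept as comments:
# input_layers = get_input_layers(INPUT_SCHEMAS)
# wide_embeddings, deep_embeddings = transform(input_layers)
# model = wide_and_deep_classifier(input_layers, wide_embeddings, deep_embeddings)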
# Build the input layers from the schema of the input features
def get_input_layers(input_schemas):
input_layers = {}
for schema_info in input_schemas:
input_layers[schema_info.name] = tf.keras.layers.Input(
name=schema_info.name, shape=(1,), dtype=schema_info.dtype
)
return input_layers
# Build the transform logic from the metadata in feature_configs.py.
def transform(inputs):
transformed = inputs.copy()
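    # Each entry of FEATURE_TRANSFORM_INFO_EXECUTE_ARRAY names an op type
    # (HASH / BUCKETIZE / LOOKUP / CONCAT / EMBEDDING / ARRAY); dense inputs
    # are first wrapped with ToSparse, then the matching preprocessing layer
    # is applied and its result stored under the configured output name.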
for feature_transform_info in FEATURE_TRANSFORM_INFO_EXECUTE_ARRAY:
if feature_transform_info.op_type == TransformOpType.HASH:
transformed[feature_transform_info.input] = ToSparse()(
transformed[feature_transform_info.input]
)
transformed[feature_transform_info.output] = Hashing(
feature_transform_info.hash_bucket_size
)(transformed[feature_transform_info.input])
elif feature_transform_info.op_type == TransformOpType.BUCKETIZE:
transformed[feature_transform_info.input] = ToSparse()(
transformed[feature_transform_info.input]
)
transformed[feature_transform_info.output] = Discretization(
feature_transform_info.boundaries
)(transformed[feature_transform_info.input])
elif feature_transform_info.op_type == TransformOpType.LOOKUP:
transformed[feature_transform_info.input] = ToSparse()(
transformed[feature_transform_info.input]
)
transformed[feature_transform_info.output] = IndexLookup(
feature_transform_info.vocabulary_list
)(transformed[feature_transform_info.input])
elif feature_transform_info.op_type == TransformOpType.CONCAT:
inputs_to_concat = [
transformed[name] for name in feature_transform_info.input
]
transformed[feature_transform_info.output] = ConcatenateWithOffset(
feature_transform_info.id_offsets
)(inputs_to_concat)
elif feature_transform_info.op_type == TransformOpType.EMBEDDING:
transformed[feature_transform_info.output] = SparseEmbedding(
input_dim=feature_transform_info.input_dim,
output_dim=feature_transform_info.output_dim,
)(transformed[feature_transform_info.input])
elif feature_transform_info.op_type == TransformOpType.ARRAY:
transformed[feature_transform_info.output] = [
transformed[name] for name in feature_transform_info.input
]
return tuple([transformed[name] for name in TRANSFORM_OUTPUTS])
# The following code has the same logic as the `transform` function above.
# It can be generated from the parsed meta in feature_configs using code_gen.
def transform_from_code_gen(source_inputs):
inputs = source_inputs.copy()
education_hash_out = Hashing(education_hash.hash_bucket_size)(
ToSparse()(inputs["education"])
)
occupation_hash_out = Hashing(occupation_hash.hash_bucket_size)(
ToSparse()(inputs["occupation"])
)
native_country_hash_out = Hashing(native_country_hash.hash_bucket_size)(
ToSparse()(inputs["native_country"])
)
workclass_lookup_out = IndexLookup(workclass_lookup.vocabulary_list)(
ToSparse()(inputs["workclass"])
)
marital_status_lookup_out = IndexLookup(
marital_status_lookup.vocabulary_list
)(ToSparse()(inputs["marital_status"]))
relationship_lookup_out = IndexLookup(relationship_lookup.vocabulary_list)(
ToSparse()(inputs["relationship"])
)
race_lookup_out = IndexLookup(race_lookup.vocabulary_list)(
ToSparse()(inputs["race"])
)
sex_lookup_out = IndexLookup(sex_lookup.vocabulary_list)(
ToSparse()(inputs["sex"])
)
age_bucketize_out = Discretization(age_bucketize.boundaries)(
ToSparse()(inputs["age"])
)
capital_gain_bucketize_out = Discretization(
capital_gain_bucketize.boundaries
)(ToSparse()(inputs["capital_gain"]))
capital_loss_bucketize_out = Discretization(
capital_loss_bucketize.boundaries
)(ToSparse()(inputs["capital_loss"]))
hours_per_week_bucketize_out = Discretization(
hours_per_week_bucketize.boundaries
)(ToSparse()(inputs["hours_per_week"]))
group1_out = ConcatenateWithOffset(group1.id_offsets)(
[
workclass_lookup_out,
hours_per_week_bucketize_out,
capital_gain_bucketize_out,
capital_loss_bucketize_out,
]
)
group2_out = ConcatenateWithOffset(group2.id_offsets)(
[
education_hash_out,
marital_status_lookup_out,
relationship_lookup_out,
occupation_hash_out,
]
)
group3_out = ConcatenateWithOffset(group3.id_offsets)(
[
age_bucketize_out,
sex_lookup_out,
race_lookup_out,
native_country_hash_out,
]
)
group1_embedding_wide_out = SparseEmbedding(
input_dim=group1_embedding_wide.input_dim,
output_dim=group1_embedding_wide.output_dim,
)(group1_out)
group2_embedding_wide_out = SparseEmbedding(
input_dim=group2_embedding_wide.input_dim,
output_dim=group2_embedding_wide.output_dim,
)(group2_out)
group1_embedding_deep_out = SparseEmbedding(
input_dim=group1_embedding_deep.input_dim,
output_dim=group1_embedding_deep.output_dim,
)(group1_out)
group2_embedding_deep_out = SparseEmbedding(
input_dim=group2_embedding_deep.input_dim,
output_dim=group2_embedding_deep.output_dim,
)(group2_out)
group3_embedding_deep_out = SparseEmbedding(
input_dim=group3_embedding_deep.input_dim,
output_dim=group3_embedding_deep.output_dim,
)(group3_out)
wide_embeddings_out = [
group1_embedding_wide_out,
group2_embedding_wide_out,
]
deep_embeddings_out = [
group1_embedding_deep_out,
group2_embedding_deep_out,
group3_embedding_deep_out,
]
return wide_embeddings_out, deep_embeddings_out
# The entry point of the submitter program
def custom_model():
input_layers = get_input_layers(input_schemas=INPUT_SCHEMAS)
wide_embeddings, deep_embeddings = transform_from_code_gen(input_layers)
return wide_and_deep_classifier(
input_layers, wide_embeddings, deep_embeddings
)
def loss(labels, predictions):
logits = predictions["logits"]
return tf.reduce_mean(
tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.cast(tf.reshape(labels, (-1, 1)), tf.float32),
logits=logits,
)
)
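# Note: for binary labels this is numerically equivalent to applying
# tf.keras.losses.BinaryCrossentropy(from_logits=True) to the raw logits;
# it is spelled out here because labels arrive flat and must first be
# reshaped to (batch, 1) and cast to float.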
def optimizer(lr=0.001):
return tf.keras.optimizers.Adam(learning_rate=lr)
def eval_metrics_fn():
return {
"logits": {
"accuracy": lambda labels, predictions: tf.equal(
tf.cast(tf.reshape(predictions, [-1]) > 0.5, tf.int32),
tf.cast(tf.reshape(labels, [-1]), tf.int32),
)
},
"probs": {"auc": tf.keras.metrics.AUC()},
}
def callbacks():
def _schedule(model_version):
if model_version < 5000:
return 0.0003
elif model_version < 12000:
return 0.0002
else:
return 0.0001
return [LearningRateScheduler(_schedule)]
if __name__ == "__main__":
model = custom_model()
    model.summary()  # summary() prints directly and returns None, so wrapping it in print() is redundant
output = model.call(
{
"education": tf.constant([["Bachelors"]], tf.string),
"occupation": tf.constant([["Tech-support"]], tf.string),
"native_country": tf.constant([["United-States"]], tf.string),
"workclass": tf.constant([["Private"]], tf.string),
"marital_status": tf.constant([["Separated"]], tf.string),
"relationship": tf.constant([["Husband"]], tf.string),
"race": tf.constant([["White"]], tf.string),
"sex": tf.constant([["Female"]], tf.string),
"age": tf.constant([[18]], tf.float32),
"capital_gain": tf.constant([[100.0]], tf.float32),
"capital_loss": tf.constant([[1.0]], tf.float32),
"hours_per_week": tf.constant([[40]], tf.float32),
}
)
print(output)
| [
"tensorflow.keras.layers.Concatenate",
"tensorflow.concat",
"tensorflow.constant",
"tensorflow.reduce_sum",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"tensorflow.sigmoid",
"tensorflow.keras.metrics.AUC",
"tensorflow.keras.Model",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Input"
] | model_zoo/census_model_sqlflow/wide_and_deep/wide_deep_functional_keras.py | [(52, 'tensorflow.reshape', 'tf.reshape', (['deep_embeddings'], {'shape': '(-1, 3 * 8)'}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.concat', 'tf.concat', (['[wide, dnn_input]', '(1)'], {}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['concat_input', '(1)'], {'keepdims': '(True)'}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'input_layers', 'outputs': "{'logits': logits, 'probs': probs}", 'name': '"""wide_deep"""'}), True, 'import tensorflow as tf\n'), (251, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'lr'}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.sigmoid', 'tf.sigmoid', (['logits'], {}), True, 'import tensorflow as tf\n'), (74, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'name': 'schema_info.name', 'shape': '(1,)', 'dtype': 'schema_info.dtype'}), True, 'import tensorflow as tf\n'), (132, 'elasticdl_preprocessing.layers.hashing.Hashing', 'Hashing', (['education_hash.hash_bucket_size'], {}), False, 'from elasticdl_preprocessing.layers.hashing import Hashing\n'), (135, 'elasticdl_preprocessing.layers.hashing.Hashing', 'Hashing', (['occupation_hash.hash_bucket_size'], {}), False, 'from elasticdl_preprocessing.layers.hashing import Hashing\n'), (138, 'elasticdl_preprocessing.layers.hashing.Hashing', 'Hashing', (['native_country_hash.hash_bucket_size'], {}), False, 'from elasticdl_preprocessing.layers.hashing import Hashing\n'), (141, 'elasticdl_preprocessing.layers.index_lookup.IndexLookup', 'IndexLookup', (['workclass_lookup.vocabulary_list'], {}), False, 'from elasticdl_preprocessing.layers.index_lookup import IndexLookup\n'), (144, 'elasticdl_preprocessing.layers.index_lookup.IndexLookup', 'IndexLookup', (['marital_status_lookup.vocabulary_list'], {}), False, 'from elasticdl_preprocessing.layers.index_lookup import IndexLookup\n'), (147, 'elasticdl_preprocessing.layers.index_lookup.IndexLookup', 'IndexLookup', (['relationship_lookup.vocabulary_list'], {}), False, 'from elasticdl_preprocessing.layers.index_lookup import IndexLookup\n'), (150, 'elasticdl_preprocessing.layers.index_lookup.IndexLookup', 'IndexLookup', (['race_lookup.vocabulary_list'], {}), False, 'from elasticdl_preprocessing.layers.index_lookup import IndexLookup\n'), (153, 'elasticdl_preprocessing.layers.index_lookup.IndexLookup', 'IndexLookup', (['sex_lookup.vocabulary_list'], {}), False, 'from elasticdl_preprocessing.layers.index_lookup import IndexLookup\n'), (156, 'elasticdl_preprocessing.layers.discretization.Discretization', 'Discretization', (['age_bucketize.boundaries'], {}), False, 'from elasticdl_preprocessing.layers.discretization import Discretization\n'), (159, 'elasticdl_preprocessing.layers.discretization.Discretization', 'Discretization', (['capital_gain_bucketize.boundaries'], {}), False, 'from elasticdl_preprocessing.layers.discretization import Discretization\n'), (162, 'elasticdl_preprocessing.layers.discretization.Discretization', 'Discretization', (['capital_loss_bucketize.boundaries'], {}), False, 'from elasticdl_preprocessing.layers.discretization import Discretization\n'), (165, 'elasticdl_preprocessing.layers.discretization.Discretization', 'Discretization', (['hours_per_week_bucketize.boundaries'], {}), False, 'from elasticdl_preprocessing.layers.discretization import Discretization\n'), (169, 'elasticdl_preprocessing.layers.concatenate_with_offset.ConcatenateWithOffset', 'ConcatenateWithOffset', (['group1.id_offsets'], {}), False, 'from elasticdl_preprocessing.layers.concatenate_with_offset import ConcatenateWithOffset\n'), (177, 'elasticdl_preprocessing.layers.concatenate_with_offset.ConcatenateWithOffset', 'ConcatenateWithOffset', (['group2.id_offsets'], {}), False, 'from elasticdl_preprocessing.layers.concatenate_with_offset import ConcatenateWithOffset\n'), (185, 'elasticdl_preprocessing.layers.concatenate_with_offset.ConcatenateWithOffset', 'ConcatenateWithOffset', (['group3.id_offsets'], {}), False, 'from elasticdl_preprocessing.layers.concatenate_with_offset import ConcatenateWithOffset\n'), (194, 'elasticdl_preprocessing.layers.SparseEmbedding', 'SparseEmbedding', ([], {'input_dim': 'group1_embedding_wide.input_dim', 'output_dim': 'group1_embedding_wide.output_dim'}), False, 'from elasticdl_preprocessing.layers import SparseEmbedding\n'), (198, 'elasticdl_preprocessing.layers.SparseEmbedding', 'SparseEmbedding', ([], {'input_dim': 'group2_embedding_wide.input_dim', 'output_dim': 'group2_embedding_wide.output_dim'}), False, 'from elasticdl_preprocessing.layers import SparseEmbedding\n'), (203, 'elasticdl_preprocessing.layers.SparseEmbedding', 'SparseEmbedding', ([], {'input_dim': 'group1_embedding_deep.input_dim', 'output_dim': 'group1_embedding_deep.output_dim'}), False, 'from elasticdl_preprocessing.layers import SparseEmbedding\n'), (207, 'elasticdl_preprocessing.layers.SparseEmbedding', 'SparseEmbedding', ([], {'input_dim': 'group2_embedding_deep.input_dim', 'output_dim': 'group2_embedding_deep.output_dim'}), False, 'from elasticdl_preprocessing.layers import SparseEmbedding\n'), (211, 'elasticdl_preprocessing.layers.SparseEmbedding', 'SparseEmbedding', ([], {'input_dim': 'group3_embedding_deep.input_dim', 'output_dim': 'group3_embedding_deep.output_dim'}), False, 'from elasticdl_preprocessing.layers import SparseEmbedding\n'), (275, 'elasticdl.python.elasticdl.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['_schedule'], {}), False, 'from elasticdl.python.elasticdl.callbacks import LearningRateScheduler\n'), (54, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['i'], {}), True, 'import tensorflow as tf\n'), (133, 'elasticdl_preprocessing.layers.to_sparse.ToSparse', 'ToSparse', ([], {}), False, 'from elasticdl_preprocessing.layers.to_sparse import ToSparse\n'), (136, 'elasticdl_preprocessing.layers.to_sparse.ToSparse', 'ToSparse', ([], {}), False, 'from elasticdl_preprocessing.layers.to_sparse import ToSparse\n'), (139, 'elasticdl_preprocessing.layers.to_sparse.ToSparse', 'ToSparse', ([], {}), False, 'from elasticdl_preprocessing.layers.to_sparse import ToSparse\n'), (142, 'elasticdl_preprocessing.layers.to_sparse.ToSparse', 'ToSparse', ([], {}), False, 'from elasticdl_preprocessing.layers.to_sparse import ToSparse\n'), (146, 'elasticdl_preprocessing.layers.to_sparse.ToSparse', 'ToSparse', ([], {}), False, 'from elasticdl_preprocessing.layers.to_sparse import ToSparse\n'), (148, 'elasticdl_preprocessing.layers.to_sparse.ToSparse', 'ToSparse', ([], {}), False, 'from elasticdl_preprocessing.layers.to_sparse import ToSparse\n'), (151, 'elasticdl_preprocessing.layers.to_sparse.ToSparse', 'ToSparse', ([], {}), False, 'from elasticdl_preprocessing.layers.to_sparse import ToSparse\n'), (154, 'elasticdl_preprocessing.layers.to_sparse.ToSparse', 'ToSparse', ([], {}), False, 'from elasticdl_preprocessing.layers.to_sparse import ToSparse\n'), (157, 'elasticdl_preprocessing.layers.to_sparse.ToSparse', 'ToSparse', ([], {}), False, 'from elasticdl_preprocessing.layers.to_sparse import ToSparse\n'), (161, 'elasticdl_preprocessing.layers.to_sparse.ToSparse', 'ToSparse', ([], {}), False, 'from elasticdl_preprocessing.layers.to_sparse import ToSparse\n'), (164, 'elasticdl_preprocessing.layers.to_sparse.ToSparse', 'ToSparse', ([], {}), False, 'from elasticdl_preprocessing.layers.to_sparse import ToSparse\n'), (167, 'elasticdl_preprocessing.layers.to_sparse.ToSparse', 'ToSparse', ([], {}), False, 'from elasticdl_preprocessing.layers.to_sparse import ToSparse\n'), (262, 'tensorflow.keras.metrics.AUC', 'tf.keras.metrics.AUC', ([], {}), True, 'import tensorflow as tf\n'), (284, 'tensorflow.constant', 'tf.constant', (["[['Bachelors']]", 'tf.string'], {}), True, 'import tensorflow as tf\n'), (285, 'tensorflow.constant', 'tf.constant', (["[['Tech-support']]", 'tf.string'], {}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.constant', 'tf.constant', (["[['United-States']]", 'tf.string'], {}), True, 'import tensorflow as tf\n'), (287, 'tensorflow.constant', 'tf.constant', (["[['Private']]", 'tf.string'], {}), True, 'import tensorflow as tf\n'), (288, 'tensorflow.constant', 'tf.constant', (["[['Separated']]", 'tf.string'], {}), True, 'import tensorflow as tf\n'), (289, 'tensorflow.constant', 'tf.constant', (["[['Husband']]", 'tf.string'], {}), True, 'import tensorflow as tf\n'), (290, 'tensorflow.constant', 'tf.constant', (["[['White']]", 'tf.string'], {}), True, 'import tensorflow as tf\n'), (291, 'tensorflow.constant', 'tf.constant', (["[['Female']]", 'tf.string'], {}), True, 'import tensorflow as tf\n'), (292, 'tensorflow.constant', 'tf.constant', (['[[18]]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (293, 'tensorflow.constant', 'tf.constant', (['[[100.0]]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (294, 'tensorflow.constant', 'tf.constant', (['[[1.0]]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (295, 'tensorflow.constant', 'tf.constant', (['[[40]]', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (87, 'elasticdl_preprocessing.layers.to_sparse.ToSparse', 'ToSparse', ([], {}), False, 'from elasticdl_preprocessing.layers.to_sparse import ToSparse\n'), (90, 'elasticdl_preprocessing.layers.hashing.Hashing', 'Hashing', (['feature_transform_info.hash_bucket_size'], {}), False, 'from elasticdl_preprocessing.layers.hashing import Hashing\n'), (94, 'elasticdl_preprocessing.layers.to_sparse.ToSparse', 'ToSparse', ([], {}), False, 'from elasticdl_preprocessing.layers.to_sparse import ToSparse\n'), (97, 'elasticdl_preprocessing.layers.discretization.Discretization', 'Discretization', (['feature_transform_info.boundaries'], {}), False, 'from elasticdl_preprocessing.layers.discretization import Discretization\n'), (244, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1, 1)'], {}), True, 'import tensorflow as tf\n'), (101, 'elasticdl_preprocessing.layers.to_sparse.ToSparse', 'ToSparse', ([], {}), False, 'from elasticdl_preprocessing.layers.to_sparse import ToSparse\n'), (104, 'elasticdl_preprocessing.layers.index_lookup.IndexLookup', 'IndexLookup', (['feature_transform_info.vocabulary_list'], {}), False, 'from elasticdl_preprocessing.layers.index_lookup import IndexLookup\n'), (259, 'tensorflow.reshape', 'tf.reshape', (['labels', '[-1]'], {}), True, 'import tensorflow as tf\n'), (111, 'elasticdl_preprocessing.layers.concatenate_with_offset.ConcatenateWithOffset', 'ConcatenateWithOffset', (['feature_transform_info.id_offsets'], {}), False, 'from elasticdl_preprocessing.layers.concatenate_with_offset import ConcatenateWithOffset\n'), (258, 'tensorflow.reshape', 'tf.reshape', (['predictions', '[-1]'], {}), True, 'import tensorflow as tf\n'), (115, 'elasticdl_preprocessing.layers.SparseEmbedding', 'SparseEmbedding', ([], {'input_dim': 'feature_transform_info.input_dim', 'output_dim': 'feature_transform_info.output_dim'}), False, 'from elasticdl_preprocessing.layers import SparseEmbedding\n')] |
SteffenBauer/Deep_RL | 6671c723098037cef1013af9a7f434df993c9d91 | #!/usr/bin/env python3
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] = 'true'
import tensorflow as tf
import tensorflow.keras as keras
tf.get_logger().setLevel('ERROR')
from rl.games import tromis
from rl.agents import dqn
from rl.memory import uniqmemory
from rl.callbacks import history
width, height = 7, 10
nb_frames = 1
game = tromis.Tromis(width, height, max_turn=512)
inp = keras.layers.Input(shape=(nb_frames, height, width, 3))
x = keras.layers.Conv3D(32, 5, padding='same', strides=1, activation='relu')(inp)
x = keras.layers.AveragePooling3D(padding='same')(x)
x = keras.layers.Conv3D(64, 3, padding='same', strides=1, activation='relu')(x)
x = keras.layers.GlobalAveragePooling3D()(x)
x = keras.layers.Dense(128, activation='relu')(x)
act = keras.layers.Dense(game.nb_actions, activation='linear')(x)
model = keras.models.Model(inputs=inp, outputs=act)
model.compile(keras.optimizers.RMSprop(), keras.losses.LogCosh())
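# The linear output head emits one Q-value per game action, as DQN expects;
# log-cosh behaves like a Huber loss (quadratic near zero, linear for large
# errors), limiting the influence of occasional large TD errors.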
model.summary()
params = {
'batch_size': 256,
'epochs': 200,
'episodes': 100,
'train_freq': 32,
'target_sync': 512,
'epsilon_start': 0.5,
'epsilon_decay': 0.75,
'epsilon_final': 0.0,
'gamma': 0.92,
'reset_memory': False,
'observe': 100
}
rlparams = {
'rl.memory': 'UniqMemory',
'rl.memory_size': 65536,
'rl.optimizer': 'RMSprop',
'rl.with_target': True,
'rl.nb_frames': nb_frames
}
gameparams = {
'game.width': game.width,
'game.height': game.height,
'game.max_turn': game.max_turn
}
memory = uniqmemory.UniqMemory(memory_size=rlparams['rl.memory_size'])
agent = dqn.Agent(model, memory, with_target=rlparams['rl.with_target'])
#history = history.HistoryLog("tromis", {**params, **rlparams, **gameparams})
agent.train(game, verbose=1, callbacks=[], **params)
| [
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.GlobalAveragePooling3D",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.optimizers.RMSprop",
"tensorflow.keras.layers.Conv3D",
"tensorflow.keras.losses.LogCosh",
"tensorflow.get_logger",
"tensorflow.keras.layers.AveragePooling3D",
"tensorflow.keras.layers.Input"
] | train_tromis.py | [(18, 'rl.games.tromis.Tromis', 'tromis.Tromis', (['width', 'height'], {'max_turn': '(512)'}), False, 'from rl.games import tromis\n'), (20, 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': '(nb_frames, height, width, 3)'}), True, 'import tensorflow.keras as keras\n'), (28, 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'inp', 'outputs': 'act'}), True, 'import tensorflow.keras as keras\n'), (60, 'rl.memory.uniqmemory.UniqMemory', 'uniqmemory.UniqMemory', ([], {'memory_size': "rlparams['rl.memory_size']"}), False, 'from rl.memory import uniqmemory\n'), (61, 'rl.agents.dqn.Agent', 'dqn.Agent', (['model', 'memory'], {'with_target': "rlparams['rl.with_target']"}), False, 'from rl.agents import dqn\n'), (21, 'tensorflow.keras.layers.Conv3D', 'keras.layers.Conv3D', (['(32)', '(5)'], {'padding': '"""same"""', 'strides': '(1)', 'activation': '"""relu"""'}), True, 'import tensorflow.keras as keras\n'), (22, 'tensorflow.keras.layers.AveragePooling3D', 'keras.layers.AveragePooling3D', ([], {'padding': '"""same"""'}), True, 'import tensorflow.keras as keras\n'), (23, 'tensorflow.keras.layers.Conv3D', 'keras.layers.Conv3D', (['(64)', '(3)'], {'padding': '"""same"""', 'strides': '(1)', 'activation': '"""relu"""'}), True, 'import tensorflow.keras as keras\n'), (24, 'tensorflow.keras.layers.GlobalAveragePooling3D', 'keras.layers.GlobalAveragePooling3D', ([], {}), True, 'import tensorflow.keras as keras\n'), (25, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), True, 'import tensorflow.keras as keras\n'), (26, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['game.nb_actions'], {'activation': '"""linear"""'}), True, 'import tensorflow.keras as keras\n'), (29, 'tensorflow.keras.optimizers.RMSprop', 'keras.optimizers.RMSprop', ([], {}), True, 'import tensorflow.keras as keras\n'), (29, 'tensorflow.keras.losses.LogCosh', 'keras.losses.LogCosh', ([], {}), True, 'import tensorflow.keras as keras\n'), (8, 'tensorflow.get_logger', 'tf.get_logger', ([], {}), True, 'import tensorflow as tf\n')] |
sujeet-ap/keras-idiomatic-programmer | 4db490afea8acf9381cbf3d607583451a2f40a3a | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# SE-ResNet (50/101/152) v1.0
# Paper: https://arxiv.org/pdf/1709.01507.pdf
import tensorflow as tf
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Reshape, Multiply, Add
def stem(inputs):
""" Construct the Stem Convolutional Group
inputs : the input vector
"""
# The 224x224 images are zero padded (black - no signal) to be 230x230 images prior to the first convolution
x = ZeroPadding2D(padding=(3, 3))(inputs)
# First Convolutional layer which uses a large (coarse) filter
x = Conv2D(64, (7, 7), strides=(2, 2), padding='valid', use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
# Pooled feature maps will be reduced by 75%
x = ZeroPadding2D(padding=(1, 1))(x)
x = MaxPooling2D((3, 3), strides=(2, 2))(x)
return x
def learner(x, groups, ratio):
""" Construct the Learner
x : input to the learner
groups: list of groups: number of filters and blocks
ratio : amount of filter reduction in squeeze
"""
# First Residual Block Group (not strided)
n_filters, n_blocks = groups.pop(0)
x = group(x, n_filters, n_blocks, ratio, strides=(1, 1))
# Remaining Residual Block Groups (strided)
for n_filters, n_blocks in groups:
x = group(x, n_filters, n_blocks, ratio)
return x
def group(x, n_filters, n_blocks, ratio, strides=(2, 2)):
""" Construct the Squeeze-Excite Group
x : input to the group
n_blocks : number of blocks
n_filters: number of filters
ratio : amount of filter reduction during squeeze
strides : whether projection block is strided
"""
# first block uses linear projection to match the doubling of filters between groups
x = projection_block(x, n_filters, strides=strides, ratio=ratio)
# remaining blocks use identity link
for _ in range(n_blocks-1):
x = identity_block(x, n_filters, ratio=ratio)
return x
def squeeze_excite_block(x, ratio=16):
""" Create a Squeeze and Excite block
x : input to the block
ratio : amount of filter reduction during squeeze
"""
# Remember the input
shortcut = x
# Get the number of filters on the input
filters = x.shape[-1]
# Squeeze (dimensionality reduction)
# Do global average pooling across the filters, which will output a 1D vector
x = GlobalAveragePooling2D()(x)
# Reshape into 1x1 feature maps (1x1xC)
x = Reshape((1, 1, filters))(x)
# Reduce the number of filters (1x1xC/r)
x = Dense(filters // ratio, activation='relu', kernel_initializer='he_normal', use_bias=False)(x)
# Excitation (dimensionality restoration)
# Restore the number of filters (1x1xC)
x = Dense(filters, activation='sigmoid', kernel_initializer='he_normal', use_bias=False)(x)
# Scale - multiply the squeeze/excitation output with the input (WxHxC)
x = Multiply()([shortcut, x])
return x
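# Shape walk-through for an input (None, H, W, C) with, e.g., C=256 and ratio=16:
#   GlobalAveragePooling2D -> (None, 256)
#   Reshape                -> (None, 1, 1, 256)
#   Dense(C // ratio)      -> (None, 1, 1, 16)    squeeze
#   Dense(C)               -> (None, 1, 1, 256)   excite, sigmoid gates in [0, 1]
#   Multiply               -> (None, H, W, 256)   channel-wise rescaling (broadcast)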
def identity_block(x, n_filters, ratio=16):
""" Create a Bottleneck Residual Block with Identity Link
x : input into the block
n_filters: number of filters
ratio : amount of filter reduction during squeeze
"""
# Save input vector (feature maps) for the identity link
shortcut = x
## Construct the 1x1, 3x3, 1x1 residual block (fig 3c)
# Dimensionality reduction
x = Conv2D(n_filters, (1, 1), strides=(1, 1), use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
# Bottleneck layer
x = Conv2D(n_filters, (3, 3), strides=(1, 1), padding="same", use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
# Dimensionality restoration - increase the number of output filters by 4X
x = Conv2D(n_filters * 4, (1, 1), strides=(1, 1), use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
# Pass the output through the squeeze and excitation block
x = squeeze_excite_block(x, ratio)
# Add the identity link (input) to the output of the residual block
x = Add()([shortcut, x])
x = ReLU()(x)
return x
def projection_block(x, n_filters, strides=(2,2), ratio=16):
""" Create Bottleneck Residual Block with Projection Shortcut
Increase the number of filters by 4X
x : input into the block
n_filters: number of filters
strides : whether entry convolution is strided (i.e., (2, 2) vs (1, 1))
ratio : amount of filter reduction during squeeze
"""
# Construct the projection shortcut
# Increase filters by 4X to match shape when added to output of block
shortcut = Conv2D(4 * n_filters, (1, 1), strides=strides, use_bias=False, kernel_initializer='he_normal')(x)
shortcut = BatchNormalization()(shortcut)
## Construct the 1x1, 3x3, 1x1 residual block (fig 3c)
# Dimensionality reduction
# Feature pooling when strides=(2, 2)
x = Conv2D(n_filters, (1, 1), strides=strides, use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
# Bottleneck layer
x = Conv2D(n_filters, (3, 3), strides=(1, 1), padding='same', use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
x = ReLU()(x)
# Dimensionality restoration - increase the number of filters by 4X
x = Conv2D(4 * n_filters, (1, 1), strides=(1, 1), use_bias=False, kernel_initializer='he_normal')(x)
x = BatchNormalization()(x)
# Pass the output through the squeeze and excitation block
x = squeeze_excite_block(x, ratio)
# Add the projection shortcut link to the output of the residual block
x = Add()([x, shortcut])
x = ReLU()(x)
return x
def classifier(x, n_classes):
""" Create the Classifier Group
x : input to the classifier
n_classes : number of output classes
"""
# Pool at the end of all the convolutional residual blocks
x = GlobalAveragePooling2D()(x)
# Final Dense Outputting Layer for the outputs
outputs = Dense(n_classes, activation='softmax', kernel_initializer='he_normal')(x)
return outputs
# Meta-parameter: list of groups: filter size and number of blocks
groups = { 50 : [ (64, 3), (128, 4), (256, 6), (512, 3) ], # SE-ResNet50
101: [ (64, 3), (128, 4), (256, 23), (512, 3) ], # SE-ResNet101
152: [ (64, 3), (128, 8), (256, 36), (512, 3) ] # SE-ResNet152
}
# Meta-parameter: Amount of filter reduction in squeeze operation
ratio = 16
# The input tensor
inputs = Input(shape=(224, 224, 3))
# The Stem Group
x = stem(inputs)
# The Learner
x = learner(x, groups[50], ratio)
# The Classifier for 1000 classes
outputs = classifier(x, 1000)
# Instantiate the Model
model = Model(inputs, outputs)
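# Optional sanity check (not part of the original file): model.summary()
# shows that squeeze-excite adds only two small Dense layers per residual
# block, a modest parameter overhead on top of the plain ResNet backbone.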
| [
"tensorflow.keras.layers.ReLU",
"tensorflow.keras.Input",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Multiply",
"tensorflow.keras.layers.ZeroPadding2D",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.MaxPooling2D"
] | zoo/senet/se_resnet.py | [(193, 'tensorflow.keras.Input', 'Input', ([], {'shape': '(224, 224, 3)'}), False, 'from tensorflow.keras import Input, Model\n'), (205, 'tensorflow.keras.Model', 'Model', (['inputs', 'outputs'], {}), False, 'from tensorflow.keras import Input, Model\n'), (28, 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', ([], {'padding': '(3, 3)'}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (31, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(7, 7)'], {'strides': '(2, 2)', 'padding': '"""valid"""', 'use_bias': '(False)', 'kernel_initializer': '"""he_normal"""'}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (32, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (33, 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (36, 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', ([], {'padding': '(1, 1)'}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (37, 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', (['(3, 3)'], {'strides': '(2, 2)'}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (84, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Reshape, Multiply, Add\n'), (87, 'tensorflow.keras.layers.Reshape', 'Reshape', (['(1, 1, filters)'], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Reshape, Multiply, Add\n'), (90, 'tensorflow.keras.layers.Dense', 'Dense', (['(filters // ratio)'], {'activation': '"""relu"""', 'kernel_initializer': '"""he_normal"""', 'use_bias': '(False)'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Reshape, Multiply, Add\n'), (94, 'tensorflow.keras.layers.Dense', 'Dense', (['filters'], {'activation': '"""sigmoid"""', 'kernel_initializer': '"""he_normal"""', 'use_bias': '(False)'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Reshape, Multiply, Add\n'), (97, 'tensorflow.keras.layers.Multiply', 'Multiply', ([], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Reshape, Multiply, Add\n'), (112, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['n_filters', '(1, 1)'], {'strides': '(1, 1)', 'use_bias': '(False)', 'kernel_initializer': '"""he_normal"""'}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (113, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (114, 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (117, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['n_filters', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'use_bias': '(False)', 'kernel_initializer': '"""he_normal"""'}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (118, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (119, 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (122, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(n_filters * 4)', '(1, 1)'], {'strides': '(1, 1)', 'use_bias': '(False)', 'kernel_initializer': '"""he_normal"""'}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (123, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (129, 'tensorflow.keras.layers.Add', 'Add', ([], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Reshape, Multiply, Add\n'), (130, 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (143, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(4 * n_filters)', '(1, 1)'], {'strides': 'strides', 'use_bias': '(False)', 'kernel_initializer': '"""he_normal"""'}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (144, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (150, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['n_filters', '(1, 1)'], {'strides': 'strides', 'use_bias': '(False)', 'kernel_initializer': '"""he_normal"""'}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (151, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (152, 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (155, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['n_filters', '(3, 3)'], {'strides': '(1, 1)', 'padding': '"""same"""', 'use_bias': '(False)', 'kernel_initializer': '"""he_normal"""'}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (156, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (157, 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (160, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(4 * n_filters)', '(1, 1)'], {'strides': '(1, 1)', 'use_bias': '(False)', 'kernel_initializer': '"""he_normal"""'}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (161, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (167, 'tensorflow.keras.layers.Add', 'Add', ([], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Reshape, Multiply, Add\n'), (168, 'tensorflow.keras.layers.ReLU', 'ReLU', ([], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D, Conv2D, MaxPooling2D, BatchNormalization, ReLU\n'), (177, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Reshape, Multiply, Add\n'), (180, 'tensorflow.keras.layers.Dense', 'Dense', (['n_classes'], {'activation': '"""softmax"""', 'kernel_initializer': '"""he_normal"""'}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Reshape, Multiply, Add\n')] |
1ucky40nc3/models | 1933222e454f0d2ab8582e48fcc46f26c36ace87 | # coding=utf-8
# Copyright 2021 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Keras-based einsum layer.
Copied from
https://github.com/tensorflow/models/blob/master/official/nlp/modeling/layers/dense_einsum.py.
"""
# Copied from:
# https://github.com/google-research/google-research/blob/master/performer/fast_attention/tensorflow/util.py
import tensorflow as tf
_CHR_IDX = ["a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m"]
@tf.keras.utils.register_keras_serializable(package="Text")
class DenseEinsum(tf.keras.layers.Layer):
"""A densely connected layer that uses tf.einsum as the backing computation.
This layer can perform einsum calculations of arbitrary dimensionality.
Arguments:
output_shape: Positive integer or tuple, dimensionality of the output space.
num_summed_dimensions: The number of dimensions to sum over. Standard 2D
matmul should use 1, 3D matmul should use 2, and so forth.
activation: Activation function to use. If you don't specify anything, no
      activation is applied (i.e. "linear" activation: `a(x) = x`).
use_bias: Boolean, whether the layer uses a bias vector.
kernel_initializer: Initializer for the `kernel` weights matrix.
bias_initializer: Initializer for the bias vector.
kernel_regularizer: Regularizer function applied to the `kernel` weights
matrix.
bias_regularizer: Regularizer function applied to the bias vector.
activity_regularizer: Regularizer function applied to the output of the
      layer (its "activation").
kernel_constraint: Constraint function applied to the `kernel` weights
matrix.
bias_constraint: Constraint function applied to the bias vector.
Input shape:
N-D tensor with shape: `(batch_size, ..., input_dim)`. The most common
situation would be a 2D input with shape `(batch_size, input_dim)`.
Output shape:
N-D tensor with shape: `(batch_size, ..., units)`. For instance, for a 2D
input with shape `(batch_size, input_dim)`, the output would have shape
`(batch_size, units)`.
"""
def __init__(self,
output_shape,
num_summed_dimensions=1,
activation=None,
use_bias=True,
kernel_initializer="glorot_uniform",
bias_initializer="zeros",
kernel_regularizer=None,
bias_regularizer=None,
activity_regularizer=None,
kernel_constraint=None,
bias_constraint=None,
**kwargs):
    # Forward activity_regularizer to the base Layer so it is actually applied
    # instead of being silently dropped.
    super(DenseEinsum, self).__init__(
        activity_regularizer=activity_regularizer, **kwargs)
self._output_shape = output_shape if isinstance(
output_shape, (list, tuple)) else (output_shape,)
self._activation = tf.keras.activations.get(activation)
self._use_bias = use_bias
self._kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self._bias_initializer = tf.keras.initializers.get(bias_initializer)
self._kernel_regularizer = tf.keras.regularizers.get(kernel_regularizer)
self._bias_regularizer = tf.keras.regularizers.get(bias_regularizer)
self._kernel_constraint = tf.keras.constraints.get(kernel_constraint)
self._bias_constraint = tf.keras.constraints.get(bias_constraint)
self._num_summed_dimensions = num_summed_dimensions
self._einsum_string = None
def _build_einsum_string(self, free_input_dims, bound_dims, output_dims):
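    # Illustrative mappings: free=1, bound=1, out=1 yields "ab,bc->ac"
    # (a standard matmul); free=1, bound=2, out=1 yields "abc,bcd->ad"
    # (a 3D input contracted over its last two dimensions).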
input_str = ""
kernel_str = ""
output_str = ""
letter_offset = 0
for i in range(free_input_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
output_str += char
letter_offset += free_input_dims
for i in range(bound_dims):
char = _CHR_IDX[i + letter_offset]
input_str += char
kernel_str += char
letter_offset += bound_dims
for i in range(output_dims):
char = _CHR_IDX[i + letter_offset]
kernel_str += char
output_str += char
return input_str + "," + kernel_str + "->" + output_str
def build(self, input_shape):
input_shape = tf.TensorShape(input_shape)
input_rank = input_shape.rank
free_input_dims = input_rank - self._num_summed_dimensions
output_dims = len(self._output_shape)
self._einsum_string = self._build_einsum_string(free_input_dims,
self._num_summed_dimensions,
output_dims)
# This is only saved for testing purposes.
self._kernel_shape = (
input_shape[free_input_dims:].concatenate(self._output_shape))
self._kernel = self.add_weight(
"kernel",
shape=self._kernel_shape,
initializer=self._kernel_initializer,
regularizer=self._kernel_regularizer,
constraint=self._kernel_constraint,
dtype=self.dtype,
trainable=True)
if self._use_bias:
self._bias = self.add_weight(
"bias",
shape=self._output_shape,
initializer=self._bias_initializer,
regularizer=self._bias_regularizer,
constraint=self._bias_constraint,
dtype=self.dtype,
trainable=True)
else:
self._bias = None
super(DenseEinsum, self).build(input_shape)
def get_config(self):
config = {
"output_shape":
self._output_shape,
"num_summed_dimensions":
self._num_summed_dimensions,
"activation":
tf.keras.activations.serialize(self._activation),
"use_bias":
self._use_bias,
"kernel_initializer":
tf.keras.initializers.serialize(self._kernel_initializer),
"bias_initializer":
tf.keras.initializers.serialize(self._bias_initializer),
"kernel_regularizer":
tf.keras.regularizers.serialize(self._kernel_regularizer),
"bias_regularizer":
tf.keras.regularizers.serialize(self._bias_regularizer),
"activity_regularizer":
tf.keras.regularizers.serialize(self._activity_regularizer),
"kernel_constraint":
tf.keras.constraints.serialize(self._kernel_constraint),
"bias_constraint":
tf.keras.constraints.serialize(self._bias_constraint)
}
base_config = super(DenseEinsum, self).get_config()
return dict(list(base_config.items()) + list(config.items()))
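  # Usage sketch (illustrative, not part of the original file):
  #   DenseEinsum(output_shape=64) on a (2, 16) input acts as an ordinary
  #   dense layer -> (2, 64); DenseEinsum(output_shape=(8, 64)) on a
  #   (2, 10, 512) input projects to per-head outputs -> (2, 10, 8, 64).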
def call(self, inputs):
ret = tf.einsum(self._einsum_string, inputs, self._kernel)
if self._use_bias:
ret += self._bias
if self._activation is not None:
ret = self._activation(ret)
return ret | [
"tensorflow.TensorShape",
"tensorflow.keras.constraints.get",
"tensorflow.keras.activations.serialize",
"tensorflow.keras.constraints.serialize",
"tensorflow.keras.regularizers.get",
"tensorflow.keras.initializers.serialize",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.keras.regularizers.serialize",
"tensorflow.einsum",
"tensorflow.keras.activations.get",
"tensorflow.keras.initializers.get"
] | official/nlp/keras_nlp/layers/fast_attention_util.py | [(44, 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {'package': '"""Text"""'}), True, 'import tensorflow as tf\n'), (93, 'tensorflow.keras.activations.get', 'tf.keras.activations.get', (['activation'], {}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', (['kernel_initializer'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.keras.initializers.get', 'tf.keras.initializers.get', (['bias_initializer'], {}), True, 'import tensorflow as tf\n'), (97, 'tensorflow.keras.regularizers.get', 'tf.keras.regularizers.get', (['kernel_regularizer'], {}), True, 'import tensorflow as tf\n'), (98, 'tensorflow.keras.regularizers.get', 'tf.keras.regularizers.get', (['bias_regularizer'], {}), True, 'import tensorflow as tf\n'), (99, 'tensorflow.keras.constraints.get', 'tf.keras.constraints.get', (['kernel_constraint'], {}), True, 'import tensorflow as tf\n'), (100, 'tensorflow.keras.constraints.get', 'tf.keras.constraints.get', (['bias_constraint'], {}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.TensorShape', 'tf.TensorShape', (['input_shape'], {}), True, 'import tensorflow as tf\n'), (192, 'tensorflow.einsum', 'tf.einsum', (['self._einsum_string', 'inputs', 'self._kernel'], {}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.keras.activations.serialize', 'tf.keras.activations.serialize', (['self._activation'], {}), True, 'import tensorflow as tf\n'), (174, 'tensorflow.keras.initializers.serialize', 'tf.keras.initializers.serialize', (['self._kernel_initializer'], {}), True, 'import tensorflow as tf\n'), (176, 'tensorflow.keras.initializers.serialize', 'tf.keras.initializers.serialize', (['self._bias_initializer'], {}), True, 'import tensorflow as tf\n'), (178, 'tensorflow.keras.regularizers.serialize', 'tf.keras.regularizers.serialize', (['self._kernel_regularizer'], {}), True, 'import tensorflow as tf\n'), (180, 'tensorflow.keras.regularizers.serialize', 'tf.keras.regularizers.serialize', (['self._bias_regularizer'], {}), True, 'import tensorflow as tf\n'), (182, 'tensorflow.keras.regularizers.serialize', 'tf.keras.regularizers.serialize', (['self._activity_regularizer'], {}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.keras.constraints.serialize', 'tf.keras.constraints.serialize', (['self._kernel_constraint'], {}), True, 'import tensorflow as tf\n'), (186, 'tensorflow.keras.constraints.serialize', 'tf.keras.constraints.serialize', (['self._bias_constraint'], {}), True, 'import tensorflow as tf\n')] |
UTokyo-ICEPP/multiml_htautau | 5f926c2291a55f57419aa0130d07e2a793fc7353 | from . import HiggsID_BaseTask
class HiggsID_MassTask(HiggsID_BaseTask):
''' HiggsID MLP task
'''
def __init__(self,
layers=None,
activation=None,
batch_norm=False,
scale_mass=1.,
**kwargs):
"""
Args:
            layers (list(int)): the number of nodes in the hidden layers of the MLP used for the mass transformation.
activation (str): activation function for MLP.
batch_norm (bool): use batch normalization
scale_mass (float): scaling output of mass layer
**kwargs: Arbitrary keyword arguments
"""
super().__init__(**kwargs)
self._layers = layers
self._activation = activation
self._batch_norm = batch_norm
self._scale_mass = scale_mass
def mass_layer(self, tau_4vec):
import tensorflow as tf
from tensorflow.keras.layers import Concatenate
from tensorflow.keras import backend as K
tau_4vec = K.reshape(tau_4vec, (-1, self._njets, self._n_features))
pt = K.exp(K.clip(tau_4vec[:, :, 0], -7., 7.)) - 0.1
eta = tau_4vec[:, :, 1]
phi = tau_4vec[:, :, 2]
mass = 1.777
px = pt * K.cos(phi)
py = pt * K.sin(phi)
pz = pt * tf.math.sinh(K.clip(eta, -5, 5))
        epsilon = 0.1  # avoid NaN gradients at e=0: d/dx sqrt(x) = 1/(2*sqrt(x)) diverges as x -> 0
e = K.sqrt(epsilon + px**2 + py**2 + pz**2 + mass**2)
px = K.reshape(px, (-1, self._njets, 1))
py = K.reshape(py, (-1, self._njets, 1))
pz = K.reshape(pz, (-1, self._njets, 1))
e = K.reshape(e, (-1, self._njets, 1))
tau_4vec = Concatenate(axis=2)([px, py, pz, e])
tau_4vec = K.sum(tau_4vec, axis=1)
px = tau_4vec[:, 0]
py = tau_4vec[:, 1]
pz = tau_4vec[:, 2]
e = tau_4vec[:, 3]
masssq = e**2 - (px**2 + py**2 + pz**2)
mass = K.sqrt(epsilon + masssq)
mass = K.reshape(mass, [-1, 1])
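        # `mass` is now the invariant mass of the summed tau system,
        # m = sqrt(E^2 - |p|^2), with each tau rebuilt from (pt, eta, phi)
        # under the assumption of the tau rest mass 1.777 GeV.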
return mass
def build_model(self):
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Lambda
from multiml.task.keras.modules import MLPBlock
input = self.get_inputs()[0]
x = input
x = Lambda(self.mass_layer, output_shape=(1, ))(x)
x *= self._scale_mass
mlp = MLPBlock(layers=self._layers,
activation=self._activation,
activation_last=self._activation_last,
batch_norm=self._batch_norm)
x = mlp(x)
self._model = Model(inputs=input, outputs=x)
self.compile_model()
def _get_custom_objects(self):
return {"mass_layer": self.mass_layer}
| [
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.backend.cos",
"tensorflow.keras.backend.sin",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.backend.sqrt",
"tensorflow.keras.backend.sum",
"tensorflow.keras.backend.reshape",
"tensorflow.keras.backend.clip"
] | multiml_htautau/task/keras/higgsId_mass.py | [(33, 'tensorflow.keras.backend.reshape', 'K.reshape', (['tau_4vec', '(-1, self._njets, self._n_features)'], {}), True, 'from tensorflow.keras import backend as K\n'), (43, 'tensorflow.keras.backend.sqrt', 'K.sqrt', (['(epsilon + px ** 2 + py ** 2 + pz ** 2 + mass ** 2)'], {}), True, 'from tensorflow.keras import backend as K\n'), (44, 'tensorflow.keras.backend.reshape', 'K.reshape', (['px', '(-1, self._njets, 1)'], {}), True, 'from tensorflow.keras import backend as K\n'), (45, 'tensorflow.keras.backend.reshape', 'K.reshape', (['py', '(-1, self._njets, 1)'], {}), True, 'from tensorflow.keras import backend as K\n'), (46, 'tensorflow.keras.backend.reshape', 'K.reshape', (['pz', '(-1, self._njets, 1)'], {}), True, 'from tensorflow.keras import backend as K\n'), (47, 'tensorflow.keras.backend.reshape', 'K.reshape', (['e', '(-1, self._njets, 1)'], {}), True, 'from tensorflow.keras import backend as K\n'), (49, 'tensorflow.keras.backend.sum', 'K.sum', (['tau_4vec'], {'axis': '(1)'}), True, 'from tensorflow.keras import backend as K\n'), (55, 'tensorflow.keras.backend.sqrt', 'K.sqrt', (['(epsilon + masssq)'], {}), True, 'from tensorflow.keras import backend as K\n'), (56, 'tensorflow.keras.backend.reshape', 'K.reshape', (['mass', '[-1, 1]'], {}), True, 'from tensorflow.keras import backend as K\n'), (71, 'multiml.task.keras.modules.MLPBlock', 'MLPBlock', ([], {'layers': 'self._layers', 'activation': 'self._activation', 'activation_last': 'self._activation_last', 'batch_norm': 'self._batch_norm'}), False, 'from multiml.task.keras.modules import MLPBlock\n'), (77, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'input', 'outputs': 'x'}), False, 'from tensorflow.keras.models import Model\n'), (39, 'tensorflow.keras.backend.cos', 'K.cos', (['phi'], {}), True, 'from tensorflow.keras import backend as K\n'), (40, 'tensorflow.keras.backend.sin', 'K.sin', (['phi'], {}), True, 'from tensorflow.keras import backend as K\n'), (48, 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {'axis': '(2)'}), False, 'from tensorflow.keras.layers import Concatenate\n'), (67, 'tensorflow.keras.layers.Lambda', 'Lambda', (['self.mass_layer'], {'output_shape': '(1,)'}), False, 'from tensorflow.keras.layers import Lambda\n'), (34, 'tensorflow.keras.backend.clip', 'K.clip', (['tau_4vec[:, :, (0)]', '(-7.0)', '(7.0)'], {}), True, 'from tensorflow.keras import backend as K\n'), (41, 'tensorflow.keras.backend.clip', 'K.clip', (['eta', '(-5)', '(5)'], {}), True, 'from tensorflow.keras import backend as K\n')] |
UTokyo-ICEPP/multiml_htautau | 5f926c2291a55f57419aa0130d07e2a793fc7353 | from . import Tau4vec_BaseTask
class Tau4vec_ZeroTask(Tau4vec_BaseTask):
''' Tau4vec Zero task
'''
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._trainable_model = False
def build_model(self):
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Lambda
from tensorflow.keras import backend as K
from .modules import zero_layer
input_energy, input_jet = self.get_inputs()
x = K.reshape(input_jet, (-1, self._n_features))[:, 0:3] # mass is not used
x = K.reshape(x, (-1, self._njets * (self._n_features - 1)))
x = Lambda(zero_layer)(x)
x = K.reshape(x, (-1, len(self._output_var_names)))
self._model = Model(inputs=[input_energy, input_jet], outputs=[x])
self.compile_model()
| [
"tensorflow.keras.backend.reshape",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.models.Model"
] | multiml_htautau/task/keras/tau4vec_zero.py | [(21, 'tensorflow.keras.backend.reshape', 'K.reshape', (['x', '(-1, self._njets * (self._n_features - 1))'], {}), True, 'from tensorflow.keras import backend as K\n'), (26, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[input_energy, input_jet]', 'outputs': '[x]'}), False, 'from tensorflow.keras.models import Model\n'), (20, 'tensorflow.keras.backend.reshape', 'K.reshape', (['input_jet', '(-1, self._n_features)'], {}), True, 'from tensorflow.keras import backend as K\n'), (23, 'tensorflow.keras.layers.Lambda', 'Lambda', (['zero_layer'], {}), False, 'from tensorflow.keras.layers import Lambda\n')] |
jbgh2/speech-denoising-wavenet | 386662527b8da69fb3314531a2a7cff087eac557 | # A Wavenet For Speech Denoising - Dario Rethage - 19.05.2017
# Util.py
# Utility functions for dealing with audio signals and training a Denoising Wavenet
import os
import numpy as np
import json
import warnings
import scipy.signal
import scipy.stats
import soundfile as sf
import tensorflow.keras as keras
def l1_l2_loss(y_true, y_pred, l1_weight, l2_weight):
loss = 0
if l1_weight != 0:
loss += l1_weight * keras.losses.mean_absolute_error(y_true, y_pred)
if l2_weight != 0:
loss += l2_weight * keras.losses.mean_squared_error(y_true, y_pred)
return loss
def compute_receptive_field_length(stacks, dilations, filter_length, target_field_length):
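    # Receptive field of a stacked dilated-conv WaveNet; equivalent closed form:
    #   stacks * (filter_length - 1) * sum(dilations) + target_field_length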
half_filter_length = (filter_length-1)/2
length = 0
for d in dilations:
length += d*half_filter_length
length = 2*length
length = stacks * length
length += target_field_length
return int(length)
def snr_db(rms_amplitude_A, rms_amplitude_B):
return 20.0*np.log10(rms_amplitude_A/rms_amplitude_B)
def wav_to_float(x):
try:
max_value = np.iinfo(x.dtype).max
min_value = np.iinfo(x.dtype).min
    except ValueError:  # np.iinfo raises ValueError for floating-point dtypes
max_value = np.finfo(x.dtype).max
min_value = np.finfo(x.dtype).min
x = x.astype('float64', casting='safe')
x -= min_value
x /= ((max_value - min_value) / 2.)
x -= 1.
return x
def float_to_uint8(x):
x += 1.
x /= 2.
uint8_max_value = np.iinfo('uint8').max
x *= uint8_max_value
x = x.astype('uint8')
return x
def keras_float_to_uint8(x):
x += 1.
x /= 2.
uint8_max_value = 255
x *= uint8_max_value
return x
def linear_to_ulaw(x, u=255):
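    # mu-law companding: F(x) = sign(x) * log(1 + u*|x|) / log(1 + u),
    # mapping [-1, 1] onto [-1, 1] with finer resolution near zero.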
x = np.sign(x) * (np.log(1 + u * np.abs(x)) / np.log(1 + u))
return x
def keras_linear_to_ulaw(x, u=255.0):
x = keras.backend.sign(x) * (keras.backend.log(1 + u * keras.backend.abs(x)) / keras.backend.log(1 + u))
return x
def uint8_to_float(x):
max_value = np.iinfo('uint8').max
min_value = np.iinfo('uint8').min
x = x.astype('float32', casting='unsafe')
x -= min_value
x /= ((max_value - min_value) / 2.)
x -= 1.
return x
def keras_uint8_to_float(x):
max_value = 255
min_value = 0
x -= min_value
x /= ((max_value - min_value) / 2.)
x -= 1.
return x
def ulaw_to_linear(x, u=255.0):
y = np.sign(x) * (1 / float(u)) * (((1 + float(u)) ** np.abs(x)) - 1)
return y
def keras_ulaw_to_linear(x, u=255.0):
y = keras.backend.sign(x) * (1 / u) * (((1 + u) ** keras.backend.abs(x)) - 1)
return y
def one_hot_encode(x, num_values=256):
if isinstance(x, int):
x = np.array([x])
if isinstance(x, list):
x = np.array(x)
return np.eye(num_values, dtype='uint8')[x.astype('uint8')]
def one_hot_decode(x):
return np.argmax(x, axis=-1)
def preemphasis(signal, alpha=0.95):
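    # First-order high-pass filter: y[0] = x[0], y[n] = x[n] - alpha * x[n-1].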
return np.append(signal[0], signal[1:] - alpha * signal[:-1])
def binary_encode(x, max_value):
if isinstance(x, int):
x = np.array([x])
if isinstance(x, list):
x = np.array(x)
width = np.ceil(np.log2(max_value)).astype(int)
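    # Bits are emitted least-significant first, e.g. binary_encode(5, 8) -> [[1, 0, 1]].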
return (((x[:, None] & (1 << np.arange(width)))) > 0).astype(int)
def get_condition_input_encode_func(representation):
if representation == 'binary':
return binary_encode
else:
return one_hot_encode
def ensure_keys_in_dict(keys, dictionary):
    return all(key in dictionary for key in keys)
def get_subdict_from_dict(keys, dictionary):
return dict((k, dictionary[k]) for k in keys if k in dictionary)
def pretty_json_dump(values, file_path=None):
if file_path is None:
print(json.dumps(values, sort_keys=True, indent=4, separators=(',', ': ')))
else:
        with open(file_path, 'w') as f:
            json.dump(values, f, sort_keys=True, indent=4, separators=(',', ': '))
def read_wav(filename):
# Reads in a wav audio file, takes the first channel, converts the signal to float64 representation
audio_signal, sample_rate = sf.read(filename)
if audio_signal.ndim > 1:
audio_signal = audio_signal[:, 0]
if audio_signal.dtype != 'float64':
audio_signal = wav_to_float(audio_signal)
return audio_signal, sample_rate
def load_wav(wav_path, desired_sample_rate):
sequence, sample_rate = read_wav(wav_path)
sequence = ensure_sample_rate(sequence, desired_sample_rate, sample_rate)
return sequence
def write_wav(x, filename, sample_rate):
    if not isinstance(x, np.ndarray):
x = np.array(x)
with warnings.catch_warnings():
warnings.simplefilter("error")
sf.write(filename, x, sample_rate)
def ensure_sample_rate(x, desired_sample_rate, file_sample_rate):
if file_sample_rate != desired_sample_rate:
return scipy.signal.resample_poly(x, desired_sample_rate, file_sample_rate)
return x
def rms(x):
return np.sqrt(np.mean(np.square(x), axis=-1))
def normalize(x):
max_peak = np.max(np.abs(x))
return x / max_peak
def get_subsequence_with_speech_indices(full_sequence):
signal_magnitude = np.abs(full_sequence)
chunk_length = 800
chunks_energies = []
for i in range(0, len(signal_magnitude), chunk_length):
chunks_energies.append(np.mean(signal_magnitude[i:i + chunk_length]))
threshold = np.max(chunks_energies) * .1
onset_chunk_i = 0
for i in range(0, len(chunks_energies)):
if chunks_energies[i] >= threshold:
onset_chunk_i = i
break
termination_chunk_i = len(chunks_energies)
for i in range(len(chunks_energies) - 1, 0, -1):
if chunks_energies[i] >= threshold:
termination_chunk_i = i
break
num_pad_chunks = 4
onset_chunk_i = np.max((0, onset_chunk_i - num_pad_chunks))
termination_chunk_i = np.min((len(chunks_energies), termination_chunk_i + num_pad_chunks))
return [onset_chunk_i*chunk_length, (termination_chunk_i+1)*chunk_length]
def extract_subsequence_with_speech(full_sequence):
indices = get_subsequence_with_speech_indices(full_sequence)
return full_sequence[indices[0]:indices[1]]
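# Editor's sketch: trimming leading/trailing silence around a synthetic tone.
def _demo_trim_silence():
    silence = np.zeros(8000)
    tone = 0.5 * np.sin(np.linspace(0, 100, 8000))
    padded = np.concatenate([silence, tone, silence])
    trimmed = extract_subsequence_with_speech(padded)
    assert len(trimmed) < len(padded)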
def dir_contains_files(path):
for f in os.listdir(path):
if not f.startswith('.'):
return True
return False
| [
"tensorflow.keras.backend.sign",
"numpy.max",
"numpy.mean",
"numpy.iinfo",
"tensorflow.keras.backend.log",
"numpy.square",
"numpy.arange",
"numpy.eye",
"numpy.finfo",
"numpy.argmax",
"numpy.log",
"tensorflow.keras.backend.abs",
"numpy.append",
"numpy.log10",
"numpy.array",
"numpy.log2",
"numpy.abs",
"numpy.sign",
"tensorflow.keras.losses.mean_absolute_error",
"tensorflow.keras.losses.mean_squared_error"
] | util.py | [(121, 'numpy.argmax', 'np.argmax', (['x'], {'axis': '(-1)'}), True, 'import numpy as np\n'), (125, 'numpy.append', 'np.append', (['signal[0]', '(signal[1:] - alpha * signal[:-1])'], {}), True, 'import numpy as np\n'), (166, 'soundfile.read', 'sf.read', (['filename'], {}), True, 'import soundfile as sf\n'), (210, 'numpy.abs', 'np.abs', (['full_sequence'], {}), True, 'import numpy as np\n'), (233, 'numpy.max', 'np.max', (['(0, onset_chunk_i - num_pad_chunks)'], {}), True, 'import numpy as np\n'), (246, 'os.listdir', 'os.listdir', (['path'], {}), False, 'import os\n'), (39, 'numpy.log10', 'np.log10', (['(rms_amplitude_A / rms_amplitude_B)'], {}), True, 'import numpy as np\n'), (59, 'numpy.iinfo', 'np.iinfo', (['"""uint8"""'], {}), True, 'import numpy as np\n'), (74, 'numpy.sign', 'np.sign', (['x'], {}), True, 'import numpy as np\n'), (79, 'tensorflow.keras.backend.sign', 'keras.backend.sign', (['x'], {}), True, 'import tensorflow.keras as keras\n'), (84, 'numpy.iinfo', 'np.iinfo', (['"""uint8"""'], {}), True, 'import numpy as np\n'), (85, 'numpy.iinfo', 'np.iinfo', (['"""uint8"""'], {}), True, 'import numpy as np\n'), (114, 'numpy.array', 'np.array', (['[x]'], {}), True, 'import numpy as np\n'), (116, 'numpy.array', 'np.array', (['x'], {}), True, 'import numpy as np\n'), (117, 'numpy.eye', 'np.eye', (['num_values'], {'dtype': '"""uint8"""'}), True, 'import numpy as np\n'), (130, 'numpy.array', 'np.array', (['[x]'], {}), True, 'import numpy as np\n'), (132, 'numpy.array', 'np.array', (['x'], {}), True, 'import numpy as np\n'), (187, 'numpy.array', 'np.array', (['x'], {}), True, 'import numpy as np\n'), (189, 'warnings.catch_warnings', 'warnings.catch_warnings', ([], {}), False, 'import warnings\n'), (190, 'warnings.simplefilter', 'warnings.simplefilter', (['"""error"""'], {}), False, 'import warnings\n'), (191, 'soundfile.write', 'sf.write', (['filename', 'x', 'sample_rate'], {}), True, 'import soundfile as sf\n'), (205, 'numpy.abs', 'np.abs', (['x'], {}), True, 'import numpy as np\n'), (218, 'numpy.max', 'np.max', (['chunks_energies'], {}), True, 'import numpy as np\n'), (18, 'tensorflow.keras.losses.mean_absolute_error', 'keras.losses.mean_absolute_error', (['y_true', 'y_pred'], {}), True, 'import tensorflow.keras as keras\n'), (21, 'tensorflow.keras.losses.mean_squared_error', 'keras.losses.mean_squared_error', (['y_true', 'y_pred'], {}), True, 'import tensorflow.keras as keras\n'), (44, 'numpy.iinfo', 'np.iinfo', (['x.dtype'], {}), True, 'import numpy as np\n'), (45, 'numpy.iinfo', 'np.iinfo', (['x.dtype'], {}), True, 'import numpy as np\n'), (74, 'numpy.log', 'np.log', (['(1 + u)'], {}), True, 'import numpy as np\n'), (79, 'tensorflow.keras.backend.log', 'keras.backend.log', (['(1 + u)'], {}), True, 'import tensorflow.keras as keras\n'), (103, 'numpy.sign', 'np.sign', (['x'], {}), True, 'import numpy as np\n'), (108, 'tensorflow.keras.backend.sign', 'keras.backend.sign', (['x'], {}), True, 'import tensorflow.keras as keras\n'), (159, 'json.dumps', 'json.dumps', (['values'], {'sort_keys': '(True)', 'indent': '(4)', 'separators': "(',', ': ')"}), False, 'import json\n'), (201, 'numpy.square', 'np.square', (['x'], {}), True, 'import numpy as np\n'), (216, 'numpy.mean', 'np.mean', (['signal_magnitude[i:i + chunk_length]'], {}), True, 'import numpy as np\n'), (47, 'numpy.finfo', 'np.finfo', (['x.dtype'], {}), True, 'import numpy as np\n'), (48, 'numpy.finfo', 'np.finfo', (['x.dtype'], {}), True, 'import numpy as np\n'), (103, 'numpy.abs', 'np.abs', (['x'], {}), True, 'import numpy as np\n'), (108, 'tensorflow.keras.backend.abs', 'keras.backend.abs', (['x'], {}), True, 'import tensorflow.keras as keras\n'), (133, 'numpy.log2', 'np.log2', (['max_value'], {}), True, 'import numpy as np\n'), (74, 'numpy.abs', 'np.abs', (['x'], {}), True, 'import numpy as np\n'), (79, 'tensorflow.keras.backend.abs', 'keras.backend.abs', (['x'], {}), True, 'import tensorflow.keras as keras\n'), (134, 'numpy.arange', 'np.arange', (['width'], {}), True, 'import numpy as np\n')]
Knarik1/transformers | c2a7d7280250addae38a49c31a57ddd897be2065 | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 RoFormer model. """
import math
from typing import Dict, Optional, Tuple, Union
import numpy as np
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFBaseModelOutputWithPooling,
TFCausalLMOutput,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFCausalLanguageModelingLoss,
TFMaskedLanguageModelingLoss,
TFModelInputType,
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFSequenceSummary,
TFTokenClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_roformer import RoFormerConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "junnyu/roformer_chinese_base"
_CONFIG_FOR_DOC = "RoFormerConfig"
_TOKENIZER_FOR_DOC = "RoFormerTokenizer"
TF_ROFORMER_PRETRAINED_MODEL_ARCHIVE_LIST = [
"junnyu/roformer_chinese_small",
"junnyu/roformer_chinese_base",
"junnyu/roformer_chinese_char_small",
"junnyu/roformer_chinese_char_base",
"junnyu/roformer_small_discriminator",
"junnyu/roformer_small_generator"
# See all RoFormer models at https://huggingface.co/models?filter=roformer
]
class TFRoFormerSinusoidalPositionalEmbedding(tf.keras.layers.Layer):
"""This module produces sinusoidal positional embeddings of any length."""
def __init__(self, num_positions: int, embedding_dim: int, **kwargs):
super().__init__(**kwargs)
if embedding_dim % 2 != 0:
raise NotImplementedError(f"odd embedding_dim {embedding_dim} not supported")
self.embedding_dim = embedding_dim
self.num_positions = num_positions
def build(self, input_shape: tf.TensorShape):
"""
Build shared token embedding layer Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
weight = self._init_weight(self.num_positions, self.embedding_dim)
self.weight = self.add_weight(
name="embeddings",
shape=[self.num_positions, self.embedding_dim],
)
weight = tf.cast(weight, dtype=self.weight.dtype)
self.weight.assign(weight)
super().build(input_shape)
@staticmethod
def _init_weight(n_pos: int, dim: int):
"""
Identical to the XLM create_sinusoidal_embeddings except features are not interleaved. The cos features are in
the 2nd half of the vector. [dim // 2:]
"""
position_enc = np.array(
[[pos / np.power(10000, 2 * (j // 2) / dim) for j in range(dim)] for pos in range(n_pos)]
)
table = np.zeros_like(position_enc)
# index 0 is all zero
table[:, 0 : dim // 2] = np.sin(position_enc[:, 0::2])
table[:, dim // 2 :] = np.cos(position_enc[:, 1::2])
# convert to tensor
table = tf.convert_to_tensor(table)
        table = tf.stop_gradient(table)
return table
def call(self, input_shape: tf.TensorShape, past_key_values_length: int = 0):
"""Input is expected to be of size [bsz x seqlen]."""
bsz, seq_len = input_shape[:2]
positions = tf.range(past_key_values_length, seq_len + past_key_values_length, delta=1, name="range")
return tf.gather(self.weight, positions)
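# Editor's sketch (not part of the original module): the sinusoidal table stores
# all sin features in the first half of the last dimension and all cos features
# in the second half, so position 0 encodes [0, ..., 0, 1, ..., 1].
def _demo_sinusoidal_table():
    table = TFRoFormerSinusoidalPositionalEmbedding._init_weight(n_pos=4, dim=6)
    assert table.shape == (4, 6)
    print(table[0])  # -> [0. 0. 0. 1. 1. 1.]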
class TFRoFormerEmbeddings(tf.keras.layers.Layer):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config: RoFormerConfig, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.type_vocab_size = config.type_vocab_size
self.embedding_size = config.embedding_size
self.initializer_range = config.initializer_range
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def build(self, input_shape: tf.TensorShape):
with tf.name_scope("word_embeddings"):
self.weight = self.add_weight(
name="weight",
shape=[self.vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("token_type_embeddings"):
self.token_type_embeddings = self.add_weight(
name="embeddings",
shape=[self.type_vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
def call(
self,
input_ids: tf.Tensor = None,
token_type_ids: tf.Tensor = None,
inputs_embeds: tf.Tensor = None,
training: bool = False,
) -> tf.Tensor:
"""
Applies embedding based on inputs tensor.
Returns:
final_embeddings (:obj:`tf.Tensor`): output embedding tensor.
"""
assert not (input_ids is None and inputs_embeds is None)
if input_ids is not None:
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = inputs_embeds + token_type_embeds
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
class TFRoFormerSelfAttention(tf.keras.layers.Layer):
def __init__(self, config: RoFormerConfig, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number "
f"of attention heads ({config.num_attention_heads})"
)
self.num_attention_heads = config.num_attention_heads
self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.sqrt_att_head_size = math.sqrt(self.attention_head_size)
self.query = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
)
self.key = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
)
self.value = tf.keras.layers.Dense(
units=self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
)
self.dropout = tf.keras.layers.Dropout(rate=config.attention_probs_dropout_prob)
self.rotary_value = config.rotary_value
def transpose_for_scores(self, tensor: tf.Tensor, batch_size: int) -> tf.Tensor:
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
tensor = tf.reshape(tensor=tensor, shape=(batch_size, -1, self.num_attention_heads, self.attention_head_size))
# Transpose the tensor from [batch_size, seq_length, num_attention_heads, attention_head_size] to [batch_size, num_attention_heads, seq_length, attention_head_size]
return tf.transpose(tensor, perm=[0, 2, 1, 3])
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
sinusoidal_pos: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
training: bool = False,
) -> Tuple[tf.Tensor]:
batch_size = shape_list(hidden_states)[0]
mixed_query_layer = self.query(inputs=hidden_states)
mixed_key_layer = self.key(inputs=hidden_states)
mixed_value_layer = self.value(inputs=hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
value_layer = self.transpose_for_scores(mixed_value_layer, batch_size)
if sinusoidal_pos is not None:
if self.rotary_value:
query_layer, key_layer, value_layer = self.apply_rotary_position_embeddings(
sinusoidal_pos, query_layer, key_layer, value_layer
)
else:
query_layer, key_layer = self.apply_rotary_position_embeddings(sinusoidal_pos, query_layer, key_layer)
# Take the dot product between "query" and "key" to get the raw attention scores.
# (batch size, num_heads, seq_len_q, seq_len_k)
attention_scores = tf.matmul(query_layer, key_layer, transpose_b=True)
dk = tf.cast(self.sqrt_att_head_size, dtype=attention_scores.dtype)
attention_scores = tf.divide(attention_scores, dk)
if attention_mask is not None:
# Apply the attention mask is (precomputed for all layers in TFRoFormerModel call() function)
attention_scores = tf.add(attention_scores, attention_mask)
# Normalize the attention scores to probabilities.
attention_probs = tf.nn.softmax(logits=attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(inputs=attention_probs, training=training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = tf.multiply(attention_probs, head_mask)
attention_output = tf.matmul(attention_probs, value_layer)
attention_output = tf.transpose(attention_output, perm=[0, 2, 1, 3])
# (batch_size, seq_len_q, all_head_size)
attention_output = tf.reshape(tensor=attention_output, shape=(batch_size, -1, self.all_head_size))
outputs = (attention_output, attention_probs) if output_attentions else (attention_output,)
return outputs
@staticmethod
def apply_rotary_position_embeddings(sinusoidal_pos, query_layer, key_layer, value_layer=None):
# https://kexue.fm/archives/8265
# sin [batch_size, num_heads, sequence_length, embed_size_per_head//2]
# cos [batch_size, num_heads, sequence_length, embed_size_per_head//2]
sin, cos = tf.split(sinusoidal_pos, num_or_size_splits=2, axis=-1)
# sin [θ0,θ1,θ2......θd/2-1]-> sin_pos [θ0,θ0,θ1,θ1,θ2,θ2......θd/2-1,θd/2-1]
# cos [θ0,θ1,θ2......θd/2-1]-> cos_pos [θ0,θ0,θ1,θ1,θ2,θ2......θd/2-1,θd/2-1]
sin_pos = tf.repeat(sin, 2, axis=-1)
cos_pos = tf.repeat(cos, 2, axis=-1)
# rotate_half_query_layer [-q1,q0,-q3,q2......,-qd-1,qd-2]
rotate_half_query_layer = tf.stack([-query_layer[..., 1::2], query_layer[..., ::2]], axis=-1)
rotate_half_query_layer = tf.reshape(rotate_half_query_layer, shape_list(query_layer))
query_layer = query_layer * cos_pos + rotate_half_query_layer * sin_pos
# rotate_half_key_layer [-k1,k0,-k3,k2......,-kd-1,kd-2]
rotate_half_key_layer = tf.stack([-key_layer[..., 1::2], key_layer[..., ::2]], axis=-1)
rotate_half_key_layer = tf.reshape(rotate_half_key_layer, shape_list(key_layer))
key_layer = key_layer * cos_pos + rotate_half_key_layer * sin_pos
if value_layer is not None:
# rotate_half_value_layer [-v1,v0,-v3,v2......,-vd-1,vd-2]
rotate_half_value_layer = tf.stack([-value_layer[..., 1::2], value_layer[..., ::2]], axis=-1)
rotate_half_value_layer = tf.reshape(rotate_half_value_layer, shape_list(value_layer))
value_layer = value_layer * cos_pos + rotate_half_value_layer * sin_pos
return query_layer, key_layer, value_layer
return query_layer, key_layer
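# Editor's sketch: rotary embeddings rotate each (even, odd) feature pair by a
# position-dependent angle, so query/key dot products depend on relative position.
# The toy shapes (batch=1, heads=1, seq_len=4, head_dim=2) are the editor's
# assumption, not values from the model.
def _demo_rotary():
    pos_table = TFRoFormerSinusoidalPositionalEmbedding._init_weight(n_pos=4, dim=2)
    sinusoidal_pos = tf.cast(pos_table, tf.float32)[None, None, :, :]
    q = tf.ones((1, 1, 4, 2))
    k = tf.ones((1, 1, 4, 2))
    q_rot, k_rot = TFRoFormerSelfAttention.apply_rotary_position_embeddings(sinusoidal_pos, q, k)
    print(q_rot.shape, k_rot.shape)  # (1, 1, 4, 2) (1, 1, 4, 2)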
# Copied from transformers.models.bert.modeling_tf_bert.TFBertSelfOutput with Bert->RoFormer
class TFRoFormerSelfOutput(tf.keras.layers.Layer):
def __init__(self, config: RoFormerConfig, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.dropout(inputs=hidden_states, training=training)
hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
return hidden_states
class TFRoFormerAttention(tf.keras.layers.Layer):
def __init__(self, config: RoFormerConfig, **kwargs):
super().__init__(**kwargs)
self.self_attention = TFRoFormerSelfAttention(config, name="self")
self.dense_output = TFRoFormerSelfOutput(config, name="output")
def prune_heads(self, heads):
raise NotImplementedError
def call(
self,
input_tensor: tf.Tensor,
attention_mask: tf.Tensor,
sinusoidal_pos: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
training: bool = False,
) -> Tuple[tf.Tensor]:
self_outputs = self.self_attention(
hidden_states=input_tensor,
attention_mask=attention_mask,
sinusoidal_pos=sinusoidal_pos,
head_mask=head_mask,
output_attentions=output_attentions,
training=training,
)
attention_output = self.dense_output(
hidden_states=self_outputs[0], input_tensor=input_tensor, training=training
)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
# Copied from transformers.models.bert.modeling_tf_bert.TFBertIntermediate with Bert->RoFormer
class TFRoFormerIntermediate(tf.keras.layers.Layer):
def __init__(self, config: RoFormerConfig, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
else:
self.intermediate_act_fn = config.hidden_act
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
# Copied from transformers.models.bert.modeling_tf_bert.TFBertOutput with Bert->RoFormer
class TFRoFormerOutput(tf.keras.layers.Layer):
def __init__(self, config: RoFormerConfig, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def call(self, hidden_states: tf.Tensor, input_tensor: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.dropout(inputs=hidden_states, training=training)
hidden_states = self.LayerNorm(inputs=hidden_states + input_tensor)
return hidden_states
class TFRoFormerLayer(tf.keras.layers.Layer):
def __init__(self, config: RoFormerConfig, **kwargs):
super().__init__(**kwargs)
self.attention = TFRoFormerAttention(config, name="attention")
self.intermediate = TFRoFormerIntermediate(config, name="intermediate")
self.roformer_output = TFRoFormerOutput(config, name="output")
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
sinusoidal_pos: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
training: bool = False,
) -> Tuple[tf.Tensor]:
attention_outputs = self.attention(
input_tensor=hidden_states,
attention_mask=attention_mask,
sinusoidal_pos=sinusoidal_pos,
head_mask=head_mask,
output_attentions=output_attentions,
training=training,
)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(hidden_states=attention_output)
layer_output = self.roformer_output(
hidden_states=intermediate_output, input_tensor=attention_output, training=training
)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
class TFRoFormerEncoder(tf.keras.layers.Layer):
def __init__(self, config: RoFormerConfig, **kwargs):
super().__init__(**kwargs)
self.embed_positions = TFRoFormerSinusoidalPositionalEmbedding(
config.max_position_embeddings,
config.hidden_size // config.num_attention_heads,
name="embed_positions",
)
self.layer = [TFRoFormerLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
def call(
self,
hidden_states: tf.Tensor,
attention_mask: tf.Tensor,
head_mask: tf.Tensor,
output_attentions: bool,
output_hidden_states: bool,
return_dict: bool,
training: bool = False,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
# [sequence_length, embed_size_per_head] -> [batch_size, num_heads, sequence_length, embed_size_per_head]
sinusoidal_pos = self.embed_positions(shape_list(hidden_states)[:-1])[None, None, :, :]
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states=hidden_states,
attention_mask=attention_mask,
sinusoidal_pos=sinusoidal_pos,
head_mask=head_mask[i],
output_attentions=output_attentions,
training=training,
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class TFRoFormerPredictionHeadTransform(tf.keras.layers.Layer):
def __init__(self, config: RoFormerConfig, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
units=config.embedding_size,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
if isinstance(config.hidden_act, str):
self.transform_act_fn = get_tf_activation(config.hidden_act)
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(inputs=hidden_states)
return hidden_states
class TFRoFormerLMPredictionHead(tf.keras.layers.Layer):
def __init__(self, config: RoFormerConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.embedding_size = config.embedding_size
self.transform = TFRoFormerPredictionHeadTransform(config, name="transform")
# The output weights are the same as the input embeddings, but there is
# an output-only bias for each token.
self.input_embeddings = input_embeddings
def build(self, input_shape: tf.TensorShape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def get_output_embeddings(self) -> tf.keras.layers.Layer:
return self.input_embeddings
def set_output_embeddings(self, value: tf.Variable):
self.input_embeddings.weight = value
self.input_embeddings.vocab_size = shape_list(value)[0]
def get_bias(self) -> Dict[str, tf.Variable]:
return {"bias": self.bias}
def set_bias(self, value: tf.Variable):
self.bias = value["bias"]
self.vocab_size = shape_list(value["bias"])[0]
def call(self, hidden_states: tf.Tensor) -> tf.Tensor:
hidden_states = self.transform(hidden_states=hidden_states)
seq_length = shape_list(hidden_states)[1]
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
return hidden_states
# Copied from transformers.models.bert.modeling_tf_bert.TFBertMLMHead with Bert->RoFormer
class TFRoFormerMLMHead(tf.keras.layers.Layer):
def __init__(self, config: RoFormerConfig, input_embeddings: tf.keras.layers.Layer, **kwargs):
super().__init__(**kwargs)
self.predictions = TFRoFormerLMPredictionHead(config, input_embeddings, name="predictions")
def call(self, sequence_output: tf.Tensor) -> tf.Tensor:
prediction_scores = self.predictions(hidden_states=sequence_output)
return prediction_scores
@keras_serializable
class TFRoFormerMainLayer(tf.keras.layers.Layer):
config_class = RoFormerConfig
def __init__(self, config: RoFormerConfig, add_pooling_layer: bool = True, **kwargs):
super().__init__(**kwargs)
self.config = config
self.embeddings = TFRoFormerEmbeddings(config, name="embeddings")
if config.embedding_size != config.hidden_size:
self.embeddings_project = tf.keras.layers.Dense(config.hidden_size, name="embeddings_project")
self.encoder = TFRoFormerEncoder(config, name="encoder")
def get_input_embeddings(self) -> tf.keras.layers.Layer:
return self.embeddings
def set_input_embeddings(self, value: tf.Variable):
self.embeddings.weight = value
self.embeddings.vocab_size = shape_list(value)[0]
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
raise NotImplementedError
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: bool = False,
**kwargs,
) -> Union[TFBaseModelOutput, Tuple[tf.Tensor]]:
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["attention_mask"] is None:
inputs["attention_mask"] = tf.fill(dims=input_shape, value=1)
if inputs["token_type_ids"] is None:
inputs["token_type_ids"] = tf.fill(dims=input_shape, value=0)
embedding_output = self.embeddings(
input_ids=inputs["input_ids"],
token_type_ids=inputs["token_type_ids"],
inputs_embeds=inputs["inputs_embeds"],
training=inputs["training"],
)
if hasattr(self, "embeddings_project"):
embedding_output = self.embeddings_project(embedding_output, training=inputs["training"])
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
        # this attention mask is simpler than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = tf.reshape(inputs["attention_mask"], (input_shape[0], 1, 1, input_shape[1]))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = tf.cast(extended_attention_mask, dtype=embedding_output.dtype)
one_cst = tf.constant(1.0, dtype=embedding_output.dtype)
ten_thousand_cst = tf.constant(-10000.0, dtype=embedding_output.dtype)
extended_attention_mask = tf.multiply(tf.subtract(one_cst, extended_attention_mask), ten_thousand_cst)
# Prepare head mask if needed
# 1.0 in head_mask indicate we keep the head
# attention_probs has shape bsz x n_heads x N x N
# input head_mask has shape [num_heads] or [num_hidden_layers x num_heads]
# and head_mask is converted to shape [num_hidden_layers x batch x num_heads x seq_length x seq_length]
if inputs["head_mask"] is not None:
raise NotImplementedError
else:
inputs["head_mask"] = [None] * self.config.num_hidden_layers
encoder_outputs = self.encoder(
hidden_states=embedding_output,
attention_mask=extended_attention_mask,
head_mask=inputs["head_mask"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = encoder_outputs[0]
if not inputs["return_dict"]:
return (sequence_output,) + encoder_outputs[1:]
return TFBaseModelOutput(
last_hidden_state=sequence_output,
hidden_states=encoder_outputs.hidden_states,
attentions=encoder_outputs.attentions,
)
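# Editor's sketch of the attention-mask trick used in TFRoFormerMainLayer.call
# above: a padding mask [1, 1, 0] becomes an additive bias [0, 0, -10000] that
# is broadcast over heads and query positions before the softmax.
def _demo_extended_attention_mask():
    mask = tf.constant([[1.0, 1.0, 0.0]])
    extended = tf.reshape(mask, (1, 1, 1, 3))
    print((1.0 - extended) * -10000.0)  # [[[[-0. -0. -10000.]]]]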
class TFRoFormerPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = RoFormerConfig
base_model_prefix = "roformer"
ROFORMER_START_DOCSTRING = r"""
This model inherits from :class:`~transformers.TFPreTrainedModel`. Check the superclass documentation for the
generic methods the library implements for all its model (such as downloading or saving, resizing the input
embeddings, pruning heads etc.)
This model is also a `tf.keras.Model <https://www.tensorflow.org/api_docs/python/tf/keras/Model>`__ subclass. Use
    it as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matters related to general usage
and behavior.
.. note::
        TF 2.0 models accept two formats as inputs:
- having all inputs as keyword arguments (like PyTorch models), or
        - having all inputs as a list, tuple or dict in the first positional argument.
This second option is useful when using :meth:`tf.keras.Model.fit` method which currently requires having all
the tensors in the first argument of the model call function: :obj:`model(inputs)`.
If you choose this second option, there are three possibilities you can use to gather all the input Tensors in
        the first positional argument:
- a single Tensor with :obj:`input_ids` only and nothing else: :obj:`model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
:obj:`model([input_ids, attention_mask])` or :obj:`model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
:obj:`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
Args:
config (:class:`~transformers.RoFormerConfig`): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the :meth:`~transformers.PreTrainedModel.from_pretrained` method to load the model
weights.
"""
ROFORMER_INPUTS_DOCSTRING = r"""
Args:
input_ids (:obj:`np.ndarray`, :obj:`tf.Tensor`, :obj:`List[tf.Tensor]` :obj:`Dict[str, tf.Tensor]` or :obj:`Dict[str, np.ndarray]` and each example must have the shape :obj:`({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using :class:`~transformers.RoFormerTokenizer`. See
:func:`transformers.PreTrainedTokenizer.__call__` and :func:`transformers.PreTrainedTokenizer.encode` for
details.
`What are input IDs? <../glossary.html#input-ids>`__
attention_mask (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Mask to avoid performing attention on padding token indices. Mask values selected in ``[0, 1]``:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
`What are attention masks? <../glossary.html#attention-mask>`__
token_type_ids (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0})`, `optional`):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in ``[0,
1]``:
- 0 corresponds to a `sentence A` token,
- 1 corresponds to a `sentence B` token.
`What are token type IDs? <../glossary.html#token-type-ids>`__
head_mask (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`(num_heads,)` or :obj:`(num_layers, num_heads)`, `optional`):
Mask to nullify selected heads of the self-attention modules. Mask values selected in ``[0, 1]``:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (:obj:`np.ndarray` or :obj:`tf.Tensor` of shape :obj:`({0}, hidden_size)`, `optional`):
Optionally, instead of passing :obj:`input_ids` you can choose to directly pass an embedded representation.
This is useful if you want more control over how to convert :obj:`input_ids` indices into associated
vectors than the model's internal embedding lookup matrix.
output_attentions (:obj:`bool`, `optional`):
Whether or not to return the attentions tensors of all attention layers. See ``attentions`` under returned
tensors for more detail. This argument can be used only in eager mode, in graph mode the value in the
config will be used instead.
output_hidden_states (:obj:`bool`, `optional`):
Whether or not to return the hidden states of all layers. See ``hidden_states`` under returned tensors for
more detail. This argument can be used only in eager mode, in graph mode the value in the config will be
used instead.
return_dict (:obj:`bool`, `optional`):
Whether or not to return a :class:`~transformers.file_utils.ModelOutput` instead of a plain tuple. This
argument can be used in eager mode, in graph mode the value will always be set to True.
training (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare RoFormer Model transformer outputing raw hidden-states without any specific head on top.",
ROFORMER_START_DOCSTRING,
)
class TFRoFormerModel(TFRoFormerPreTrainedModel):
def __init__(self, config: RoFormerConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.roformer = TFRoFormerMainLayer(config, name="roformer")
@add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFBaseModelOutputWithPooling,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFBaseModelOutputWithPooling, Tuple[tf.Tensor]]:
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.roformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
def serving_output(self, output: TFBaseModelOutput) -> TFBaseModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
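# Editor's usage sketch (assumes access to the Hugging Face Hub and the rjieba
# dependency required by RoFormerTokenizer; not part of the original module):
#
#     from transformers import RoFormerTokenizer, TFRoFormerModel
#     tokenizer = RoFormerTokenizer.from_pretrained("junnyu/roformer_chinese_base")
#     model = TFRoFormerModel.from_pretrained("junnyu/roformer_chinese_base")
#     inputs = tokenizer("今天天气非常好", return_tensors="tf")  # Chinese checkpoint
#     outputs = model(inputs)
#     print(outputs.last_hidden_state.shape)  # (1, sequence_length, hidden_size)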
@add_start_docstrings("""RoFormer Model with a `language modeling` head on top. """, ROFORMER_START_DOCSTRING)
class TFRoFormerForMaskedLM(TFRoFormerPreTrainedModel, TFMaskedLanguageModelingLoss):
def __init__(self, config: RoFormerConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
if config.is_decoder:
logger.warning(
"If you want to use `TFRoFormerForMaskedLM` make sure `config.is_decoder=False` for "
"bi-directional self-attention."
)
self.roformer = TFRoFormerMainLayer(config, name="roformer")
self.mlm = TFRoFormerMLMHead(config, input_embeddings=self.roformer.embeddings, name="mlm___cls")
def get_lm_head(self) -> tf.keras.layers.Layer:
return self.mlm.predictions
@add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMaskedLMOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the masked language modeling loss. Indices should be in ``[-100, 0, ...,
config.vocab_size]`` (see ``input_ids`` docstring) Tokens with indices set to ``-100`` are ignored
(masked), the loss is only computed for the tokens with labels in ``[0, ..., config.vocab_size]``
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.roformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
prediction_scores = self.mlm(sequence_output=sequence_output, training=inputs["training"])
loss = (
None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=prediction_scores)
)
if not inputs["return_dict"]:
output = (prediction_scores,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFMaskedLMOutput) -> TFMaskedLMOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""RoFormer Model with a `language modeling` head on top for CLM fine-tuning. """, ROFORMER_START_DOCSTRING
)
class TFRoFormerForCausalLM(TFRoFormerPreTrainedModel, TFCausalLanguageModelingLoss):
def __init__(self, config: RoFormerConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
if not config.is_decoder:
logger.warning("If you want to use `TFRoFormerForCausalLM` as a standalone, add `is_decoder=True.`")
self.roformer = TFRoFormerMainLayer(config, name="roformer")
self.mlm = TFRoFormerMLMHead(config, input_embeddings=self.roformer.embeddings, name="mlm___cls")
def get_lm_head(self) -> tf.keras.layers.Layer:
return self.mlm.predictions
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFCausalLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFCausalLMOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the cross entropy classification loss. Indices should be in ``[0, ...,
config.vocab_size - 1]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.roformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.mlm(sequence_output=sequence_output, training=inputs["training"])
loss = None
if inputs["labels"] is not None:
# shift labels to the left and cut last logit token
logits = logits[:, :-1]
labels = inputs["labels"][:, 1:]
loss = self.compute_loss(labels=labels, logits=logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFCausalLMOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFCausalLMOutput) -> TFCausalLMOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFCausalLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
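# Editor's note on the causal shift in TFRoFormerForCausalLM.call above: for
# tokens [t0, t1, t2, t3], the logits at positions 0..2 are scored against
# labels t1..t3, so each position is trained to predict the next token.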
class TFRoFormerClassificationHead(tf.keras.layers.Layer):
"""Head for sentence-level classification tasks."""
def __init__(self, config: RoFormerConfig, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
self.dense = tf.keras.layers.Dense(
units=config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
self.out_proj = tf.keras.layers.Dense(
units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
)
if isinstance(config.hidden_act, str):
self.classifier_act_fn = get_tf_activation(config.hidden_act)
else:
self.classifier_act_fn = config.hidden_act
def call(self, hidden_states: tf.Tensor, training: bool = False) -> tf.Tensor:
hidden_states = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
hidden_states = self.dropout(inputs=hidden_states, training=training)
hidden_states = self.dense(inputs=hidden_states)
hidden_states = self.classifier_act_fn(hidden_states)
hidden_states = self.dropout(inputs=hidden_states, training=training)
hidden_states = self.out_proj(hidden_states)
return hidden_states
@add_start_docstrings(
"""
RoFormer Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.
""",
ROFORMER_START_DOCSTRING,
)
class TFRoFormerForSequenceClassification(TFRoFormerPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config: RoFormerConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.roformer = TFRoFormerMainLayer(config, name="roformer")
self.classifier = TFRoFormerClassificationHead(config, name="classifier")
@add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFSequenceClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the sequence classification/regression loss. Indices should be in :obj:`[0, ...,
config.num_labels - 1]`. If :obj:`config.num_labels == 1` a regression loss is computed (Mean-Square loss),
If :obj:`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.roformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.classifier(hidden_states=outputs[0], training=inputs["training"])
loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFSequenceClassifierOutput) -> TFSequenceClassifierOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
RoFormer Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
ROFORMER_START_DOCSTRING,
)
class TFRoFormerForMultipleChoice(TFRoFormerPreTrainedModel, TFMultipleChoiceLoss):
def __init__(self, config: RoFormerConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.roformer = TFRoFormerMainLayer(config, name="roformer")
self.sequence_summary = TFSequenceSummary(config, config.initializer_range, name="sequence_summary")
self.classifier = tf.keras.layers.Dense(
units=1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@property
def dummy_inputs(self) -> Dict[str, tf.Tensor]:
"""
Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs
"""
return {"input_ids": tf.constant(MULTIPLE_CHOICE_DUMMY_INPUTS)}
@add_start_docstrings_to_model_forward(
ROFORMER_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFMultipleChoiceModelOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
Labels for computing the multiple choice classification loss. Indices should be in ``[0, ...,
num_choices]`` where :obj:`num_choices` is the size of the second dimension of the input tensors. (See
:obj:`input_ids` above)
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
num_choices = shape_list(inputs["inputs_embeds"])[1]
seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = (
tf.reshape(tensor=inputs["input_ids"], shape=(-1, seq_length)) if inputs["input_ids"] is not None else None
)
flat_attention_mask = (
tf.reshape(tensor=inputs["attention_mask"], shape=(-1, seq_length))
if inputs["attention_mask"] is not None
else None
)
flat_token_type_ids = (
tf.reshape(tensor=inputs["token_type_ids"], shape=(-1, seq_length))
if inputs["token_type_ids"] is not None
else None
)
flat_inputs_embeds = (
tf.reshape(tensor=inputs["inputs_embeds"], shape=(-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
if inputs["inputs_embeds"] is not None
else None
)
outputs = self.roformer(
input_ids=flat_input_ids,
attention_mask=flat_attention_mask,
token_type_ids=flat_token_type_ids,
head_mask=inputs["head_mask"],
inputs_embeds=flat_inputs_embeds,
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.sequence_summary(inputs=outputs[0], training=inputs["training"])
logits = self.classifier(inputs=logits)
reshaped_logits = tf.reshape(tensor=logits, shape=(-1, num_choices))
loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=reshaped_logits)
if not inputs["return_dict"]:
output = (reshaped_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
"token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"),
}
]
)
def serving(self, inputs: Dict[str, tf.Tensor]) -> TFMultipleChoiceModelOutput:
output = self.call(input_ids=inputs)
return self.serving_output(output)
def serving_output(self, output: TFMultipleChoiceModelOutput) -> TFMultipleChoiceModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
RoFormer Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
ROFORMER_START_DOCSTRING,
)
class TFRoFormerForTokenClassification(TFRoFormerPreTrainedModel, TFTokenClassificationLoss):
def __init__(self, config: RoFormerConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.roformer = TFRoFormerMainLayer(config, name="roformer")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
self.classifier = tf.keras.layers.Dense(
units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
labels: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFTokenClassifierOutput, Tuple[tf.Tensor]]:
r"""
labels (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size, sequence_length)`, `optional`):
Labels for computing the token classification loss. Indices should be in ``[0, ..., config.num_labels -
1]``.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.roformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(inputs=sequence_output, training=inputs["training"])
logits = self.classifier(inputs=sequence_output)
loss = None if inputs["labels"] is None else self.compute_loss(labels=inputs["labels"], logits=logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output: TFTokenClassifierOutput) -> TFTokenClassifierOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
RoFormer Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
ROFORMER_START_DOCSTRING,
)
class TFRoFormerForQuestionAnswering(TFRoFormerPreTrainedModel, TFQuestionAnsweringLoss):
def __init__(self, config: RoFormerConfig, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.roformer = TFRoFormerMainLayer(config, name="roformer")
self.qa_outputs = tf.keras.layers.Dense(
units=config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
@add_start_docstrings_to_model_forward(ROFORMER_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids: Optional[TFModelInputType] = None,
attention_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
token_type_ids: Optional[Union[np.ndarray, tf.Tensor]] = None,
head_mask: Optional[Union[np.ndarray, tf.Tensor]] = None,
inputs_embeds: Optional[Union[np.ndarray, tf.Tensor]] = None,
output_attentions: Optional[bool] = None,
output_hidden_states: Optional[bool] = None,
return_dict: Optional[bool] = None,
start_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
end_positions: Optional[Union[np.ndarray, tf.Tensor]] = None,
training: Optional[bool] = False,
**kwargs,
) -> Union[TFQuestionAnsweringModelOutput, Tuple[tf.Tensor]]:
r"""
        start_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the start of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
        end_positions (:obj:`tf.Tensor` or :obj:`np.ndarray` of shape :obj:`(batch_size,)`, `optional`):
            Labels for position (index) of the end of the labelled span for computing the token classification loss.
            Positions are clamped to the length of the sequence (:obj:`sequence_length`). Positions outside of the
            sequence are not taken into account for computing the loss.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
start_positions=start_positions,
end_positions=end_positions,
training=training,
kwargs_call=kwargs,
)
outputs = self.roformer(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.qa_outputs(inputs=sequence_output)
start_logits, end_logits = tf.split(value=logits, num_or_size_splits=2, axis=-1)
start_logits = tf.squeeze(input=start_logits, axis=-1)
end_logits = tf.squeeze(input=end_logits, axis=-1)
loss = None
if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
labels = {"start_position": inputs["start_positions"]}
labels["end_position"] = inputs["end_positions"]
loss = self.compute_loss(labels=labels, logits=(start_logits, end_logits))
if not inputs["return_dict"]:
output = (start_logits, end_logits) + outputs[2:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
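    # Note on the span head above: `qa_outputs` maps each token to `num_labels`
    # values (2 in the usual QA setup), so `logits` has shape (batch, seq_len, 2);
    # tf.split yields two (batch, seq_len, 1) tensors and tf.squeeze drops the last
    # axis, giving per-token start and end scores.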
def serving_output(self, output: TFQuestionAnsweringModelOutput) -> TFQuestionAnsweringModelOutput:
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFQuestionAnsweringModelOutput(
start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
)
| [
"tensorflow.convert_to_tensor",
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.stack",
"tensorflow.cast",
"numpy.zeros_like",
"tensorflow.squeeze",
"numpy.sin",
"tensorflow.stop_gradient",
"tensorflow.gather",
"tensorflow.divide",
"tensorflow.subtract",
"tensorflow.add",
"tensorflow.name_scope",
"tensorflow.matmul",
"tensorflow.fill",
"numpy.power",
"tensorflow.keras.layers.Dense",
"tensorflow.split",
"tensorflow.nn.bias_add",
"tensorflow.nn.softmax",
"tensorflow.transpose",
"tensorflow.constant",
"tensorflow.range",
"tensorflow.multiply",
"tensorflow.reshape",
"numpy.cos",
"tensorflow.repeat",
"tensorflow.keras.layers.Dropout",
"tensorflow.TensorSpec"
] | src/transformers/models/roformer/modeling_tf_roformer.py | [(101, 'tensorflow.cast', 'tf.cast', (['weight'], {'dtype': 'self.weight.dtype'}), True, 'import tensorflow as tf\n'), (116, 'numpy.zeros_like', 'np.zeros_like', (['position_enc'], {}), True, 'import numpy as np\n'), (118, 'numpy.sin', 'np.sin', (['position_enc[:, 0::2]'], {}), True, 'import numpy as np\n'), (119, 'numpy.cos', 'np.cos', (['position_enc[:, 1::2]'], {}), True, 'import numpy as np\n'), (121, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['table'], {}), True, 'import tensorflow as tf\n'), (122, 'tensorflow.stop_gradient', 'tf.stop_gradient', (['table'], {}), True, 'import tensorflow as tf\n'), (129, 'tensorflow.range', 'tf.range', (['past_key_values_length', '(seq_len + past_key_values_length)'], {'delta': '(1)', 'name': '"""range"""'}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.gather', 'tf.gather', (['self.weight', 'positions'], {}), True, 'import tensorflow as tf\n'), (143, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': 'config.layer_norm_eps', 'name': '"""LayerNorm"""'}), True, 'import tensorflow as tf\n'), (144, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'config.hidden_dropout_prob'}), True, 'import tensorflow as tf\n'), (187, 'tensorflow.gather', 'tf.gather', ([], {'params': 'self.token_type_embeddings', 'indices': 'token_type_ids'}), True, 'import tensorflow as tf\n'), (208, 'math.sqrt', 'math.sqrt', (['self.attention_head_size'], {}), False, 'import math\n'), (219, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'config.attention_probs_dropout_prob'}), True, 'import tensorflow as tf\n'), (224, 'tensorflow.reshape', 'tf.reshape', ([], {'tensor': 'tensor', 'shape': '(batch_size, -1, self.num_attention_heads, self.attention_head_size)'}), True, 'import tensorflow as tf\n'), (227, 'tensorflow.transpose', 'tf.transpose', (['tensor'], {'perm': '[0, 2, 1, 3]'}), True, 'import tensorflow as tf\n'), (256, 'tensorflow.matmul', 'tf.matmul', (['query_layer', 'key_layer'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (257, 'tensorflow.cast', 'tf.cast', (['self.sqrt_att_head_size'], {'dtype': 'attention_scores.dtype'}), True, 'import tensorflow as tf\n'), (258, 'tensorflow.divide', 'tf.divide', (['attention_scores', 'dk'], {}), True, 'import tensorflow as tf\n'), (265, 'tensorflow.nn.softmax', 'tf.nn.softmax', ([], {'logits': 'attention_scores', 'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (275, 'tensorflow.matmul', 'tf.matmul', (['attention_probs', 'value_layer'], {}), True, 'import tensorflow as tf\n'), (276, 'tensorflow.transpose', 'tf.transpose', (['attention_output'], {'perm': '[0, 2, 1, 3]'}), True, 'import tensorflow as tf\n'), (279, 'tensorflow.reshape', 'tf.reshape', ([], {'tensor': 'attention_output', 'shape': '(batch_size, -1, self.all_head_size)'}), True, 'import tensorflow as tf\n'), (289, 'tensorflow.split', 'tf.split', (['sinusoidal_pos'], {'num_or_size_splits': '(2)', 'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (292, 'tensorflow.repeat', 'tf.repeat', (['sin', '(2)'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (293, 'tensorflow.repeat', 'tf.repeat', (['cos', '(2)'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (295, 'tensorflow.stack', 'tf.stack', (['[-query_layer[(...), 1::2], query_layer[(...), ::2]]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (299, 'tensorflow.stack', 'tf.stack', 
(['[-key_layer[(...), 1::2], key_layer[(...), ::2]]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (319, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': 'config.layer_norm_eps', 'name': '"""LayerNorm"""'}), True, 'import tensorflow as tf\n'), (320, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'config.hidden_dropout_prob'}), True, 'import tensorflow as tf\n'), (394, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': 'config.layer_norm_eps', 'name': '"""LayerNorm"""'}), True, 'import tensorflow as tf\n'), (395, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'config.hidden_dropout_prob'}), True, 'import tensorflow as tf\n'), (510, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': 'config.layer_norm_eps', 'name': '"""LayerNorm"""'}), True, 'import tensorflow as tf\n'), (555, 'tensorflow.reshape', 'tf.reshape', ([], {'tensor': 'hidden_states', 'shape': '[-1, self.embedding_size]'}), True, 'import tensorflow as tf\n'), (556, 'tensorflow.matmul', 'tf.matmul', ([], {'a': 'hidden_states', 'b': 'self.input_embeddings.weight', 'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (557, 'tensorflow.reshape', 'tf.reshape', ([], {'tensor': 'hidden_states', 'shape': '[-1, seq_length, self.vocab_size]'}), True, 'import tensorflow as tf\n'), (558, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', ([], {'value': 'hidden_states', 'bias': 'self.bias'}), True, 'import tensorflow as tf\n'), (662, 'tensorflow.reshape', 'tf.reshape', (["inputs['attention_mask']", '(input_shape[0], 1, 1, input_shape[1])'], {}), True, 'import tensorflow as tf\n'), (669, 'tensorflow.cast', 'tf.cast', (['extended_attention_mask'], {'dtype': 'embedding_output.dtype'}), True, 'import tensorflow as tf\n'), (670, 'tensorflow.constant', 'tf.constant', (['(1.0)'], {'dtype': 'embedding_output.dtype'}), True, 'import tensorflow as tf\n'), (671, 'tensorflow.constant', 'tf.constant', (['(-10000.0)'], {'dtype': 'embedding_output.dtype'}), True, 'import tensorflow as tf\n'), (1067, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'config.hidden_dropout_prob'}), True, 'import tensorflow as tf\n'), (1289, 'tensorflow.reshape', 'tf.reshape', ([], {'tensor': 'logits', 'shape': '(-1, num_choices)'}), True, 'import tensorflow as tf\n'), (1339, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'config.hidden_dropout_prob'}), True, 'import tensorflow as tf\n'), (1498, 'tensorflow.split', 'tf.split', ([], {'value': 'logits', 'num_or_size_splits': '(2)', 'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (1499, 'tensorflow.squeeze', 'tf.squeeze', ([], {'input': 'start_logits', 'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (1500, 'tensorflow.squeeze', 'tf.squeeze', ([], {'input': 'end_logits', 'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (147, 'tensorflow.name_scope', 'tf.name_scope', (['"""word_embeddings"""'], {}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.name_scope', 'tf.name_scope', (['"""token_type_embeddings"""'], {}), True, 'import tensorflow as tf\n'), (180, 'tensorflow.gather', 'tf.gather', ([], {'params': 'self.weight', 'indices': 'input_ids'}), True, 'import tensorflow as tf\n'), (185, 'tensorflow.fill', 'tf.fill', ([], {'dims': 'input_shape', 'value': '(0)'}), True, 'import tensorflow as tf\n'), (262, 'tensorflow.add', 'tf.add', 
(['attention_scores', 'attention_mask'], {}), True, 'import tensorflow as tf\n'), (273, 'tensorflow.multiply', 'tf.multiply', (['attention_probs', 'head_mask'], {}), True, 'import tensorflow as tf\n'), (304, 'tensorflow.stack', 'tf.stack', (['[-value_layer[(...), 1::2], value_layer[(...), ::2]]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (587, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['config.hidden_size'], {'name': '"""embeddings_project"""'}), True, 'import tensorflow as tf\n'), (643, 'tensorflow.fill', 'tf.fill', ([], {'dims': 'input_shape', 'value': '(1)'}), True, 'import tensorflow as tf\n'), (646, 'tensorflow.fill', 'tf.fill', ([], {'dims': 'input_shape', 'value': '(0)'}), True, 'import tensorflow as tf\n'), (672, 'tensorflow.subtract', 'tf.subtract', (['one_cst', 'extended_attention_mask'], {}), True, 'import tensorflow as tf\n'), (863, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.hidden_states'], {}), True, 'import tensorflow as tf\n'), (864, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.attentions'], {}), True, 'import tensorflow as tf\n'), (957, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.hidden_states'], {}), True, 'import tensorflow as tf\n'), (958, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.attentions'], {}), True, 'import tensorflow as tf\n'), (1052, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.hidden_states'], {}), True, 'import tensorflow as tf\n'), (1053, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.attentions'], {}), True, 'import tensorflow as tf\n'), (1172, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.hidden_states'], {}), True, 'import tensorflow as tf\n'), (1173, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.attentions'], {}), True, 'import tensorflow as tf\n'), (1204, 'tensorflow.constant', 'tf.constant', (['MULTIPLE_CHOICE_DUMMY_INPUTS'], {}), True, 'import tensorflow as tf\n'), (1259, 'tensorflow.reshape', 'tf.reshape', ([], {'tensor': "inputs['input_ids']", 'shape': '(-1, seq_length)'}), True, 'import tensorflow as tf\n'), (1262, 'tensorflow.reshape', 'tf.reshape', ([], {'tensor': "inputs['attention_mask']", 'shape': '(-1, seq_length)'}), True, 'import tensorflow as tf\n'), (1267, 'tensorflow.reshape', 'tf.reshape', ([], {'tensor': "inputs['token_type_ids']", 'shape': '(-1, seq_length)'}), True, 'import tensorflow as tf\n'), (1319, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.hidden_states'], {}), True, 'import tensorflow as tf\n'), (1320, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.attentions'], {}), True, 'import tensorflow as tf\n'), (1413, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.hidden_states'], {}), True, 'import tensorflow as tf\n'), (1414, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.attentions'], {}), True, 'import tensorflow as tf\n'), (1521, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.hidden_states'], {}), True, 'import tensorflow as tf\n'), (1522, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.attentions'], {}), True, 'import tensorflow as tf\n'), (1307, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None, None)', 'tf.int32'], {'name': '"""input_ids"""'}), True, 'import tensorflow as tf\n'), (1308, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None, None)', 'tf.int32'], {'name': '"""attention_mask"""'}), True, 
'import tensorflow as tf\n'), (1309, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None, None)', 'tf.int32'], {'name': '"""token_type_ids"""'}), True, 'import tensorflow as tf\n'), (114, 'numpy.power', 'np.power', (['(10000)', '(2 * (j // 2) / dim)'], {}), True, 'import numpy as np\n')] |
manhhv87/densenet_bottleneck | fd08eb88514dacaff1bcec8bc52a77ea56ab72c7 | import argparse
import os
from pathlib import Path
import numpy as np
import tensorflow as tf
from sklearn.model_selection import StratifiedKFold
from finetuning.utils import ecg_feature_extractor, train_test_split
from transplant.evaluation import auc, f1, multi_f1, CustomCheckpoint
from transplant.utils import (create_predictions_frame, load_pkl, is_multiclass)
def _create_dataset_from_data(data):
"""
input: data = {'x': x,
'y': labels.to_numpy(),
'record_ids': labels.index.to_numpy(),
'classes': labels.columns.to_numpy()}
return: data and label
"""
return tf.data.Dataset.from_tensor_slices((data['x'], data['y']))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('--job-dir', type=Path, required=True, help='Job output directory.')
parser.add_argument('--train', type=Path, required=True, help='Path to the train file.')
parser.add_argument('--val', type=Path, help='Path to the validation file.\n'
'Overrides --val-size.')
parser.add_argument('--test', type=Path, help='Path to the test file.')
parser.add_argument('--weights-file', type=Path, help='Path to pretrained weights or a checkpoint of the model.')
parser.add_argument('--val-size', type=float, default=None,
help='Size of the validation set or proportion of the train set.')
parser.add_argument('--subset', type=float, default=None, help='Size of a subset of the train set '
'or proportion of the train set.')
    parser.add_argument('--batch-size', type=int, default=32, help='Batch size.')
    # The script references `args.arch` below (see `ecg_feature_extractor(arch=args.arch)`)
    # but the flag was missing from the original parser; it is added here so the script runs.
    parser.add_argument('--arch', help='Feature extractor architecture passed to ecg_feature_extractor.')
parser.add_argument('--val-metric', default='loss',
help='Validation metric used to find the best model at each epoch. Supported metrics are:'
'`loss`, `acc`, `f1`, `auc`.')
parser.add_argument('--channel', type=int, default=None, help='Use only the selected channel. '
'By default use all available channels.')
parser.add_argument('--epochs', type=int, default=1, help='Number of epochs.')
parser.add_argument('--seed', type=int, default=None, help='Random state.')
parser.add_argument('--verbose', action='store_true', help='Show debug messages.')
args, _ = parser.parse_known_args()
if args.val_metric not in ['loss', 'acc', 'f1', 'auc']:
raise ValueError('Unknown metric: {}'.format(args.val_metric))
os.makedirs(name=str(args.job_dir), exist_ok=True)
print('Creating working directory in {}'.format(args.job_dir))
seed = args.seed or np.random.randint(2 ** 16)
print('Setting random state {}'.format(seed))
np.random.seed(seed)
    # No separate val file: the validation set is split off from the train set
    if not args.val and args.val_size:
        if args.val_size >= 1:  # >= 1 means an absolute number of patients; otherwise a proportion of the train set
            args.val_size = int(args.val_size)
    # Further subset the train set after it has been split into train and val sets
    # >= 1 means an absolute number of patients; otherwise a proportion of the train set
    if args.subset and args.subset >= 1:
        args.subset = int(args.subset)
print('Loading train data from {} ...'.format(args.train))
train = load_pkl(file=str(args.train))
    if args.val:  # Load the val set from the val file
        print('Loading validation data from {} ...'.format(args.val))
        val = load_pkl(file=str(args.val))
    elif args.val_size:  # Split the val set off from the train set instead of loading it from a file
        original_train_size = len(train['x'])  # Size of the full dataset
        train, val = train_test_split(train, test_size=args.val_size, stratify=train['y'])  # Split into train and val sets
        new_train_size = len(train['x'])  # Size of the new train set
        new_val_size = len(val['x'])  # Size of the new val set
        print('Split data into train {:.2%} and validation {:.2%}'.format(
            new_train_size / original_train_size, new_val_size / original_train_size))
    else:  # No validation set is used
        val = None
    if args.test:  # Use a separate test set file
        print('Loading test data from {} ...'.format(args.test))
        test = load_pkl(str(args.test))
    else:  # No test set is used
        test = None
    if args.subset:  # Further subset the train set after the initial train/val split
        original_train_size = len(train['x'])  # Size of the train set
        train, _ = train_test_split(train, train_size=args.subset, stratify=train['y'])  # Keep only the subset as the new train set
        new_train_size = len(train['x'])  # Size of the new train set
        print('Using only {:.2%} of train data'.format(new_train_size / original_train_size))
if args.channel is not None:
train['x'] = train['x'][:, :, args.channel:args.channel + 1]
if val:
val['x'] = val['x'][:, :, args.channel:args.channel + 1]
if test:
test['x'] = test['x'][:, :, args.channel:args.channel + 1]
print('Train data shape:', train['x'].shape)
train_data = _create_dataset_from_data(train).shuffle(len(train['x'])).batch(args.batch_size)
val_data = _create_dataset_from_data(val).batch(args.batch_size) if val else None
test_data = _create_dataset_from_data(test).batch(args.batch_size) if test else None
strategy = tf.distribute.MirroredStrategy()
with strategy.scope():
print('Building model ...')
num_classes = len(train['classes'])
if is_multiclass(train['y']):
activation = 'sigmoid'
loss = tf.keras.losses.BinaryCrossentropy()
accuracy = tf.keras.metrics.BinaryAccuracy(name='acc')
else:
activation = 'softmax'
loss = tf.keras.losses.CategoricalCrossentropy()
accuracy = tf.keras.metrics.CategoricalAccuracy(name='acc')
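        # Note: sigmoid + binary cross-entropy scores every class independently
        # (a multi-label setup), whereas softmax + categorical cross-entropy
        # assumes mutually exclusive classes.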
        # the feature extractor does not include the final fc layer
model = ecg_feature_extractor(arch=args.arch)
model.add(tf.keras.layers.Dense(units=num_classes, activation=activation))
# initialize the weights of the model
inputs = tf.keras.layers.Input(shape=train['x'].shape[1:], dtype=train['x'].dtype)
model(inputs) # complete model
print('# model parameters: {:,d}'.format(model.count_params()))
        if args.weights_file:  # Use pretrained weights
# initialize weights (excluding the optimizer state) to load the pretrained resnet
# the optimizer state is randomly initialized in the `model.compile` function
print('Loading weights from file {} ...'.format(args.weights_file))
model.load_weights(str(args.weights_file))
model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=loss,
metrics=[accuracy])
callbacks = []
logger = tf.keras.callbacks.CSVLogger(filename=str(args.job_dir / 'history.csv'))
callbacks.append(logger)
if args.val_metric in ['loss', 'acc']:
monitor = ('val_' + args.val_metric) if val else args.val_metric
checkpoint = tf.keras.callbacks.ModelCheckpoint(filepath=str(args.job_dir / 'best_model.weights'),
monitor=monitor,
save_best_only=True,
save_weights_only=True,
mode='auto',
verbose=1)
elif args.val_metric == 'f1':
if is_multiclass(train['y']):
score_fn = multi_f1
else:
score_fn = f1
checkpoint = CustomCheckpoint(filepath=str(args.job_dir / 'best_model.weights'),
data=(val_data, val['y']) if val else (train_data, train['y']),
score_fn=score_fn,
save_best_only=True,
verbose=1)
elif args.val_metric == 'auc':
checkpoint = CustomCheckpoint(filepath=str(args.job_dir / 'best_model.weights'),
data=(val_data, val['y']) if val else (train_data, train['y']),
score_fn=auc,
save_best_only=True,
verbose=1)
else:
raise ValueError('Unknown metric: {}'.format(args.val_metric))
callbacks.append(checkpoint)
if val:
# new adding
rl_stopping = tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=7,
verbose=1, min_lr=1e-7)
callbacks.append(rl_stopping)
early_stopping = tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=30, verbose=1)
callbacks.append(early_stopping)
model.fit(train_data, epochs=args.epochs, verbose=2, validation_data=val_data, callbacks=callbacks)
# load best model for inference
print('Loading the best weights from file {} ...'.format(str(args.job_dir / 'best_model.weights')))
model.load_weights(filepath=str(args.job_dir / 'best_model.weights'))
print('Predicting training data ...')
train_y_prob = model.predict(x=train['x'], batch_size=args.batch_size)
train_predictions = create_predictions_frame(y_prob=train_y_prob,
y_true=train['y'],
class_names=train['classes'],
record_ids=train['record_ids'])
train_predictions.to_csv(path_or_buf=args.job_dir / 'train_predictions.csv', index=False)
if val:
print('Predicting validation data ...')
val_y_prob = model.predict(x=val['x'], batch_size=args.batch_size)
val_predictions = create_predictions_frame(y_prob=val_y_prob, y_true=val['y'],
class_names=train['classes'],
record_ids=val['record_ids'])
val_predictions.to_csv(path_or_buf=args.job_dir / 'val_predictions.csv', index=False)
if test:
print('Predicting test data ...')
test_y_prob = model.predict(x=test['x'], batch_size=args.batch_size)
test_predictions = create_predictions_frame(y_prob=test_y_prob, y_true=test['y'],
class_names=train['classes'],
record_ids=test['record_ids'])
test_predictions.to_csv(path_or_buf=args.job_dir / 'test_predictions.csv', index=False)
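# Invocation sketch (illustrative; paths and values are placeholders, not taken
# from the original repository):
#   python trainer_old.py --job-dir jobs/run1 --train data/train.pkl \
#       --val-size 0.15 --batch-size 32 --val-metric f1 --epochs 100 --arch <arch_name>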
| [
"tensorflow.keras.metrics.BinaryAccuracy",
"tensorflow.keras.losses.CategoricalCrossentropy",
"numpy.random.seed",
"tensorflow.distribute.MirroredStrategy",
"tensorflow.keras.layers.Dense",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"tensorflow.keras.losses.BinaryCrossentropy",
"tensorflow.keras.optimizers.Adam",
"numpy.random.randint",
"tensorflow.keras.metrics.CategoricalAccuracy",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Input"
] | finetuning/trainer_old.py | [(23, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (["(data['x'], data['y'])"], {}), True, 'import tensorflow as tf\n'), (27, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), False, 'import argparse\n'), (57, 'numpy.random.seed', 'np.random.seed', (['seed'], {}), True, 'import numpy as np\n'), (110, 'tensorflow.distribute.MirroredStrategy', 'tf.distribute.MirroredStrategy', ([], {}), True, 'import tensorflow as tf\n'), (55, 'numpy.random.randint', 'np.random.randint', (['(2 ** 16)'], {}), True, 'import numpy as np\n'), (93, 'finetuning.utils.train_test_split', 'train_test_split', (['train'], {'train_size': 'args.subset', 'stratify': "train['y']"}), False, 'from finetuning.utils import ecg_feature_extractor, train_test_split\n'), (116, 'transplant.utils.is_multiclass', 'is_multiclass', (["train['y']"], {}), False, 'from transplant.utils import create_predictions_frame, load_pkl, is_multiclass\n'), (126, 'finetuning.utils.ecg_feature_extractor', 'ecg_feature_extractor', ([], {'arch': 'args.arch'}), False, 'from finetuning.utils import ecg_feature_extractor, train_test_split\n'), (130, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', ([], {'shape': "train['x'].shape[1:]", 'dtype': "train['x'].dtype"}), True, 'import tensorflow as tf\n'), (198, 'transplant.utils.create_predictions_frame', 'create_predictions_frame', ([], {'y_prob': 'train_y_prob', 'y_true': "train['y']", 'class_names': "train['classes']", 'record_ids': "train['record_ids']"}), False, 'from transplant.utils import create_predictions_frame, load_pkl, is_multiclass\n'), (77, 'finetuning.utils.train_test_split', 'train_test_split', (['train'], {'test_size': 'args.val_size', 'stratify': "train['y']"}), False, 'from finetuning.utils import ecg_feature_extractor, train_test_split\n'), (118, 'tensorflow.keras.losses.BinaryCrossentropy', 'tf.keras.losses.BinaryCrossentropy', ([], {}), True, 'import tensorflow as tf\n'), (119, 'tensorflow.keras.metrics.BinaryAccuracy', 'tf.keras.metrics.BinaryAccuracy', ([], {'name': '"""acc"""'}), True, 'import tensorflow as tf\n'), (122, 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {}), True, 'import tensorflow as tf\n'), (123, 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {'name': '"""acc"""'}), True, 'import tensorflow as tf\n'), (127, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'num_classes', 'activation': 'activation'}), True, 'import tensorflow as tf\n'), (183, 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'tf.keras.callbacks.ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.5)', 'patience': '(7)', 'verbose': '(1)', 'min_lr': '(1e-07)'}), True, 'import tensorflow as tf\n'), (187, 'tensorflow.keras.callbacks.EarlyStopping', 'tf.keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(30)', 'verbose': '(1)'}), True, 'import tensorflow as tf\n'), (207, 'transplant.utils.create_predictions_frame', 'create_predictions_frame', ([], {'y_prob': 'val_y_prob', 'y_true': "val['y']", 'class_names': "train['classes']", 'record_ids': "val['record_ids']"}), False, 'from transplant.utils import create_predictions_frame, load_pkl, is_multiclass\n'), (215, 'transplant.utils.create_predictions_frame', 'create_predictions_frame', ([], {'y_prob': 'test_y_prob', 'y_true': "test['y']", 'class_names': "train['classes']", 'record_ids': 
"test['record_ids']"}), False, 'from transplant.utils import create_predictions_frame, load_pkl, is_multiclass\n'), (141, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), True, 'import tensorflow as tf\n'), (159, 'transplant.utils.is_multiclass', 'is_multiclass', (["train['y']"], {}), False, 'from transplant.utils import create_predictions_frame, load_pkl, is_multiclass\n')] |
adfoucart/deephisto | f70fbaad9f95a9b9f2e420c9c33d46bdfab5bdf9 | import tensorflow as tf
class F1Metric(tf.keras.metrics.Metric):
def __init__(self, name=None, dtype=None):
super(F1Metric, self).__init__(name=name, dtype=dtype)
self.tp_ = tf.keras.metrics.TruePositives()
self.fp_ = tf.keras.metrics.FalsePositives()
self.fn_ = tf.keras.metrics.FalseNegatives()
def reset_states(self):
self.tp_.reset_states()
self.fp_.reset_states()
self.fn_.reset_states()
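    # Usage sketch (illustrative; assumes one-hot masks of shape (batch, H, W, 2)
    # with channel 1 as the positive class, matching the slicing below):
    #   metric = F1Metric(name="f1")
    #   metric.update_state(y_true, y_pred)
    #   f1_value = metric.result()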
def update_state(self, y_true, y_pred, sample_weight=None):
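        # Channel 1 of the one-hot masks is taken as the positive class.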
self.tp_.update_state(y_true[:,:,:,1], y_pred[:,:,:,1], sample_weight)
self.fp_.update_state(y_true[:,:,:,1], y_pred[:,:,:,1], sample_weight)
self.fn_.update_state(y_true[:,:,:,1], y_pred[:,:,:,1], sample_weight)
def result(self):
tp = self.tp_.result()
fp = self.fp_.result()
fn = self.fn_.result()
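        # F1 = 2*TP / (2*TP + FP + FN), the harmonic mean of precision and recall.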
return (2*tp)/(2*tp+fp+fn) | [
"tensorflow.keras.metrics.TruePositives",
"tensorflow.keras.metrics.FalsePositives",
"tensorflow.keras.metrics.FalseNegatives"
] | model/F1Metric.py | [(6, 'tensorflow.keras.metrics.TruePositives', 'tf.keras.metrics.TruePositives', ([], {}), True, 'import tensorflow as tf\n'), (7, 'tensorflow.keras.metrics.FalsePositives', 'tf.keras.metrics.FalsePositives', ([], {}), True, 'import tensorflow as tf\n'), (8, 'tensorflow.keras.metrics.FalseNegatives', 'tf.keras.metrics.FalseNegatives', ([], {}), True, 'import tensorflow as tf\n')] |
sahilparekh/imgclsmob | 74d52457b4bf00c82d063b3f4a1a73fb6ba3863a | """
WRN for ImageNet-1K, implemented in TensorFlow.
Original paper: 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
"""
__all__ = ['WRN', 'wrn50_2']
import os
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import Conv2d, MaxPool2d, flatten, is_channels_first
class WRNConv(nn.Layer):
"""
WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
kernel_size : int or tuple/list of 2 int
Convolution window size.
strides : int or tuple/list of 2 int
Strides of the convolution.
padding : int or tuple/list of 2 int
Padding value for convolution layer.
activate : bool
Whether activate the convolution block.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
kernel_size,
strides,
padding,
activate,
data_format="channels_last",
**kwargs):
super(WRNConv, self).__init__(**kwargs)
self.activate = activate
self.conv = Conv2d(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=kernel_size,
strides=strides,
padding=padding,
use_bias=True,
data_format=data_format,
name="conv")
if self.activate:
self.activ = nn.ReLU()
def call(self, x, training=None):
x = self.conv(x)
if self.activate:
x = self.activ(x)
return x
def wrn_conv1x1(in_channels,
out_channels,
strides,
activate,
data_format="channels_last",
**kwargs):
"""
1x1 version of the WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
activate : bool
Whether activate the convolution block.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
return WRNConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=1,
strides=strides,
padding=0,
activate=activate,
data_format=data_format,
**kwargs)
def wrn_conv3x3(in_channels,
out_channels,
strides,
activate,
data_format="channels_last",
**kwargs):
"""
3x3 version of the WRN specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
activate : bool
Whether activate the convolution block.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
return WRNConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=3,
strides=strides,
padding=1,
activate=activate,
data_format=data_format,
**kwargs)
class WRNBottleneck(nn.Layer):
"""
WRN bottleneck block for residual path in WRN unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
width_factor : float
Wide scale factor for width of layers.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
strides,
width_factor,
data_format="channels_last",
**kwargs):
super(WRNBottleneck, self).__init__(**kwargs)
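        # Bottleneck width: the standard ResNet out_channels // 4, scaled by the
        # width factor (2.0 for WRN-50-2), which is what makes the network "wide".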
mid_channels = int(round(out_channels // 4 * width_factor))
self.conv1 = wrn_conv1x1(
in_channels=in_channels,
out_channels=mid_channels,
strides=1,
activate=True,
data_format=data_format,
name="conv1")
self.conv2 = wrn_conv3x3(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
activate=True,
data_format=data_format,
name="conv2")
self.conv3 = wrn_conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
strides=1,
activate=False,
data_format=data_format,
name="conv3")
def call(self, x, training=None):
x = self.conv1(x, training=training)
x = self.conv2(x, training=training)
x = self.conv3(x, training=training)
return x
class WRNUnit(nn.Layer):
"""
WRN unit with residual connection.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
width_factor : float
Wide scale factor for width of layers.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
strides,
width_factor,
data_format="channels_last",
**kwargs):
super(WRNUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
self.body = WRNBottleneck(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
width_factor=width_factor,
data_format=data_format,
name="body")
if self.resize_identity:
self.identity_conv = wrn_conv1x1(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
activate=False,
data_format=data_format,
name="identity_conv")
self.activ = nn.ReLU()
def call(self, x, training=None):
if self.resize_identity:
identity = self.identity_conv(x, training=training)
else:
identity = x
x = self.body(x, training=training)
x = x + identity
x = self.activ(x)
return x
class WRNInitBlock(nn.Layer):
"""
WRN specific initial block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
data_format="channels_last",
**kwargs):
super(WRNInitBlock, self).__init__(**kwargs)
self.conv = WRNConv(
in_channels=in_channels,
out_channels=out_channels,
kernel_size=7,
strides=2,
padding=3,
activate=True,
data_format=data_format,
name="conv")
self.pool = MaxPool2d(
pool_size=3,
strides=2,
padding=1,
data_format=data_format,
name="pool")
def call(self, x, training=None):
x = self.conv(x, training=training)
x = self.pool(x)
return x
class WRN(tf.keras.Model):
"""
WRN model from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
width_factor : float
Wide scale factor for width of layers.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
channels,
init_block_channels,
width_factor,
in_channels=3,
in_size=(224, 224),
classes=1000,
data_format="channels_last",
**kwargs):
super(WRN, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
self.data_format = data_format
self.features = tf.keras.Sequential(name="features")
self.features.add(WRNInitBlock(
in_channels=in_channels,
out_channels=init_block_channels,
data_format=data_format,
name="init_block"))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = tf.keras.Sequential(name="stage{}".format(i + 1))
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and (i != 0) else 1
stage.add(WRNUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
width_factor=width_factor,
data_format=data_format,
name="unit{}".format(j + 1)))
in_channels = out_channels
self.features.add(stage)
self.features.add(nn.AveragePooling2D(
pool_size=7,
strides=1,
data_format=data_format,
name="final_pool"))
self.output1 = nn.Dense(
units=classes,
input_dim=in_channels,
name="output1")
def call(self, x, training=None):
x = self.features(x, training=training)
x = flatten(x, self.data_format)
x = self.output1(x)
return x
def get_wrn(blocks,
width_factor,
model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs):
"""
Create WRN model with specific parameters.
Parameters:
----------
blocks : int
Number of blocks.
width_factor : float
Wide scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
if blocks == 50:
layers = [3, 4, 6, 3]
elif blocks == 101:
layers = [3, 4, 23, 3]
elif blocks == 152:
layers = [3, 8, 36, 3]
elif blocks == 200:
layers = [3, 24, 36, 3]
else:
raise ValueError("Unsupported WRN with number of blocks: {}".format(blocks))
init_block_channels = 64
channels_per_layers = [256, 512, 1024, 2048]
channels = [[ci] * li for (ci, li) in zip(channels_per_layers, layers)]
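    # e.g. for blocks=50 this yields [[256] * 3, [512] * 4, [1024] * 6, [2048] * 3]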
net = WRN(
channels=channels,
init_block_channels=init_block_channels,
width_factor=width_factor,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
(1,) + net.in_size + (in_channels,)
net.build(input_shape=input_shape)
net.load_weights(
filepath=get_model_file(
model_name=model_name,
local_model_store_dir_path=root))
return net
def wrn50_2(**kwargs):
"""
WRN-50-2 model from 'Wide Residual Networks,' https://arxiv.org/abs/1605.07146.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_wrn(blocks=50, width_factor=2.0, model_name="wrn50_2", **kwargs)
def _test():
import numpy as np
import tensorflow.keras.backend as K
data_format = "channels_last"
pretrained = False
models = [
wrn50_2,
]
for model in models:
net = model(pretrained=pretrained, data_format=data_format)
batch = 14
x = tf.random.normal((batch, 3, 224, 224) if is_channels_first(data_format) else (batch, 224, 224, 3))
y = net(x)
assert (tuple(y.shape.as_list()) == (batch, 1000))
weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
print("m={}, {}".format(model.__name__, weight_count))
assert (model != wrn50_2 or weight_count == 68849128)
if __name__ == "__main__":
_test()
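# Quick-use sketch beyond the self-test above (illustrative):
#   net = wrn50_2(data_format="channels_last")
#   logits = net(tf.random.normal((2, 224, 224, 3)))  # -> shape (2, 1000)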
| [
"tensorflow.keras.layers.AveragePooling2D",
"tensorflow.keras.layers.ReLU",
"tensorflow.keras.backend.get_value",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.Sequential"
] | tensorflow2/tf2cv/models/wrn.py | [(361, 'os.path.join', 'os.path.join', (['"""~"""', '""".tensorflow"""', '"""models"""'], {}), False, 'import os\n'), (230, 'tensorflow.keras.layers.ReLU', 'nn.ReLU', ([], {}), True, 'import tensorflow.keras.layers as nn\n'), (319, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {'name': '"""features"""'}), True, 'import tensorflow as tf\n'), (345, 'tensorflow.keras.layers.Dense', 'nn.Dense', ([], {'units': 'classes', 'input_dim': 'in_channels', 'name': '"""output1"""'}), True, 'import tensorflow.keras.layers as nn\n'), (57, 'tensorflow.keras.layers.ReLU', 'nn.ReLU', ([], {}), True, 'import tensorflow.keras.layers as nn\n'), (339, 'tensorflow.keras.layers.AveragePooling2D', 'nn.AveragePooling2D', ([], {'pool_size': '(7)', 'strides': '(1)', 'data_format': 'data_format', 'name': '"""final_pool"""'}), True, 'import tensorflow.keras.layers as nn\n'), (452, 'tensorflow.keras.backend.get_value', 'K.get_value', (['w'], {}), True, 'import tensorflow.keras.backend as K\n')] |
xvalad/ML | baf71a32fe5c5cf8e9f79a7ec46f59b878f87965 | #!/usr/bin/python3
# LinearRegressionWIthSyntheticData by Google
# https://colab.research.google.com/github/google/eng-edu/blob/master/ml/cc/exercises/linear_regression_with_synthetic_data.ipynb
#
import pandas as pd
import tensorflow as tf
from matplotlib import pyplot as plt
#
## DEFINE FUNCTIONS THAT BUILD AND TRAIN A MODEL
def build_model(my_learning_rate):
"""Create and compile a simple linear regression model."""
# Most simple tf.keras models are sequential.
# A sequential model contains one or more layers.
model = tf.keras.models.Sequential()
# Describe the topography of the model.
# The topography of a simple linear regression model
# is a single node in a single layer.
model.add(tf.keras.layers.Dense(units=1,
input_shape=(1,)))
# Compile the model topography into code that
# TensorFlow can efficiently execute. Configure
# training to minimize the model's mean squared error.
  model.compile(optimizer=tf.keras.optimizers.RMSprop(learning_rate=my_learning_rate),
loss="mean_squared_error",
metrics=[tf.keras.metrics.RootMeanSquaredError()])
return model
def train_model(model, feature, label, epochs, batch_size):
"""Train the model by feeding it data."""
# Feed the feature values and the label values to the
# model. The model will train for the specified number
# of epochs, gradually learning how the feature values
# relate to the label values.
history = model.fit(x=feature,
y=label,
batch_size=batch_size,
epochs=epochs)
# Gather the trained model's weight and bias.
trained_weight = model.get_weights()[0]
trained_bias = model.get_weights()[1]
# The list of epochs is stored separately from the
# rest of history.
epochs = history.epoch
# Gather the history (a snapshot) of each epoch.
hist = pd.DataFrame(history.history)
# Specifically gather the model's root mean
#squared error at each epoch.
rmse = hist["root_mean_squared_error"]
return trained_weight, trained_bias, epochs, rmse
print("Defined create_model and train_model")
# #
## DEFINE PLOTTING FUNCTIONS
def plot_the_model(trained_weight, trained_bias, feature, label):
"""Plot the trained model against the training feature and label."""
# Label the axes.
plt.xlabel("feature")
plt.ylabel("label")
# Plot the feature values vs. label values.
plt.scatter(feature, label)
# Create a red line representing the model. The red line starts
# at coordinates (x0, y0) and ends at coordinates (x1, y1).
x0 = 0
y0 = trained_bias
  x1 = feature[-1]  # use the function argument, not the module-level global
y1 = trained_bias + (trained_weight * x1)
plt.plot([x0, x1], [y0, y1], c='r')
# Render the scatter plot and the red line.
plt.show()
def plot_the_loss_curve(epochs, rmse):
"""Plot the loss curve, which shows loss vs. epoch."""
plt.figure()
plt.xlabel("Epoch")
plt.ylabel("Root Mean Squared Error")
plt.plot(epochs, rmse, label="Loss")
plt.legend()
plt.ylim([rmse.min()*0.97, rmse.max()])
plt.show()
print("Defined the plot_the_model and plot_the_loss_curve functions.")
# #
#
my_feature = ([1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0, 10.0, 11.0, 12.0])
my_label = ([5.0, 8.8, 9.6, 14.2, 18.8, 19.5, 21.4, 26.8, 28.9, 32.0, 33.8, 38.2])
#
learning_rate=0.01
epochs = 10
my_batch_size = 12
#
my_model = build_model(learning_rate)
trained_weight, trained_bias, epochs, rmse = train_model(my_model,my_feature,my_label,epochs,my_batch_size)
plot_the_model(trained_weight,trained_bias,my_feature,my_label)
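# Experimentation note (hedged): the synthetic labels above follow roughly
# label ≈ 3 * feature + 2 plus noise, so a larger learning rate and more epochs
# (e.g. learning_rate=0.14, epochs=70) should drive the trained weight and bias
# toward 3 and 2.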
plot_the_loss_curve(epochs,rmse) | [
"matplotlib.pyplot.legend",
"matplotlib.pyplot.scatter",
"matplotlib.pyplot.figure",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.optimizers.RMSprop",
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"tensorflow.keras.metrics.RootMeanSquaredError",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"tensorflow.keras.models.Sequential",
"matplotlib.pyplot.ylabel"
] | LinearRegressionWithSyntheticData.py | [(14, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (53, 'pandas.DataFrame', 'pd.DataFrame', (['history.history'], {}), True, 'import pandas as pd\n'), (68, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""feature"""'], {}), True, 'from matplotlib import pyplot as plt\n'), (69, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""label"""'], {}), True, 'from matplotlib import pyplot as plt\n'), (72, 'matplotlib.pyplot.scatter', 'plt.scatter', (['feature', 'label'], {}), True, 'from matplotlib import pyplot as plt\n'), (80, 'matplotlib.pyplot.plot', 'plt.plot', (['[x0, x1]', '[y0, y1]'], {'c': '"""r"""'}), True, 'from matplotlib import pyplot as plt\n'), (83, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'from matplotlib import pyplot as plt\n'), (88, 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), True, 'from matplotlib import pyplot as plt\n'), (89, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Epoch"""'], {}), True, 'from matplotlib import pyplot as plt\n'), (90, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Root Mean Squared Error"""'], {}), True, 'from matplotlib import pyplot as plt\n'), (92, 'matplotlib.pyplot.plot', 'plt.plot', (['epochs', 'rmse'], {'label': '"""Loss"""'}), True, 'from matplotlib import pyplot as plt\n'), (93, 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), True, 'from matplotlib import pyplot as plt\n'), (95, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'from matplotlib import pyplot as plt\n'), (19, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(1)', 'input_shape': '(1,)'}), True, 'import tensorflow as tf\n'), (25, 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', ([], {'lr': 'my_learning_rate'}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.keras.metrics.RootMeanSquaredError', 'tf.keras.metrics.RootMeanSquaredError', ([], {}), True, 'import tensorflow as tf\n')] |
svenvanderburg/EEG_age_prediction | 958e8d6445bf277a445608e05d779315dbd9b376 | #!/usr/bin/env python
# ================ IMPORT LIBRARIES ================ #
import sys, os, fnmatch, time
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
sys.path.insert(0, os.path.dirname(os.getcwd()))
from dataset_generator import DataGenerator
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow import keras
from tensorflow.keras import layers, Input, Sequential
from tensorflow.keras.layers import Bidirectional, LSTM, Dropout, BatchNormalization, Dense, Conv1D, LeakyReLU, AveragePooling1D, Flatten, Reshape, MaxPooling1D
from tensorflow.keras.optimizers import Adam, Adadelta, SGD
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError
n_timesteps = 501
n_features = 30
n_outputs = 1
COUNT_MODEL = "FINAL" # This will be appended to the saved model's name. To make sure to not overwrite models, increase this.
MAX_QUEUE_SIZE = 5000
WORKERS = 6
input_shape = (n_timesteps, n_features)
# Input and output folders
PATH_DATA_PROCESSED_DL = sys.argv[1]
PATH_OUTPUT = sys.argv[2]
# ================ INITIAL LOGS ================ #
print("LOGGING: Imported all modules")
# ================ LOAD PREPROCESSED DATA ================ #
# Step 1: Get all the files in the output folder
file_names = os.listdir(PATH_DATA_PROCESSED_DL)
# Step 2: Get the full paths of the files (without extensions)
files = [os.path.splitext(os.path.join(PATH_DATA_PROCESSED_DL, file_name))[0] for file_name in fnmatch.filter(file_names, "*.zarr")]
# Step 3: Load all the metadata
frames = []
for idx, feature_file in enumerate(files):
df_metadata = pd.read_csv(feature_file.replace("processed_raw_", "processed_metadata_") + ".csv")
frames.append(df_metadata)
df_metadata = pd.concat(frames)
# Step 4: Add missing age information based on the age group the subject is in
df_metadata['age_months'].fillna(df_metadata['age_group'], inplace=True)
df_metadata['age_days'].fillna(df_metadata['age_group']*30, inplace=True)
df_metadata['age_years'].fillna(df_metadata['age_group']/12, inplace=True)
# Step 5: List all the unique subject IDs
subject_ids = sorted(list(set(df_metadata["code"].tolist())))
# Step 6: Split the subjects into train, val and test
IDs_train, IDs_temp = train_test_split(subject_ids, test_size=0.3, random_state=42)
IDs_test, IDs_val = train_test_split(IDs_temp, test_size=0.5, random_state=42)
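# The two-step split above yields roughly 70% train, 15% validation and 15% test:
# 30% of the subjects go to IDs_temp, which is then halved into test and val sets.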
# Step 7: Initialize DataGenerators
train_generator_noise = DataGenerator(list_IDs = IDs_train,
BASE_PATH = PATH_DATA_PROCESSED_DL,
metadata = df_metadata,
n_average = 30,
batch_size = 10,
gaussian_noise=0.01,
iter_per_epoch = 30,
n_timepoints = 501,
n_channels=30,
shuffle=True)
val_generator = DataGenerator(list_IDs = IDs_val,
BASE_PATH = PATH_DATA_PROCESSED_DL,
metadata = df_metadata,
n_average = 30,
batch_size = 10,
iter_per_epoch = 100,
n_timepoints = 501,
n_channels=30,
shuffle=True)
print("LOGGING: Loaded all data and created generators")
# ================ Encoder model ================ #
try:
def encoder_model():
""" Returns the Encoder model from Ismail Fawaz et al. (2019). """
input_layer = keras.layers.Input(input_shape)
# conv block -1
conv1 = keras.layers.Conv1D(filters=128,kernel_size=5,strides=1,padding='same')(input_layer)
conv1 = tfa.layers.InstanceNormalization()(conv1)
conv1 = keras.layers.PReLU(shared_axes=[1])(conv1)
conv1 = keras.layers.Dropout(rate=0.2)(conv1)
conv1 = keras.layers.MaxPooling1D(pool_size=2)(conv1)
# conv block -2
conv2 = keras.layers.Conv1D(filters=256,kernel_size=11,strides=1,padding='same')(conv1)
conv2 = tfa.layers.InstanceNormalization()(conv2)
conv2 = keras.layers.PReLU(shared_axes=[1])(conv2)
conv2 = keras.layers.Dropout(rate=0.2)(conv2)
conv2 = keras.layers.MaxPooling1D(pool_size=2)(conv2)
# conv block -3
conv3 = keras.layers.Conv1D(filters=512,kernel_size=21,strides=1,padding='same')(conv2)
conv3 = tfa.layers.InstanceNormalization()(conv3)
conv3 = keras.layers.PReLU(shared_axes=[1])(conv3)
conv3 = keras.layers.Dropout(rate=0.2)(conv3)
        # split for attention: the 512 conv filters are divided into 256 "data"
        # channels and 256 channels that are turned into a softmax attention mask
attention_data = keras.layers.Lambda(lambda x: x[:,:,:256])(conv3)
attention_softmax = keras.layers.Lambda(lambda x: x[:,:,256:])(conv3)
# attention mechanism
attention_softmax = keras.layers.Softmax()(attention_softmax)
multiply_layer = keras.layers.Multiply()([attention_softmax,attention_data])
# last layer
dense_layer = keras.layers.Dense(units=256,activation='sigmoid')(multiply_layer)
dense_layer = tfa.layers.InstanceNormalization()(dense_layer)
# output layer
flatten_layer = keras.layers.Flatten()(dense_layer)
output_layer = keras.layers.Dense(1)(flatten_layer)
model = keras.models.Model(inputs=input_layer, outputs=output_layer)
return model
model = encoder_model()
optimizer = Adam(learning_rate=0.00001)
model.compile(loss='mean_squared_error',
optimizer=optimizer,
metrics=[RootMeanSquaredError(), MeanAbsoluteError()])
output_filename = f'Encoder_regressor_{COUNT_MODEL}'
output_file = os.path.join(PATH_OUTPUT, output_filename)
checkpointer = ModelCheckpoint(filepath = output_file + ".hdf5", monitor='val_loss', verbose=1, save_best_only=True)
epochs = 500
print("LOGGING: Starting Encoder model training")
# fit network
history = model.fit(x=train_generator_noise,
validation_data=val_generator,
epochs=epochs,
verbose=2,
max_queue_size=MAX_QUEUE_SIZE,
workers=WORKERS,
callbacks=[checkpointer])
print("LOGGING: Finished Encoder model training")
except Exception as e:
print("LOGGING: Failed Encoder model training:")
print(e)
pass
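# Run sketch (argument order follows the sys.argv reads above):
#   python DL_final_Encoder_regressor.py <processed_data_dir> <output_dir>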
| [
"tensorflow.keras.callbacks.ModelCheckpoint",
"pandas.concat",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.PReLU",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv1D",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.layers.MaxPooling1D",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Multiply",
"tensorflow.keras.metrics.MeanAbsoluteError",
"tensorflow.keras.metrics.RootMeanSquaredError",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Softmax",
"tensorflow.keras.layers.Input"
] | scripts/DL_final_Encoder_regressor.py | [(44, 'os.listdir', 'os.listdir', (['PATH_DATA_PROCESSED_DL'], {}), False, 'import sys, os, fnmatch, time\n'), (56, 'pandas.concat', 'pd.concat', (['frames'], {}), True, 'import pandas as pd\n'), (67, 'sklearn.model_selection.train_test_split', 'train_test_split', (['subject_ids'], {'test_size': '(0.3)', 'random_state': '(42)'}), False, 'from sklearn.model_selection import train_test_split\n'), (68, 'sklearn.model_selection.train_test_split', 'train_test_split', (['IDs_temp'], {'test_size': '(0.5)', 'random_state': '(42)'}), False, 'from sklearn.model_selection import train_test_split\n'), (71, 'dataset_generator.DataGenerator', 'DataGenerator', ([], {'list_IDs': 'IDs_train', 'BASE_PATH': 'PATH_DATA_PROCESSED_DL', 'metadata': 'df_metadata', 'n_average': '(30)', 'batch_size': '(10)', 'gaussian_noise': '(0.01)', 'iter_per_epoch': '(30)', 'n_timepoints': '(501)', 'n_channels': '(30)', 'shuffle': '(True)'}), False, 'from dataset_generator import DataGenerator\n'), (82, 'dataset_generator.DataGenerator', 'DataGenerator', ([], {'list_IDs': 'IDs_val', 'BASE_PATH': 'PATH_DATA_PROCESSED_DL', 'metadata': 'df_metadata', 'n_average': '(30)', 'batch_size': '(10)', 'iter_per_epoch': '(100)', 'n_timepoints': '(501)', 'n_channels': '(30)', 'shuffle': '(True)'}), False, 'from dataset_generator import DataGenerator\n'), (137, 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(1e-05)'}), False, 'from tensorflow.keras.optimizers import Adam, Adadelta, SGD\n'), (144, 'os.path.join', 'os.path.join', (['PATH_OUTPUT', 'output_filename'], {}), False, 'import sys, os, fnmatch, time\n'), (146, 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': "(output_file + '.hdf5')", 'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)'}), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n'), (10, 'os.getcwd', 'os.getcwd', ([], {}), False, 'import sys, os, fnmatch, time\n'), (47, 'fnmatch.filter', 'fnmatch.filter', (['file_names', '"""*.zarr"""'], {}), False, 'import sys, os, fnmatch, time\n'), (99, 'tensorflow.keras.layers.Input', 'keras.layers.Input', (['input_shape'], {}), False, 'from tensorflow import keras\n'), (131, 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'input_layer', 'outputs': 'output_layer'}), False, 'from tensorflow import keras\n'), (47, 'os.path.join', 'os.path.join', (['PATH_DATA_PROCESSED_DL', 'file_name'], {}), False, 'import sys, os, fnmatch, time\n'), (102, 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(128)', 'kernel_size': '(5)', 'strides': '(1)', 'padding': '"""same"""'}), False, 'from tensorflow import keras\n'), (103, 'tensorflow_addons.layers.InstanceNormalization', 'tfa.layers.InstanceNormalization', ([], {}), True, 'import tensorflow_addons as tfa\n'), (104, 'tensorflow.keras.layers.PReLU', 'keras.layers.PReLU', ([], {'shared_axes': '[1]'}), False, 'from tensorflow import keras\n'), (105, 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', ([], {'rate': '(0.2)'}), False, 'from tensorflow import keras\n'), (106, 'tensorflow.keras.layers.MaxPooling1D', 'keras.layers.MaxPooling1D', ([], {'pool_size': '(2)'}), False, 'from tensorflow import keras\n'), (108, 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(256)', 'kernel_size': '(11)', 'strides': '(1)', 'padding': '"""same"""'}), False, 'from tensorflow import keras\n'), (109, 'tensorflow_addons.layers.InstanceNormalization', 'tfa.layers.InstanceNormalization', ([], {}), True, 'import tensorflow_addons as tfa\n'), (110, 'tensorflow.keras.layers.PReLU', 'keras.layers.PReLU', ([], {'shared_axes': '[1]'}), False, 'from tensorflow import keras\n'), (111, 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', ([], {'rate': '(0.2)'}), False, 'from tensorflow import keras\n'), (112, 'tensorflow.keras.layers.MaxPooling1D', 'keras.layers.MaxPooling1D', ([], {'pool_size': '(2)'}), False, 'from tensorflow import keras\n'), (114, 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(512)', 'kernel_size': '(21)', 'strides': '(1)', 'padding': '"""same"""'}), False, 'from tensorflow import keras\n'), (115, 'tensorflow_addons.layers.InstanceNormalization', 'tfa.layers.InstanceNormalization', ([], {}), True, 'import tensorflow_addons as tfa\n'), (116, 'tensorflow.keras.layers.PReLU', 'keras.layers.PReLU', ([], {'shared_axes': '[1]'}), False, 'from tensorflow import keras\n'), (117, 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', ([], {'rate': '(0.2)'}), False, 'from tensorflow import keras\n'), (119, 'tensorflow.keras.layers.Lambda', 'keras.layers.Lambda', (['(lambda x: x[:, :, :256])'], {}), False, 'from tensorflow import keras\n'), (120, 'tensorflow.keras.layers.Lambda', 'keras.layers.Lambda', (['(lambda x: x[:, :, 256:])'], {}), False, 'from tensorflow import keras\n'), (122, 'tensorflow.keras.layers.Softmax', 'keras.layers.Softmax', ([], {}), False, 'from tensorflow import keras\n'), (123, 'tensorflow.keras.layers.Multiply', 'keras.layers.Multiply', ([], {}), False, 'from tensorflow import keras\n'), (125, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(256)', 'activation': '"""sigmoid"""'}), False, 'from tensorflow import keras\n'), (126, 'tensorflow_addons.layers.InstanceNormalization', 'tfa.layers.InstanceNormalization', ([], {}), True, 'import tensorflow_addons as tfa\n'), (128, 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), False, 'from tensorflow import keras\n'), (129, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(1)'], {}), False, 'from tensorflow import keras\n'), (141, 'tensorflow.keras.metrics.RootMeanSquaredError', 'RootMeanSquaredError', ([], {}), False, 'from tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError\n'), (141, 'tensorflow.keras.metrics.MeanAbsoluteError', 'MeanAbsoluteError', ([], {}), False, 'from tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError\n')]
svenvanderburg/EEG_age_prediction | 958e8d6445bf277a445608e05d779315dbd9b376 | #!/usr/bin/env python
# ================ IMPORT LIBRARIES ================ #
import sys, os, fnmatch, time
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
sys.path.insert(0, os.path.dirname(os.getcwd()))
from dataset_generator import DataGenerator
import tensorflow as tf
import tensorflow_addons as tfa
from tensorflow import keras
from tensorflow.keras import layers, Input, Sequential
from tensorflow.keras.layers import Bidirectional, LSTM, Dropout, BatchNormalization, Dense, Conv1D, LeakyReLU, AveragePooling1D, Flatten, Reshape, MaxPooling1D
from tensorflow.keras.optimizers import Adam, Adadelta, SGD
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError
n_timesteps = 501
n_features = 30
n_outputs = 1
COUNT_MODEL = "FINAL" # This will be appended to the saved model's name. To make sure to not overwrite models, increase this.
MAX_QUEUE_SIZE = 5000
WORKERS = 6
input_shape = (n_timesteps, n_features)
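# Shape convention: one example is an EEG epoch of n_timesteps (501) samples by
# n_features (30) channels, and the single output unit regresses the subject's
# age (see the age_* metadata handling below).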
# Input and output folders
PATH_DATA_PROCESSED_DL = sys.argv[1]
PATH_OUTPUT = sys.argv[2]
# ================ INITIAL LOGS ================ #
print("LOGGING: Imported all modules")
# ================ LOAD PREPROCESSED DATA ================ #
# Step 1: Get all the files in the output folder
file_names = os.listdir(PATH_DATA_PROCESSED_DL)
# Step 2: Get the full paths of the files (without extensions)
files = [os.path.splitext(os.path.join(PATH_DATA_PROCESSED_DL, file_name))[0] for file_name in fnmatch.filter(file_names, "*.zarr")]
# Step 3: Load all the metadata
frames = []
for idx, feature_file in enumerate(files):
df_metadata = pd.read_csv(feature_file.replace("processed_raw_", "processed_metadata_") + ".csv")
frames.append(df_metadata)
df_metadata = pd.concat(frames)
# Step 4: Add missing age information based on the age group the subject is in
df_metadata['age_months'].fillna(df_metadata['age_group'], inplace=True)
df_metadata['age_days'].fillna(df_metadata['age_group']*30, inplace=True)
df_metadata['age_years'].fillna(df_metadata['age_group']/12, inplace=True)
# Step 5: List all the unique subject IDs
subject_ids = sorted(list(set(df_metadata["code"].tolist())))
# Step 6: Split the subjects into train, val and test
IDs_train, IDs_temp = train_test_split(subject_ids, test_size=0.3, random_state=42)
IDs_test, IDs_val = train_test_split(IDs_temp, test_size=0.5, random_state=42)
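# Two-stage split: 30% of subjects are held out and then divided evenly, giving
# a 70/15/15 train/test/val partition at the subject level, so no subject's
# recordings leak across partitions.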
# Step 7: Initialize DataGenerators
train_generator_noise = DataGenerator(list_IDs = IDs_train,
BASE_PATH = PATH_DATA_PROCESSED_DL,
metadata = df_metadata,
n_average = 30,
batch_size = 10,
gaussian_noise=0.01,
iter_per_epoch = 30,
n_timepoints = 501,
n_channels=30,
shuffle=True)
val_generator = DataGenerator(list_IDs = IDs_val,
BASE_PATH = PATH_DATA_PROCESSED_DL,
metadata = df_metadata,
n_average = 30,
batch_size = 10,
iter_per_epoch = 100,
n_timepoints = 501,
n_channels=30,
shuffle=True)
print("LOGGING: Loaded all data and created generators")
# ================ InceptionTime model ================ #
try:
class Regressor_Inception:
def __init__(self, output_directory, input_shape, verbose=False, build=True, batch_size=64,
nb_filters=32, use_residual=True, use_bottleneck=True, depth=6, kernel_size=41, nb_epochs=1500):
self.output_directory = output_directory
self.nb_filters = nb_filters
self.use_residual = use_residual
self.use_bottleneck = use_bottleneck
self.depth = depth
self.kernel_size = kernel_size - 1
self.callbacks = None
self.batch_size = batch_size
self.bottleneck_size = 32
self.nb_epochs = nb_epochs
            if build:
                self.model = self.build_model(input_shape)
                if verbose:
                    self.model.summary()
                self.verbose = verbose
                self.model.save_weights(self.output_directory + '/inception_model_init.hdf5')
def _inception_module(self, input_tensor, stride=1, activation='linear'):
if self.use_bottleneck and int(input_tensor.shape[-1]) > 1:
input_inception = tf.keras.layers.Conv1D(filters=self.bottleneck_size, kernel_size=1,
padding='same', activation=activation, use_bias=False)(input_tensor)
else:
input_inception = input_tensor
# kernel_size_s = [3, 5, 8, 11, 17]
kernel_size_s = [self.kernel_size // (2 ** i) for i in range(3)]
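            # With the default kernel_size=41 (stored as 40 after the -1 in
            # __init__), the parallel branches use kernel sizes [40, 20, 10].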
conv_list = []
for i in range(len(kernel_size_s)):
conv_list.append(tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=kernel_size_s[i],
strides=stride, padding='same', activation=activation, use_bias=False)(
input_inception))
max_pool_1 = tf.keras.layers.MaxPool1D(pool_size=3, strides=stride, padding='same')(input_tensor)
conv_6 = tf.keras.layers.Conv1D(filters=self.nb_filters, kernel_size=1,
padding='same', activation=activation, use_bias=False)(max_pool_1)
conv_list.append(conv_6)
x = tf.keras.layers.Concatenate(axis=2)(conv_list)
x = tf.keras.layers.BatchNormalization()(x)
x = tf.keras.layers.Activation(activation='relu')(x)
return x
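        # Residual shortcut: a 1x1 convolution projects the block input to the
        # output's channel count before the add + ReLU.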
def _shortcut_layer(self, input_tensor, out_tensor):
shortcut_y = tf.keras.layers.Conv1D(filters=int(out_tensor.shape[-1]), kernel_size=1,
padding='same', use_bias=False)(input_tensor)
shortcut_y = tf.keras.layers.BatchNormalization()(shortcut_y)
x = tf.keras.layers.Add()([shortcut_y, out_tensor])
x = tf.keras.layers.Activation('relu')(x)
return x
def build_model(self, input_shape):
input_layer = tf.keras.layers.Input(input_shape)
x = input_layer
input_res = input_layer
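            # Stack `depth` inception modules; with use_residual, a shortcut is
            # spliced in after every third module (d % 3 == 2).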
for d in range(self.depth):
x = self._inception_module(x)
if self.use_residual and d % 3 == 2:
x = self._shortcut_layer(input_res, x)
input_res = x
pooling_layer = tf.keras.layers.AveragePooling1D(pool_size=50)(x)
flat_layer = tf.keras.layers.Flatten()(pooling_layer)
dense_layer = tf.keras.layers.Dense(128, activation='relu')(flat_layer)
output_layer = tf.keras.layers.Dense(1)(dense_layer)
model = tf.keras.models.Model(inputs=input_layer, outputs=output_layer)
return model
model = Regressor_Inception(PATH_OUTPUT, input_shape, verbose=False).model
optimizer = Adam(learning_rate=0.01)
model.compile(loss='mean_squared_error',
optimizer=optimizer,
metrics=[RootMeanSquaredError(), MeanAbsoluteError()])
output_filename = f'Inception_regressor_{COUNT_MODEL}'
output_file = os.path.join(PATH_OUTPUT, output_filename)
checkpointer = ModelCheckpoint(filepath = output_file + ".hdf5", monitor='val_loss', verbose=1, save_best_only=True)
earlystopper = EarlyStopping(monitor='val_loss', patience=100, verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=20, min_lr=0.0001, verbose=1)
epochs = 1500
# fit network
print("LOGGING: Starting InceptionTime model training")
history = model.fit(x=train_generator_noise,
validation_data=val_generator,
epochs=epochs,
verbose=2,
max_queue_size=MAX_QUEUE_SIZE,
workers=WORKERS,
callbacks = [checkpointer, earlystopper, reduce_lr])
print("LOGGING: Finished InceptionTime model training")
except Exception as e:
print("LOGGING: Failed InceptionTime model training:")
print(e)
pass | [
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.Add",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.callbacks.ModelCheckpoint",
"pandas.concat",
"tensorflow.keras.layers.AveragePooling1D",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.MaxPool1D",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.metrics.MeanAbsoluteError",
"tensorflow.keras.metrics.RootMeanSquaredError",
"tensorflow.keras.layers.Input"
] | scripts/DL_final_InceptionTime_regressor.py | [(44, 'os.listdir', 'os.listdir', (['PATH_DATA_PROCESSED_DL'], {}), False, 'import sys, os, fnmatch, time\n'), (56, 'pandas.concat', 'pd.concat', (['frames'], {}), True, 'import pandas as pd\n'), (67, 'sklearn.model_selection.train_test_split', 'train_test_split', (['subject_ids'], {'test_size': '(0.3)', 'random_state': '(42)'}), False, 'from sklearn.model_selection import train_test_split\n'), (68, 'sklearn.model_selection.train_test_split', 'train_test_split', (['IDs_temp'], {'test_size': '(0.5)', 'random_state': '(42)'}), False, 'from sklearn.model_selection import train_test_split\n'), (71, 'dataset_generator.DataGenerator', 'DataGenerator', ([], {'list_IDs': 'IDs_train', 'BASE_PATH': 'PATH_DATA_PROCESSED_DL', 'metadata': 'df_metadata', 'n_average': '(30)', 'batch_size': '(10)', 'gaussian_noise': '(0.01)', 'iter_per_epoch': '(30)', 'n_timepoints': '(501)', 'n_channels': '(30)', 'shuffle': '(True)'}), False, 'from dataset_generator import DataGenerator\n'), (82, 'dataset_generator.DataGenerator', 'DataGenerator', ([], {'list_IDs': 'IDs_val', 'BASE_PATH': 'PATH_DATA_PROCESSED_DL', 'metadata': 'df_metadata', 'n_average': '(30)', 'batch_size': '(10)', 'iter_per_epoch': '(100)', 'n_timepoints': '(501)', 'n_channels': '(30)', 'shuffle': '(True)'}), False, 'from dataset_generator import DataGenerator\n'), (185, 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(0.01)'}), False, 'from tensorflow.keras.optimizers import Adam, Adadelta, SGD\n'), (192, 'os.path.join', 'os.path.join', (['PATH_OUTPUT', 'output_filename'], {}), False, 'import sys, os, fnmatch, time\n'), (194, 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': "(output_file + '.hdf5')", 'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)'}), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n'), (195, 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(100)', 'verbose': '(1)'}), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n'), (196, 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.5)', 'patience': '(20)', 'min_lr': '(0.0001)', 'verbose': '(1)'}), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n'), (10, 'os.getcwd', 'os.getcwd', ([], {}), False, 'import sys, os, fnmatch, time\n'), (47, 'fnmatch.filter', 'fnmatch.filter', (['file_names', '"""*.zarr"""'], {}), False, 'import sys, os, fnmatch, time\n'), (47, 'os.path.join', 'os.path.join', (['PATH_DATA_PROCESSED_DL', 'file_name'], {}), False, 'import sys, os, fnmatch, time\n'), (161, 'tensorflow.keras.layers.Input', 'tf.keras.layers.Input', (['input_shape'], {}), True, 'import tensorflow as tf\n'), (179, 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'input_layer', 'outputs': 'output_layer'}), True, 'import tensorflow as tf\n'), (139, 'tensorflow.keras.layers.MaxPool1D', 'tf.keras.layers.MaxPool1D', ([], {'pool_size': '(3)', 'strides': 'stride', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (141, 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', ([], {'filters': 'self.nb_filters', 'kernel_size': '(1)', 'padding': '"""same"""', 'activation': 'activation', 'use_bias': '(False)'}), True, 'import tensorflow as tf\n'), (146, 'tensorflow.keras.layers.Concatenate', 'tf.keras.layers.Concatenate', ([], {'axis': '(2)'}), True, 'import tensorflow as tf\n'), (147, 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), True, 'import tensorflow as tf\n'), (148, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', ([], {'activation': '"""relu"""'}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), True, 'import tensorflow as tf\n'), (156, 'tensorflow.keras.layers.Add', 'tf.keras.layers.Add', ([], {}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), True, 'import tensorflow as tf\n'), (174, 'tensorflow.keras.layers.AveragePooling1D', 'tf.keras.layers.AveragePooling1D', ([], {'pool_size': '(50)'}), True, 'import tensorflow as tf\n'), (175, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), True, 'import tensorflow as tf\n'), (176, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), True, 'import tensorflow as tf\n'), (189, 'tensorflow.keras.metrics.RootMeanSquaredError', 'RootMeanSquaredError', ([], {}), False, 'from tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError\n'), (189, 'tensorflow.keras.metrics.MeanAbsoluteError', 'MeanAbsoluteError', ([], {}), False, 'from tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError\n'), (124, 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', ([], {'filters': 'self.bottleneck_size', 'kernel_size': '(1)', 'padding': '"""same"""', 'activation': 'activation', 'use_bias': '(False)'}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.keras.layers.Conv1D', 'tf.keras.layers.Conv1D', ([], {'filters': 'self.nb_filters', 'kernel_size': 'kernel_size_s[i]', 'strides': 'stride', 'padding': '"""same"""', 'activation': 'activation', 'use_bias': '(False)'}), True, 'import tensorflow as tf\n')]
svenvanderburg/EEG_age_prediction | 958e8d6445bf277a445608e05d779315dbd9b376 | import sys, os, fnmatch, csv
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.utils import shuffle
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.layers import Dropout, Dense, BatchNormalization
from tensorflow.keras.optimizers import Adam, Adadelta
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau
from tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError
sys.path.insert(0, os.path.dirname(os.getcwd()))
# Input and output folders
PATH_DATA_PROCESSED_ML= sys.argv[1]
PATH_OUTPUT = sys.argv[2]
MAX_QUEUE_SIZE = 5000
WORKERS = 6
# Step 1: Get all the files in the output folder
file_names = os.listdir(PATH_DATA_PROCESSED_ML)
# Step 2: Get the full paths of the files (without extensions)
files = [os.path.splitext(os.path.join(PATH_DATA_PROCESSED_ML, file_name))[0] for file_name in fnmatch.filter(file_names, "*.h5")]
# Step 3: Load the features
frames = []
for idx, feature_file in enumerate(files):
df_features = pd.read_hdf(feature_file + ".h5")
df_metadata = pd.read_csv(feature_file.replace("extracted_features_", "processed_data_") + ".csv")
# Step 4: Assign labels
df_features['label'] = df_metadata['age_months'][0]
# Step 5: Assign subject code
df_features['code'] = df_metadata['code'][0]
frames.append(df_features)
df = pd.concat(frames)
# Step 6: List all the unique subject IDs
subject_ids = sorted(list(set(df["code"].tolist())))
IDs_train, IDs_temp = train_test_split(subject_ids, test_size=0.3, random_state=42)
IDs_test, IDs_val = train_test_split(IDs_temp, test_size=0.5, random_state=42)
# Step 7: Split the DataFrames into train, validation and test
df_train = df[df['code'].isin(IDs_train)]
df_val = df[df['code'].isin(IDs_val)]
df_test = df[df['code'].isin(IDs_test)]
feature_names = df.columns.values
X_train = df_train.drop(['label', 'code'], axis=1).reset_index(drop=True)
y_train = df_train['label'].reset_index(drop=True)
codes_train = df_train['code'].reset_index(drop=True)
X_val = df_val.drop(['label', 'code'], axis=1).reset_index(drop=True)
y_val = df_val['label'].reset_index(drop=True)
codes_val = df_val['code'].reset_index(drop=True)
X_test = df_test.drop(['label', 'code'], axis=1).reset_index(drop=True)
y_test = df_test['label'].reset_index(drop=True)
codes_test = df_test['code'].reset_index(drop=True)
scaler = StandardScaler()
# MARK: reducing from 64 bit float to 32 bit float, to reduce memory usage
X_train = pd.DataFrame(scaler.fit_transform(X_train)).astype('float32')
X_val = pd.DataFrame(scaler.transform(X_val)).astype('float32')  # transform only: reuse the scaler fitted on the training split to avoid leakage
X_test = pd.DataFrame(scaler.transform(X_test)).astype('float32')
del(file_names, files, df, frames, df_features, df_metadata, df_train, df_test, df_val, IDs_train, IDs_val, IDs_test, IDs_temp)
input_shape=(450, )
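# input_shape must match the width of the scaled feature matrix; 450 is
# presumably the number of extracted features per example (X_train.shape[1]).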
try:
def fully_connected_model():
model = keras.Sequential()
model.add(Dense(300, activation='tanh', input_shape=input_shape))
model.add(BatchNormalization())
model.add(Dropout(0.3))
model.add(Dense(200, activation='tanh'))
model.add(BatchNormalization())
model.add(Dense(1))
return model
model = fully_connected_model()
optimizer = Adadelta(learning_rate=0.01)
model.compile(loss='mean_squared_error',
optimizer=optimizer,
metrics=[RootMeanSquaredError(), MeanAbsoluteError()])
output_filename = 'FC_regressor_03'
output_file = os.path.join(PATH_OUTPUT, output_filename)
checkpointer = ModelCheckpoint(filepath = output_file + ".hdf5", monitor='val_loss', verbose=1, save_best_only=True)
earlystopper = EarlyStopping(monitor='val_loss', patience=1000, verbose=1)
reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.5, patience=50, min_lr=0.0001, verbose=1)
epochs = 5000
print("LOGGING: Starting FC_regressor_03 training")
# fit network
history = model.fit(x=X_train,
y=y_train,
validation_data=(X_val, y_val),
epochs=epochs,
verbose=2,
max_queue_size=MAX_QUEUE_SIZE,
workers=WORKERS,
callbacks = [checkpointer, earlystopper, reduce_lr])
except Exception as e:
print("LOGGING: Failed FC_regressor_03 training:")
print(e)
pass
| [
"tensorflow.keras.callbacks.ModelCheckpoint",
"pandas.concat",
"pandas.read_hdf",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.ReduceLROnPlateau",
"tensorflow.keras.Sequential",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.optimizers.Adadelta",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.metrics.MeanAbsoluteError",
"tensorflow.keras.metrics.RootMeanSquaredError",
"tensorflow.keras.layers.Dropout",
"sklearn.preprocessing.StandardScaler",
"tensorflow.keras.callbacks.EarlyStopping"
] | scripts/ML_train_03.py | [(27, 'os.listdir', 'os.listdir', (['PATH_DATA_PROCESSED_ML'], {}), False, 'import sys, os, fnmatch, csv\n'), (46, 'pandas.concat', 'pd.concat', (['frames'], {}), True, 'import pandas as pd\n'), (51, 'sklearn.model_selection.train_test_split', 'train_test_split', (['subject_ids'], {'test_size': '(0.3)', 'random_state': '(42)'}), False, 'from sklearn.model_selection import train_test_split\n'), (52, 'sklearn.model_selection.train_test_split', 'train_test_split', (['IDs_temp'], {'test_size': '(0.5)', 'random_state': '(42)'}), False, 'from sklearn.model_selection import train_test_split\n'), (73, 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), False, 'from sklearn.preprocessing import StandardScaler\n'), (36, 'pandas.read_hdf', 'pd.read_hdf', (["(feature_file + '.h5')"], {}), True, 'import pandas as pd\n'), (101, 'tensorflow.keras.optimizers.Adadelta', 'Adadelta', ([], {'learning_rate': '(0.01)'}), False, 'from tensorflow.keras.optimizers import Adam, Adadelta\n'), (107, 'os.path.join', 'os.path.join', (['PATH_OUTPUT', 'output_filename'], {}), False, 'import sys, os, fnmatch, csv\n'), (109, 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': "(output_file + '.hdf5')", 'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)'}), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n'), (110, 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(1000)', 'verbose': '(1)'}), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n'), (111, 'tensorflow.keras.callbacks.ReduceLROnPlateau', 'ReduceLROnPlateau', ([], {'monitor': '"""val_loss"""', 'factor': '(0.5)', 'patience': '(50)', 'min_lr': '(0.0001)', 'verbose': '(1)'}), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping, ReduceLROnPlateau\n'), (17, 'os.getcwd', 'os.getcwd', ([], {}), False, 'import sys, os, fnmatch, csv\n'), (30, 'fnmatch.filter', 'fnmatch.filter', (['file_names', '"""*.h5"""'], {}), False, 'import sys, os, fnmatch, csv\n'), (86, 'tensorflow.keras.Sequential', 'keras.Sequential', ([], {}), False, 'from tensorflow import keras\n'), (30, 'os.path.join', 'os.path.join', (['PATH_DATA_PROCESSED_ML', 'file_name'], {}), False, 'import sys, os, fnmatch, csv\n'), (88, 'tensorflow.keras.layers.Dense', 'Dense', (['(300)'], {'activation': '"""tanh"""', 'input_shape': 'input_shape'}), False, 'from tensorflow.keras.layers import Dropout, Dense, BatchNormalization\n'), (89, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), False, 'from tensorflow.keras.layers import Dropout, Dense, BatchNormalization\n'), (90, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.3)'], {}), False, 'from tensorflow.keras.layers import Dropout, Dense, BatchNormalization\n'), (92, 'tensorflow.keras.layers.Dense', 'Dense', (['(200)'], {'activation': '"""tanh"""'}), False, 'from tensorflow.keras.layers import Dropout, Dense, BatchNormalization\n'), (93, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), False, 'from tensorflow.keras.layers import Dropout, Dense, BatchNormalization\n'), (95, 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {}), False, 'from tensorflow.keras.layers import Dropout, Dense, BatchNormalization\n'), (104, 'tensorflow.keras.metrics.RootMeanSquaredError', 'RootMeanSquaredError', ([], {}), False, 'from tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError\n'), (104, 'tensorflow.keras.metrics.MeanAbsoluteError', 'MeanAbsoluteError', ([], {}), False, 'from tensorflow.keras.metrics import RootMeanSquaredError, MeanAbsoluteError\n')]
oliviaweng/imgclsmob | a1f1f52eecbb841fa878bff4d3c311b79864835d | """
GhostNet for ImageNet-1K, implemented in TensorFlow.
Original paper: 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907.
"""
__all__ = ['GhostNet', 'ghostnet']
import os
import math
import tensorflow as tf
import tensorflow.keras.layers as nn
from .common import round_channels, conv1x1, conv1x1_block, conv3x3_block, dwconv3x3_block, dwconv5x5_block,\
dwsconv3x3_block, SEBlock, get_channel_axis, flatten, is_channels_first
class GhostHSigmoid(nn.Layer):
"""
Approximated sigmoid function, specific for GhostNet.
"""
def __init__(self, **kwargs):
super(GhostHSigmoid, self).__init__(**kwargs)
def call(self, x, training=None):
return tf.clip_by_value(x, 0.0, 1.0)
class GhostConvBlock(nn.Layer):
"""
GhostNet specific convolution block.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
activation : function or str or None, default 'relu'
Activation function or name of activation function.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
activation="relu",
data_format="channels_last",
**kwargs):
super(GhostConvBlock, self).__init__(**kwargs)
self.data_format = data_format
main_out_channels = math.ceil(0.5 * out_channels)
cheap_out_channels = out_channels - main_out_channels
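        # Ghost module idea: compute roughly half of the output channels with an
        # ordinary 1x1 convolution, derive the remaining "ghost" channels from
        # them with a cheap depthwise 3x3, then concatenate the two halves.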
self.main_conv = conv1x1_block(
in_channels=in_channels,
out_channels=main_out_channels,
activation=activation,
data_format=data_format,
name="main_conv")
self.cheap_conv = dwconv3x3_block(
in_channels=main_out_channels,
out_channels=cheap_out_channels,
activation=activation,
data_format=data_format,
name="cheap_conv")
def call(self, x, training=None):
x = self.main_conv(x, training=training)
y = self.cheap_conv(x, training=training)
return tf.concat([x, y], axis=get_channel_axis(self.data_format))
class GhostExpBlock(nn.Layer):
"""
GhostNet expansion block for residual path in GhostNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the convolution.
use_kernel3 : bool
Whether to use 3x3 (instead of 5x5) kernel.
exp_factor : float
Expansion factor.
use_se : bool
Whether to use SE-module.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
strides,
use_kernel3,
exp_factor,
use_se,
data_format="channels_last",
**kwargs):
super(GhostExpBlock, self).__init__(**kwargs)
self.use_dw_conv = (strides != 1)
self.use_se = use_se
mid_channels = int(math.ceil(exp_factor * in_channels))
        self.exp_conv = GhostConvBlock(
            in_channels=in_channels,
            out_channels=mid_channels,
            data_format=data_format,
            name="exp_conv")
if self.use_dw_conv:
dw_conv_class = dwconv3x3_block if use_kernel3 else dwconv5x5_block
self.dw_conv = dw_conv_class(
in_channels=mid_channels,
out_channels=mid_channels,
strides=strides,
activation=None,
data_format=data_format,
name="dw_conv")
if self.use_se:
self.se = SEBlock(
channels=mid_channels,
reduction=4,
out_activation=GhostHSigmoid(),
data_format=data_format,
name="se")
self.pw_conv = GhostConvBlock(
in_channels=mid_channels,
out_channels=out_channels,
activation=None,
data_format=data_format,
name="pw_conv")
def call(self, x, training=None):
x = self.exp_conv(x, training=training)
if self.use_dw_conv:
x = self.dw_conv(x, training=training)
if self.use_se:
x = self.se(x)
x = self.pw_conv(x, training=training)
return x
class GhostUnit(nn.Layer):
"""
GhostNet unit.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
strides : int or tuple/list of 2 int
Strides of the second convolution layer.
use_kernel3 : bool
Whether to use 3x3 (instead of 5x5) kernel.
exp_factor : float
Expansion factor.
use_se : bool
Whether to use SE-module.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
strides,
use_kernel3,
exp_factor,
use_se,
data_format="channels_last",
**kwargs):
super(GhostUnit, self).__init__(**kwargs)
self.resize_identity = (in_channels != out_channels) or (strides != 1)
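        # The identity path needs a projection whenever the unit changes the
        # spatial resolution or channel count; a depthwise-separable 3x3
        # convolution handles both cases.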
self.body = GhostExpBlock(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_kernel3=use_kernel3,
exp_factor=exp_factor,
use_se=use_se,
data_format=data_format,
name="body")
if self.resize_identity:
self.identity_conv = dwsconv3x3_block(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
pw_activation=None,
data_format=data_format,
name="identity_conv")
def call(self, x, training=None):
if self.resize_identity:
identity = self.identity_conv(x, training=training)
else:
identity = x
x = self.body(x, training=training)
x = x + identity
return x
class GhostClassifier(nn.Layer):
"""
GhostNet classifier.
Parameters:
----------
in_channels : int
Number of input channels.
out_channels : int
Number of output channels.
mid_channels : int
Number of middle channels.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
in_channels,
out_channels,
mid_channels,
data_format="channels_last",
**kwargs):
super(GhostClassifier, self).__init__(**kwargs)
self.conv1 = conv1x1_block(
in_channels=in_channels,
out_channels=mid_channels,
data_format=data_format,
name="conv1")
self.conv2 = conv1x1(
in_channels=mid_channels,
out_channels=out_channels,
use_bias=True,
data_format=data_format,
name="conv2")
def call(self, x, training=None):
x = self.conv1(x, training=training)
x = self.conv2(x)
return x
class GhostNet(tf.keras.Model):
"""
GhostNet model from 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907.
Parameters:
----------
channels : list of list of int
Number of output channels for each unit.
init_block_channels : int
Number of output channels for the initial unit.
final_block_channels : int
Number of output channels for the final block of the feature extractor.
classifier_mid_channels : int
Number of middle channels for classifier.
kernels3 : list of list of int/bool
Using 3x3 (instead of 5x5) kernel for each unit.
exp_factors : list of list of int
Expansion factor for each unit.
use_se : list of list of int/bool
Using SE-block flag for each unit.
first_stride : bool
Whether to use stride for the first stage.
in_channels : int, default 3
Number of input channels.
in_size : tuple of two ints, default (224, 224)
Spatial size of the expected input image.
classes : int, default 1000
Number of classification classes.
data_format : str, default 'channels_last'
The ordering of the dimensions in tensors.
"""
def __init__(self,
channels,
init_block_channels,
final_block_channels,
classifier_mid_channels,
kernels3,
exp_factors,
use_se,
first_stride,
in_channels=3,
in_size=(224, 224),
classes=1000,
data_format="channels_last",
**kwargs):
super(GhostNet, self).__init__(**kwargs)
self.in_size = in_size
self.classes = classes
self.data_format = data_format
self.features = tf.keras.Sequential(name="features")
self.features.add(conv3x3_block(
in_channels=in_channels,
out_channels=init_block_channels,
strides=2,
data_format=data_format,
name="init_block"))
in_channels = init_block_channels
for i, channels_per_stage in enumerate(channels):
stage = tf.keras.Sequential(name="stage{}".format(i + 1))
for j, out_channels in enumerate(channels_per_stage):
strides = 2 if (j == 0) and ((i != 0) or first_stride) else 1
use_kernel3 = kernels3[i][j] == 1
exp_factor = exp_factors[i][j]
use_se_flag = use_se[i][j] == 1
stage.add(GhostUnit(
in_channels=in_channels,
out_channels=out_channels,
strides=strides,
use_kernel3=use_kernel3,
exp_factor=exp_factor,
use_se=use_se_flag,
data_format=data_format,
name="unit{}".format(j + 1)))
in_channels = out_channels
self.features.add(stage)
self.features.add(conv1x1_block(
in_channels=in_channels,
out_channels=final_block_channels,
data_format=data_format,
name="final_block"))
in_channels = final_block_channels
self.features.add(nn.AveragePooling2D(
pool_size=7,
strides=1,
data_format=data_format,
name="final_pool"))
self.output1 = GhostClassifier(
in_channels=in_channels,
out_channels=classes,
mid_channels=classifier_mid_channels,
data_format=data_format,
name="output1")
def call(self, x, training=None):
x = self.features(x, training=training)
x = self.output1(x, training=training)
x = flatten(x, self.data_format)
return x
def get_ghostnet(width_scale=1.0,
model_name=None,
pretrained=False,
root=os.path.join("~", ".tensorflow", "models"),
**kwargs):
"""
Create GhostNet model with specific parameters.
Parameters:
----------
width_scale : float, default 1.0
Scale factor for width of layers.
model_name : str or None, default None
Model name for loading pretrained model.
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
init_block_channels = 16
channels = [[16], [24, 24], [40, 40], [80, 80, 80, 80, 112, 112], [160, 160, 160, 160, 160]]
kernels3 = [[1], [1, 1], [0, 0], [1, 1, 1, 1, 1, 1], [0, 0, 0, 0, 0]]
exp_factors = [[1], [3, 3], [3, 3], [6, 2.5, 2.3, 2.3, 6, 6], [6, 6, 6, 6, 6]]
use_se = [[0], [0, 0], [1, 1], [0, 0, 0, 0, 1, 1], [1, 0, 1, 0, 1]]
final_block_channels = 960
classifier_mid_channels = 1280
first_stride = False
if width_scale != 1.0:
channels = [[round_channels(cij * width_scale, divisor=4) for cij in ci] for ci in channels]
init_block_channels = round_channels(init_block_channels * width_scale, divisor=4)
if width_scale > 1.0:
final_block_channels = round_channels(final_block_channels * width_scale, divisor=4)
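    # Channel counts are rounded to multiples of 4 when scaling; the final block
    # is widened only when scaling up, mirroring the reference configuration.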
net = GhostNet(
channels=channels,
init_block_channels=init_block_channels,
final_block_channels=final_block_channels,
classifier_mid_channels=classifier_mid_channels,
kernels3=kernels3,
exp_factors=exp_factors,
use_se=use_se,
first_stride=first_stride,
**kwargs)
if pretrained:
if (model_name is None) or (not model_name):
raise ValueError("Parameter `model_name` should be properly initialized for loading pretrained model.")
from .model_store import get_model_file
in_channels = kwargs["in_channels"] if ("in_channels" in kwargs) else 3
input_shape = (1,) + (in_channels,) + net.in_size if net.data_format == "channels_first" else\
(1,) + net.in_size + (in_channels,)
net.build(input_shape=input_shape)
net.load_weights(
filepath=get_model_file(
model_name=model_name,
local_model_store_dir_path=root))
return net
def ghostnet(**kwargs):
"""
GhostNet model from 'GhostNet: More Features from Cheap Operations,' https://arxiv.org/abs/1911.11907.
Parameters:
----------
pretrained : bool, default False
Whether to load the pretrained weights for model.
root : str, default '~/.tensorflow/models'
Location for keeping the model parameters.
"""
return get_ghostnet(model_name="ghostnet", **kwargs)
def _test():
import numpy as np
import tensorflow.keras.backend as K
data_format = "channels_last"
pretrained = False
models = [
ghostnet,
]
for model in models:
net = model(pretrained=pretrained, data_format=data_format)
        batch_size = 14
        x = tf.random.normal((batch_size, 3, 224, 224) if is_channels_first(data_format) else (batch_size, 224, 224, 3))
        y = net(x)
        assert (tuple(y.shape.as_list()) == (batch_size, 1000))
weight_count = sum([np.prod(K.get_value(w).shape) for w in net.trainable_weights])
print("m={}, {}".format(model.__name__, weight_count))
assert (model != ghostnet or weight_count == 5180840)
if __name__ == "__main__":
_test()
| [
"tensorflow.clip_by_value",
"tensorflow.keras.backend.get_value",
"tensorflow.keras.layers.AveragePooling2D",
"tensorflow.keras.Sequential"
] | tensorflow2/tf2cv/models/ghostnet.py | [(350, 'os.path.join', 'os.path.join', (['"""~"""', '""".tensorflow"""', '"""models"""'], {}), False, 'import os\n'), (24, 'tensorflow.clip_by_value', 'tf.clip_by_value', (['x', '(0.0)', '(1.0)'], {}), True, 'import tensorflow as tf\n'), (50, 'math.ceil', 'math.ceil', (['(0.5 * out_channels)'], {}), False, 'import math\n'), (295, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', ([], {'name': '"""features"""'}), True, 'import tensorflow as tf\n'), (105, 'math.ceil', 'math.ceil', (['(exp_factor * in_channels)'], {}), False, 'import math\n'), (327, 'tensorflow.keras.layers.AveragePooling2D', 'nn.AveragePooling2D', ([], {'pool_size': '(7)', 'strides': '(1)', 'data_format': 'data_format', 'name': '"""final_pool"""'}), True, 'import tensorflow.keras.layers as nn\n'), (442, 'tensorflow.keras.backend.get_value', 'K.get_value', (['w'], {}), True, 'import tensorflow.keras.backend as K\n')] |
aasir22/tools_classification | f5a2606f5fa07c1ebc161c467d17f4e7a04c5ebb | from tensorflow.keras.layers import Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.callbacks import ModelCheckpoint
from datetime import datetime
import numpy as np
import os
import cv2
import matplotlib.pyplot as plt
from Logger.app_logger import App_logger
from sklearn.metrics import accuracy_score,classification_report,confusion_matrix
class Training:
def __init__(self,train_path,test_path,val_path):
self.train_path = train_path
self.test_path = test_path
self.val_path = val_path
self.file_object = open("Training_Logs/ModelTrainingLog.txt", 'a+')
self.log_object = App_logger()
def train(self):
self.log_object.log(self.file_object,"Entered in to train method in Training class.Training started")
try:
x_train = []
for folder in os.listdir(self.train_path):
sub_path = self.train_path + "/" + folder
for img in os.listdir(sub_path):
image_path = sub_path + "/" + img
img_arr = cv2.imread(image_path)
if img_arr is None:
os.remove(image_path)
continue
elif img_arr.shape[0] < 224:
os.remove(image_path)
continue
else:
img_arr = cv2.resize(img_arr, (224, 224))
x_train.append(img_arr)
x_test = []
for folder in os.listdir(self.test_path):
sub_path = self.test_path + "/" + folder
for img in os.listdir(sub_path):
image_path = sub_path + "/" + img
img_arr = cv2.imread(image_path)
if img_arr is None:
os.remove(image_path)
continue
elif img_arr.shape[0] < 224:
os.remove(image_path)
continue
else:
img_arr = cv2.resize(img_arr, (224, 224))
x_test.append(img_arr)
x_val = []
for folder in os.listdir(self.val_path):
sub_path = self.val_path + "/" + folder
for img in os.listdir(sub_path):
image_path = sub_path + "/" + img
img_arr = cv2.imread(image_path)
if img_arr is None:
os.remove(image_path)
continue
elif img_arr.shape[0] < 224:
os.remove(image_path)
continue
else:
img_arr = cv2.resize(img_arr, (224, 224))
x_val.append(img_arr)
self.log_object.log(self.file_object, "Entered in to train method in Training class.train,test,val split successfull")
train_x = np.array(x_train) / 255.0
test_x = np.array(x_test) / 255.0
val_x = np.array(x_val) / 255.0
train_datagen = ImageDataGenerator(rescale=1. / 255)
test_datagen = ImageDataGenerator(rescale=1. / 255)
val_datagen = ImageDataGenerator(rescale=1. / 255)
training_set = train_datagen.flow_from_directory(self.train_path,
target_size=(224, 224),
batch_size=32,
class_mode='sparse')
test_set = test_datagen.flow_from_directory(self.test_path,
target_size=(224, 224),
batch_size=32,
class_mode='sparse')
val_set = val_datagen.flow_from_directory(self.val_path,
target_size=(224, 224),
batch_size=32,
class_mode='sparse')
train_y = training_set.classes
test_y = test_set.classes
val_y = val_set.classes
IMAGE_SIZE = [224, 224]
vgg = VGG19(input_shape= IMAGE_SIZE + [3],weights='imagenet',include_top=False)
self.log_object.log(self.file_object, "Entered in to train method in Training class. Model successfully initialized")
for layer in vgg.layers:
layer.trainable = False
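            # Transfer learning: the ImageNet convolutional base stays frozen and
            # only the new 5-way softmax head below is trained.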
            x = Flatten()(vgg.output)
            prediction = Dense(5, activation='softmax')(x)
model = Model(inputs=vgg.input,outputs = prediction)
model.summary()
model.compile(loss = 'sparse_categorical_crossentropy',
optimizer='adam',metrics=['accuracy'])
self.log_object.log(self.file_object, "Entered in to train method in Training class.Model compile successfull")
file_path = 'vgg19_model/checkpoint-{epoch:02d}-{val_accuracy:.2f}.hdf5'
self.log_object.log(self.file_object,"check point directory created")
check_point = ModelCheckpoint(file_path,monitor='val_accuracy', verbose=1,save_best_only=True, mode='max')
start = datetime.now()
self.log_object.log(self.file_object, f"Entered in to train method in Training class.Training start time {start}")
history = model.fit(train_x,train_y,
validation_data= (val_x,val_y),
epochs=20,
callbacks = [check_point],
batch_size=64, shuffle=True)
duration = datetime.now() - start
self.log_object.log(self.file_object, f"Entered in to train method in Training class.Total time taken is {duration}")
model.save('mech_tools_model.h5')
self.log_object.log(self.file_object, f"Entered in to train method in Training class.model saved successfully")
# accuracies
plt.plot(history.history['accuracy'], label='train acc')
plt.plot(history.history['val_accuracy'], label='val acc')
plt.legend()
plt.savefig('vgg-acc-rps-1.png')
# loss
plt.plot(history.history['loss'], label='train loss')
plt.plot(history.history['val_loss'], label='val loss')
plt.legend()
plt.savefig('vgg-loss-rps-1.png')
self.log_object.log(self.file_object, "Entered in to train method in Training class.model evaluation started")
model.evaluate(test_x, test_y, batch_size=32)
# predict
y_pred = model.predict(test_x)
y_pred = np.argmax(y_pred, axis=1)
self.log_object.log(self.file_object, f"Entered in to train method in Training class.classification report {classification_report(y_pred, test_y)}")
self.log_object.log(self.file_object, f"Entered in to train method in Training class.confusion matrix is{confusion_matrix(y_pred, test_y)}")
except Exception as e:
# logging the unsuccessful Training
self.log_object.log(self.file_object, 'Unsuccessful End of Training')
self.log_object.log(self.file_object,f"exception occured.exception is {e}")
raise Exception
self.file_object.close()
if __name__ == "__main__":
train_path = "final_dataset/train"
test_path = "final_dataset/test"
val_path = "final_dataset/val"
train_model = Training(train_path, test_path, val_path)
train_model.train() | [
"tensorflow.keras.callbacks.ModelCheckpoint",
"matplotlib.pyplot.legend",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.models.Model",
"tensorflow.keras.applications.vgg19.VGG19",
"tensorflow.keras.layers.Dense",
"sklearn.metrics.classification_report",
"matplotlib.pyplot.savefig",
"sklearn.metrics.confusion_matrix",
"matplotlib.pyplot.plot",
"numpy.argmax",
"numpy.array",
"tensorflow.keras.layers.Flatten"
] | training.py | [(24, 'Logger.app_logger.App_logger', 'App_logger', ([], {}), False, 'from Logger.app_logger import App_logger\n'), (31, 'os.listdir', 'os.listdir', (['self.train_path'], {}), False, 'import os\n'), (50, 'os.listdir', 'os.listdir', (['self.test_path'], {}), False, 'import os\n'), (73, 'os.listdir', 'os.listdir', (['self.val_path'], {}), False, 'import os\n'), (95, 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), (96, 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), (97, 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rescale': '(1.0 / 255)'}), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), (118, 'tensorflow.keras.applications.vgg19.VGG19', 'VGG19', ([], {'input_shape': '(IMAGE_SIZE + [3])', 'weights': '"""imagenet"""', 'include_top': '(False)'}), False, 'from tensorflow.keras.applications.vgg19 import VGG19\n'), (127, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'vgg.input', 'outputs': 'prediction'}), False, 'from tensorflow.keras.models import Model\n'), (135, 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', (['file_path'], {'monitor': '"""val_accuracy"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""max"""'}), False, 'from tensorflow.keras.callbacks import ModelCheckpoint\n'), (136, 'datetime.datetime.now', 'datetime.now', ([], {}), False, 'from datetime import datetime\n'), (153, 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['accuracy']"], {'label': '"""train acc"""'}), True, 'import matplotlib.pyplot as plt\n'), (154, 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_accuracy']"], {'label': '"""val acc"""'}), True, 'import matplotlib.pyplot as plt\n'), (155, 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (156, 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""vgg-acc-rps-1.png"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (159, 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['loss']"], {'label': '"""train loss"""'}), True, 'import matplotlib.pyplot as plt\n'), (160, 'matplotlib.pyplot.plot', 'plt.plot', (["history.history['val_loss']"], {'label': '"""val loss"""'}), True, 'import matplotlib.pyplot as plt\n'), (161, 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (162, 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""vgg-loss-rps-1.png"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (169, 'numpy.argmax', 'np.argmax', (['y_pred'], {'axis': '(1)'}), True, 'import numpy as np\n'), (35, 'os.listdir', 'os.listdir', (['sub_path'], {}), False, 'import os\n'), (54, 'os.listdir', 'os.listdir', (['sub_path'], {}), False, 'import os\n'), (77, 'os.listdir', 'os.listdir', (['sub_path'], {}), False, 'import os\n'), (91, 'numpy.array', 'np.array', (['x_train'], {}), True, 'import numpy as np\n'), (92, 'numpy.array', 'np.array', (['x_test'], {}), True, 'import numpy as np\n'), (93, 'numpy.array', 'np.array', (['x_val'], {}), True, 'import numpy as np\n'), (124, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import Dense, Flatten\n'), (126, 'tensorflow.keras.layers.Dense', 'Dense', (['(5)'], {'activation': '"""softmax"""'}), False, 'from tensorflow.keras.layers import Dense, Flatten\n'), (144, 'datetime.datetime.now', 'datetime.now', ([], {}), False, 'from datetime import datetime\n'), (37, 'cv2.imread', 'cv2.imread', (['image_path'], {}), False, 'import cv2\n'), (57, 'cv2.imread', 'cv2.imread', (['image_path'], {}), False, 'import cv2\n'), (79, 'cv2.imread', 'cv2.imread', (['image_path'], {}), False, 'import cv2\n'), (39, 'os.remove', 'os.remove', (['image_path'], {}), False, 'import os\n'), (59, 'os.remove', 'os.remove', (['image_path'], {}), False, 'import os\n'), (81, 'os.remove', 'os.remove', (['image_path'], {}), False, 'import os\n'), (170, 'sklearn.metrics.classification_report', 'classification_report', (['test_y', 'y_pred'], {}), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), (171, 'sklearn.metrics.confusion_matrix', 'confusion_matrix', (['test_y', 'y_pred'], {}), False, 'from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n'), (42, 'os.remove', 'os.remove', (['image_path'], {}), False, 'import os\n'), (45, 'cv2.resize', 'cv2.resize', (['img_arr', '(224, 224)'], {}), False, 'import cv2\n'), (62, 'os.remove', 'os.remove', (['image_path'], {}), False, 'import os\n'), (66, 'cv2.resize', 'cv2.resize', (['img_arr', '(224, 224)'], {}), False, 'import cv2\n'), (84, 'os.remove', 'os.remove', (['image_path'], {}), False, 'import os\n'), (87, 'cv2.resize', 'cv2.resize', (['img_arr', '(224, 224)'], {}), False, 'import cv2\n')]
soybase/DroneImageScripts | c077325a868237569592bd3820b3d873eddb4f83 | # import the necessary packages
import sys
import cv2
import numpy as np
import pandas as pd
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from sklearn.preprocessing import LabelBinarizer
from sklearn.preprocessing import MinMaxScaler
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import Conv2DTranspose
from tensorflow.keras.layers import LeakyReLU
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Reshape
from tensorflow.keras.optimizers import Adam
from tensorflow.keras import Input
from tensorflow.keras import Model
from tensorflow.keras import backend as K
from tensorflow.keras.models import load_model
from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping
class CNNProcessData:
def __init__(self):
pass
def get_imagedatagenerator(self):
datagen = ImageDataGenerator(
featurewise_center=True,
featurewise_std_normalization=True,
#rotation_range=20,
#width_shift_range=0.05,
#height_shift_range=0.05,
#horizontal_flip=True,
# vertical_flip=True,
#brightness_range=[0.8,1.2]
)
return datagen
def generate_croppings(self, testX, testY, image_size, number):
if number != 11:
raise Exception("Only implemented for number = 11 right now")
augmented_testX_1 = []
augmented_testX_2 = []
augmented_testX_3 = []
augmented_testX_4 = []
augmented_testX_5 = []
augmented_testX_6 = []
augmented_testX_7 = []
augmented_testX_8 = []
augmented_testX_9 = []
augmented_testX_10 = []
augmented_testX_11 = []
mid_image_size = int(round(image_size/2))
for img in testX:
height = img.shape[0]
small_height = int(round(height*0.1))
mid_height = int(round(height/2))
width = img.shape[1]
mid_width = int(round(width/2))
crop_img1 = img[height-image_size:height, 0:image_size]
crop_img2 = img[height-image_size:height, width-image_size:width]
crop_img3 = img[0:image_size, width-image_size:width]
crop_img4 = img[0:image_size, 0:image_size]
crop_img5 = img[mid_height-mid_image_size:mid_height+mid_image_size, mid_width-mid_image_size:mid_width+mid_image_size]
crop_img6 = img[mid_height-mid_image_size:mid_height+mid_image_size, 0:image_size]
crop_img7 = img[mid_height-mid_image_size:mid_height+mid_image_size, width-image_size:width]
crop_img8 = img[mid_height+small_height-mid_image_size:mid_height+small_height+mid_image_size, 0:image_size]
crop_img9 = img[mid_height+small_height-mid_image_size:mid_height+small_height+mid_image_size, width-image_size:width]
crop_img10 = img[mid_height-small_height-mid_image_size:mid_height-small_height+mid_image_size, 0:image_size]
crop_img11 = img[mid_height-small_height-mid_image_size:mid_height-small_height+mid_image_size, width-image_size:width]
augmented_testX_1.append(crop_img1)
augmented_testX_2.append(crop_img2)
augmented_testX_3.append(crop_img3)
augmented_testX_4.append(crop_img4)
augmented_testX_5.append(crop_img5)
augmented_testX_6.append(crop_img6)
augmented_testX_7.append(crop_img7)
augmented_testX_8.append(crop_img8)
augmented_testX_9.append(crop_img9)
augmented_testX_10.append(crop_img10)
augmented_testX_11.append(crop_img11)
augmented_testX_1 = np.array(augmented_testX_1)
augmented_testX_2 = np.array(augmented_testX_2)
augmented_testX_3 = np.array(augmented_testX_3)
augmented_testX_4 = np.array(augmented_testX_4)
augmented_testX_5 = np.array(augmented_testX_5)
augmented_testX_6 = np.array(augmented_testX_6)
augmented_testX_7 = np.array(augmented_testX_7)
augmented_testX_8 = np.array(augmented_testX_8)
augmented_testX_9 = np.array(augmented_testX_9)
augmented_testX_10 = np.array(augmented_testX_10)
augmented_testX_11 = np.array(augmented_testX_11)
testX = np.concatenate((augmented_testX_1, augmented_testX_2, augmented_testX_3, augmented_testX_4, augmented_testX_5, augmented_testX_6, augmented_testX_7, augmented_testX_8, augmented_testX_9, augmented_testX_10, augmented_testX_11))
# testXflipped = []
# for img in testX:
# horizontal_flip = cv2.flip( img, 0 )
# testXflipped.append(horizontal_flip)
# testXflipped = np.array(testXflipped)
# testX = np.concatenate((testX, testXflipped))
testY = np.repeat(testY, number)
return (testX, testY)
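    # create_montages packs each group of four images into one 2x2 grid image;
    # only montage_image_number == 4 is implemented.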
def create_montages(self, images, montage_image_number, image_size, full_montage_image_size):
output = []
if montage_image_number == 4:
data = images.reshape(int(len(images)/montage_image_number), montage_image_number, image_size, image_size, 3)
for iter in range(len(data)):
img_set = data[iter]
outputImage = np.zeros((full_montage_image_size, full_montage_image_size, 3))
outputImage[0:image_size, 0:image_size, :] = img_set[0]
outputImage[0:image_size, image_size:2*image_size, :] = img_set[1]
outputImage[image_size:2*image_size, 0:image_size, :] = img_set[2]
outputImage[image_size:2*image_size, image_size:2*image_size, :] = img_set[3]
# cv2.imshow("Result", outputImage)
# cv2.waitKey(0)
# raise Exception('Exit')
output.append(outputImage)
else:
raise Exception('Only implemented to montage 4 images into one image')
return np.array(output)
def process_cnn_data(self, images, aux_data, num_unique_stock_ids, num_unique_image_types, num_unique_time_days, image_size, keras_model_type, data_augmentation, data_augmentation_test, montage_image_number, full_montage_image_size, output_autoencoder_model_file_path, log_file_path):
if log_file_path is not None:
sys.stderr = open(log_file_path, 'a')
def eprint(*args, **kwargs):
print(*args, file=sys.stderr, **kwargs)
trainX = []
testX = []
trainY = []
testY = []
datagen = self.get_imagedatagenerator()
datagen.fit(images)
images = datagen.standardize(images)
aux_data["value"] = aux_data["value"].astype(float)
output_image_file = aux_data["output_image_file"].tolist()
        # LSTM models group images by time, but each sequence is still tied to a single label, e.g. X, Y = [img_t1, img_t2, img_t3], y1.
if keras_model_type == 'densenet121_lstm_imagenet':
images = images.reshape(num_unique_stock_ids * num_unique_image_types, num_unique_time_days, input_image_size, input_image_size, 3)
(train_aux_data, test_aux_data, train_images, test_images) = train_test_split(aux_data, images, test_size=0.2)
trainX_length = len(train_images)
testX_length = len(test_images)
train_images = train_images.reshape(trainX_length * num_unique_time_days, input_image_size, input_image_size, 3)
test_images = test_images.reshape(testX_length * num_unique_time_days, input_image_size, input_image_size, 3)
trainX_length_flat = len(train_images)
test_images = datagen.standardize(test_images)
# (testX, testY) = self.generate_croppings(testX, testY, image_size, data_augmentation_test)
testX_resized = []
for img in test_images:
testX_resized.append(cv2.resize(img, (image_size, image_size)))
test_images = np.array(testX_resized)
test_images = test_images.reshape(data_augmentation_test * testX_length, num_unique_time_days, image_size, image_size, 3)
# trainX_aug = []
# trainY_aug = []
# augmented = datagen.flow(train_images, train_aux_data, batch_size=trainX_length_flat)
# for i in range(0, data_augmentation):
# X, y = augmented.next()
# if len(trainX_aug) == 0:
# trainX_aug = X
# trainY_aug = y
# else:
# trainX_aug = np.concatenate((trainX_aug, X))
# trainY_aug = np.concatenate((trainY_aug, y))
#
# trainX = trainX_aug
# trainY = trainY_aug
trainX_resized = []
for img in train_images:
trainX_resized.append(cv2.resize(img, (image_size, image_size)))
train_images = np.array(trainX_resized)
train_images = train_images.reshape(data_augmentation * trainX_length, num_unique_time_days, image_size, image_size, 3)
else:
images = self.create_montages(images, montage_image_number, image_size, full_montage_image_size)
(encoder, decoder, autoencoder) = self.build_autoencoder(full_montage_image_size, full_montage_image_size, 3)
            opt = Adam(learning_rate=1e-3)
autoencoder.compile(loss="mse", optimizer=opt)
(train_aux_data, test_aux_data, train_images, test_images) = train_test_split(aux_data, images, test_size=0.2)
checkpoint = ModelCheckpoint(filepath=output_autoencoder_model_file_path, monitor='loss', verbose=1, save_best_only=True, mode='min', save_frequency=1, save_weights_only=False)
callbacks_list = [checkpoint]
# train the convolutional autoencoder
H = autoencoder.fit(
train_images, train_images,
validation_data=(test_images, test_images),
epochs=25,
batch_size=32,
callbacks=callbacks_list
)
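            # Downstream, the autoencoder's reconstructions (not the raw montages)
            # are written to the output image paths and used for the final split.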
decoded = autoencoder.predict(images)
output_image_counter = 0
for image in decoded:
cv2.imwrite(output_image_file[output_image_counter], image*255)
output_image_counter += 1
(train_aux_data, test_aux_data, train_images, test_images) = train_test_split(aux_data, decoded, test_size=0.2)
# testY_length = len(testY)
# (testX, testY) = self.generate_croppings(testX, testY, image_size, data_augmentation_test)
# testY = testY.reshape(data_augmentation_test * testY_length, 1)
# augmented = datagen.flow(trainX, trainY, batch_size=len(trainX))
# for i in range(0, data_augmentation):
# X, y = augmented.next()
stock_id_binarizer = LabelBinarizer().fit(aux_data["stock_id"])
train_stock_id_categorical = stock_id_binarizer.transform(train_aux_data["stock_id"])
test_stock_id_categorical = stock_id_binarizer.transform(test_aux_data["stock_id"])
accession_id_binarizer = LabelBinarizer().fit(aux_data["accession_id"])
train_accession_id_categorical = accession_id_binarizer.transform(train_aux_data["accession_id"])
test_accession_id_categorical = accession_id_binarizer.transform(test_aux_data["accession_id"])
female_id_binarizer = LabelBinarizer().fit(aux_data["female_id"])
train_female_id_categorical = female_id_binarizer.transform(train_aux_data["female_id"])
test_female_id_categorical = female_id_binarizer.transform(test_aux_data["female_id"])
male_id_binarizer = LabelBinarizer().fit(aux_data["male_id"])
train_male_id_categorical = male_id_binarizer.transform(train_aux_data["male_id"])
test_male_id_categorical = male_id_binarizer.transform(test_aux_data["male_id"])
continuous = [col for col in aux_data.columns if 'aux_trait_' in col]
cs = MinMaxScaler()
if len(continuous) > 0:
trainContinuous = cs.fit_transform(train_aux_data[continuous])
testContinuous = cs.transform(test_aux_data[continuous])
#trainX = np.hstack((train_stock_id_categorical, train_accession_id_categorical, train_female_id_categorical, train_male_id_categorical, trainContinuous))
#testX = np.hstack((test_stock_id_categorical, test_accession_id_categorical, test_female_id_categorical, test_male_id_categorical, testContinuous))
trainX = trainContinuous
testX = testContinuous
else:
trainX = []
testX = []
        trainX = np.array(trainX)
        testX = np.array(testX)
max_label = aux_data["value"].max()
trainY = train_aux_data["value"]/max_label
testY = test_aux_data["value"]/max_label
train_genotype_files = train_aux_data["genotype_file"].tolist()
test_genotype_files = test_aux_data["genotype_file"].tolist()
train_genotype_data = []
for f in train_genotype_files:
if log_file_path is not None:
eprint(f)
else:
print(f)
            if not pd.isna(f):
geno_data = pd.read_csv(f, sep="\t", header=None, na_values="NA")
train_genotype_data.append(np.array(geno_data.iloc[:,0]))
test_genotype_data = []
for f in test_genotype_files:
if log_file_path is not None:
eprint(f)
else:
print(f)
            if not pd.isna(f):
geno_data = pd.read_csv(f, sep="\t", header=None, na_values="NA")
test_genotype_data.append(np.array(geno_data.iloc[:,0]))
train_genotype_data = np.array(train_genotype_data)
test_genotype_data = np.array(test_genotype_data)
eprint(train_genotype_data)
eprint(testX)
eprint(trainX)
return (test_images, np.array(testX), testY.to_numpy(), test_genotype_data, train_images, np.array(trainX), trainY.to_numpy(), train_genotype_data)
def process_cnn_data_predictions(self, data, aux_data, num_unique_stock_ids, num_unique_image_types, num_unique_time_days, image_size, keras_model_type, input_autoencoder_model_file_path, training_data, data_augmentation_test, montage_image_number, full_montage_image_size):
trainX = []
testX = []
trainY = []
testY = []
datagen = self.get_imagedatagenerator()
datagen.fit(training_data)
data = datagen.standardize(data)
output_image_file = aux_data["output_image_file"].tolist()
data = self.create_montages(data, montage_image_number, image_size, full_montage_image_size)
autoencoder_model = load_model(input_autoencoder_model_file_path)
data = autoencoder_model.predict(data)
#ret = self.generate_croppings(data, None, image_size, data_augmentation_test)
#augmented_data = ret[0]
        # LSTM models group images by time, but are still tied to a single label, e.g. X, Y = [img_t1, img_t2, img_t3], y1.
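        # NOTE (observation, not in the original source): the training path in
        # process_cnn_data() tests keras_model_type == 'densenet121_lstm_imagenet',
        # while this prediction path tests 'KerasCNNLSTMDenseNet121ImageNetWeights';
        # callers are assumed to pass the matching string for each method.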
if keras_model_type == 'KerasCNNLSTMDenseNet121ImageNetWeights':
data = data.reshape(data_augmentation_test * num_unique_stock_ids * num_unique_image_types, num_unique_time_days, image_size, image_size, 3)
output_image_counter = 0
for image in data:
cv2.imwrite(output_image_file[output_image_counter], image*255)
output_image_counter += 1
stock_id_binarizer = LabelBinarizer().fit(aux_data["stock_id"])
stock_id_categorical = stock_id_binarizer.transform(aux_data["stock_id"])
accession_id_binarizer = LabelBinarizer().fit(aux_data["accession_id"])
accession_id_categorical = accession_id_binarizer.transform(aux_data["accession_id"])
female_id_binarizer = LabelBinarizer().fit(aux_data["female_id"])
female_id_categorical = female_id_binarizer.transform(aux_data["female_id"])
male_id_binarizer = LabelBinarizer().fit(aux_data["male_id"])
male_id_categorical = male_id_binarizer.transform(aux_data["male_id"])
continuous = [col for col in aux_data.columns if 'aux_trait_' in col]
cs = MinMaxScaler()
if len(continuous) > 0:
fitContinuous = cs.fit_transform(aux_data[continuous])
# fitX = np.hstack([stock_id_categorical, accession_id_categorical, female_id_categorical, male_id_categorical, fitContinuous])
fitX = fitContinuous
else:
# fitX = np.hstack([stock_id_categorical, accession_id_categorical, female_id_categorical, male_id_categorical])
fitX = []
fitX = np.array(fitX)
max_label = aux_data["value"].max()
fitY = aux_data["value"]/max_label
genotype_files = aux_data["genotype_file"].tolist()
genotype_data = []
for f in genotype_files:
            if not pd.isna(f):
geno_data = pd.read_csv(f, sep="\t", header=None, na_values="NA")
genotype_data.append(np.array(geno_data.iloc[:,0]))
genotype_data = np.array(genotype_data)
return (data, fitX, genotype_data, fitY.to_numpy())
def build_autoencoder(self, width, height, depth, filters=(32, 64), latentDim=16):
inputShape = (height, width, depth)
chanDim = -1
# define the input to the encoder
inputs = Input(shape=inputShape)
x = inputs
# loop over the number of filters
for f in filters:
# apply a CONV => RELU => BN operation
x = Conv2D(f, (3, 3), strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(axis=chanDim)(x)
# flatten the network and then construct our latent vector
volumeSize = K.int_shape(x)
x = Flatten()(x)
latent = Dense(latentDim)(x)
# build the encoder model
encoder = Model(inputs, latent, name="encoder")
# start building the decoder model which will accept the
# output of the encoder as its inputs
latentInputs = Input(shape=(latentDim,))
x = Dense(np.prod(volumeSize[1:]))(latentInputs)
x = Reshape((volumeSize[1], volumeSize[2], volumeSize[3]))(x)
# loop over our number of filters again, but this time in
# reverse order
for f in filters[::-1]:
# apply a CONV_TRANSPOSE => RELU => BN operation
x = Conv2DTranspose(f, (3, 3), strides=2, padding="same")(x)
x = LeakyReLU(alpha=0.2)(x)
x = BatchNormalization(axis=chanDim)(x)
# apply a single CONV_TRANSPOSE layer used to recover the
# original depth of the image
x = Conv2DTranspose(depth, (3, 3), padding="same")(x)
outputs = Activation("sigmoid")(x)
# build the decoder model
decoder = Model(latentInputs, outputs, name="decoder")
# our autoencoder is the encoder + decoder
autoencoder = Model(inputs, decoder(encoder(inputs)), name="autoencoder")
# return a 3-tuple of the encoder, decoder, and autoencoder
return (encoder, decoder, autoencoder)
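    # Hedged usage sketch (not part of the original file); the instance name and
    # the 96x96x3 image size are illustrative assumptions:
    #
    #   encoder, decoder, autoencoder = processor.build_autoencoder(96, 96, 3)
    #   autoencoder.compile(loss="mse", optimizer=Adam(learning_rate=1e-3))
    #   reconstructions = autoencoder.predict(images)   # images: [N, 96, 96, 3]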
| [
"tensorflow.keras.models.load_model",
"tensorflow.keras.layers.Conv2DTranspose",
"numpy.concatenate",
"pandas.isna",
"sklearn.preprocessing.MinMaxScaler",
"pandas.read_csv",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.Input",
"tensorflow.keras.backend.int_shape",
"tensorflow.keras.layers.Conv2D",
"sklearn.preprocessing.LabelBinarizer",
"numpy.repeat",
"numpy.zeros",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.layers.Dense",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.Model",
"tensorflow.keras.layers.Reshape",
"numpy.array",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.BatchNormalization",
"numpy.prod"
] | CNN/CNNProcessData.py | [(30, 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'featurewise_center': '(True)', 'featurewise_std_normalization': '(True)'}), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), (87, 'numpy.array', 'np.array', (['augmented_testX_1'], {}), True, 'import numpy as np\n'), (88, 'numpy.array', 'np.array', (['augmented_testX_2'], {}), True, 'import numpy as np\n'), (89, 'numpy.array', 'np.array', (['augmented_testX_3'], {}), True, 'import numpy as np\n'), (90, 'numpy.array', 'np.array', (['augmented_testX_4'], {}), True, 'import numpy as np\n'), (91, 'numpy.array', 'np.array', (['augmented_testX_5'], {}), True, 'import numpy as np\n'), (92, 'numpy.array', 'np.array', (['augmented_testX_6'], {}), True, 'import numpy as np\n'), (93, 'numpy.array', 'np.array', (['augmented_testX_7'], {}), True, 'import numpy as np\n'), (94, 'numpy.array', 'np.array', (['augmented_testX_8'], {}), True, 'import numpy as np\n'), (95, 'numpy.array', 'np.array', (['augmented_testX_9'], {}), True, 'import numpy as np\n'), (96, 'numpy.array', 'np.array', (['augmented_testX_10'], {}), True, 'import numpy as np\n'), (97, 'numpy.array', 'np.array', (['augmented_testX_11'], {}), True, 'import numpy as np\n'), (98, 'numpy.concatenate', 'np.concatenate', (['(augmented_testX_1, augmented_testX_2, augmented_testX_3, augmented_testX_4,\n augmented_testX_5, augmented_testX_6, augmented_testX_7,\n augmented_testX_8, augmented_testX_9, augmented_testX_10,\n augmented_testX_11)'], {}), True, 'import numpy as np\n'), (105, 'numpy.repeat', 'np.repeat', (['testY', 'number'], {}), True, 'import numpy as np\n'), (129, 'numpy.array', 'np.array', (['output'], {}), True, 'import numpy as np\n'), (248, 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), False, 'from sklearn.preprocessing import MinMaxScaler\n'), (260, 'numpy.array', 'np.array', (['trainX'], {}), True, 'import numpy as np\n'), (261, 'numpy.array', 'np.array', (['testX'], {}), True, 'import numpy as np\n'), (288, 'numpy.array', 'np.array', (['train_genotype_data'], {}), True, 'import numpy as np\n'), (289, 'numpy.array', 'np.array', (['test_genotype_data'], {}), True, 'import numpy as np\n'), (310, 'tensorflow.keras.models.load_model', 'load_model', (['input_autoencoder_model_file_path'], {}), False, 'from tensorflow.keras.models import load_model\n'), (338, 'sklearn.preprocessing.MinMaxScaler', 'MinMaxScaler', ([], {}), False, 'from sklearn.preprocessing import MinMaxScaler\n'), (347, 'numpy.array', 'np.array', (['fitX'], {}), True, 'import numpy as np\n'), (359, 'numpy.array', 'np.array', (['genotype_data'], {}), True, 'import numpy as np\n'), (368, 'tensorflow.keras.Input', 'Input', ([], {'shape': 'inputShape'}), False, 'from tensorflow.keras import Input\n'), (379, 'tensorflow.keras.backend.int_shape', 'K.int_shape', (['x'], {}), True, 'from tensorflow.keras import backend as K\n'), (384, 'tensorflow.keras.Model', 'Model', (['inputs', 'latent'], {'name': '"""encoder"""'}), False, 'from tensorflow.keras import Model\n'), (388, 'tensorflow.keras.Input', 'Input', ([], {'shape': '(latentDim,)'}), False, 'from tensorflow.keras import Input\n'), (406, 'tensorflow.keras.Model', 'Model', (['latentInputs', 'outputs'], {'name': '"""decoder"""'}), False, 'from tensorflow.keras import Model\n'), (156, 'sklearn.model_selection.train_test_split', 'train_test_split', (['aux_data', 'images'], {'test_size': '(0.2)'}), False, 'from sklearn.model_selection import train_test_split\n'), (169, 'numpy.array', 'np.array', (['testX_resized'], {}), True, 'import numpy as np\n'), (191, 'numpy.array', 'np.array', (['trainX_resized'], {}), True, 'import numpy as np\n'), (198, 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), False, 'from tensorflow.keras.optimizers import Adam\n'), (201, 'sklearn.model_selection.train_test_split', 'train_test_split', (['aux_data', 'images'], {'test_size': '(0.2)'}), False, 'from sklearn.model_selection import train_test_split\n'), (203, 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'output_autoencoder_model_file_path', 'monitor': '"""loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""', 'save_frequency': '(1)', 'save_weights_only': '(False)'}), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, EarlyStopping\n'), (221, 'sklearn.model_selection.train_test_split', 'train_test_split', (['aux_data', 'decoded'], {'test_size': '(0.2)'}), False, 'from sklearn.model_selection import train_test_split\n'), (294, 'numpy.array', 'np.array', (['testX'], {}), True, 'import numpy as np\n'), (294, 'numpy.array', 'np.array', (['trainX'], {}), True, 'import numpy as np\n'), (322, 'cv2.imwrite', 'cv2.imwrite', (['output_image_file[output_image_counter]', '(image * 255)'], {}), False, 'import cv2\n'), (380, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import Flatten\n'), (381, 'tensorflow.keras.layers.Dense', 'Dense', (['latentDim'], {}), False, 'from tensorflow.keras.layers import Dense\n'), (390, 'tensorflow.keras.layers.Reshape', 'Reshape', (['(volumeSize[1], volumeSize[2], volumeSize[3])'], {}), False, 'from tensorflow.keras.layers import Reshape\n'), (402, 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['depth', '(3, 3)'], {'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Conv2DTranspose\n'), (403, 'tensorflow.keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (115, 'numpy.zeros', 'np.zeros', (['(full_montage_image_size, full_montage_image_size, 3)'], {}), True, 'import numpy as np\n'), (218, 'cv2.imwrite', 'cv2.imwrite', (['output_image_file[output_image_counter]', '(image * 255)'], {}), False, 'import cv2\n'), (231, 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), False, 'from sklearn.preprocessing import LabelBinarizer\n'), (235, 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), False, 'from sklearn.preprocessing import LabelBinarizer\n'), (239, 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), False, 'from sklearn.preprocessing import LabelBinarizer\n'), (243, 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), False, 'from sklearn.preprocessing import LabelBinarizer\n'), (275, 'pandas.isna', 'pd.isna', (['f'], {}), True, 'import pandas as pd\n'), (276, 'pandas.read_csv', 'pd.read_csv', (['f'], {'sep': '"""\t"""', 'header': 'None', 'na_values': '"""NA"""'}), True, 'import pandas as pd\n'), (284, 'pandas.isna', 'pd.isna', (['f'], {}), True, 'import pandas as pd\n'), (285, 'pandas.read_csv', 'pd.read_csv', (['f'], {'sep': '"""\t"""', 'header': 'None', 'na_values': '"""NA"""'}), True, 'import pandas as pd\n'), (325, 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), False, 'from sklearn.preprocessing import LabelBinarizer\n'), (328, 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), False, 'from sklearn.preprocessing import LabelBinarizer\n'), (331, 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), False, 'from sklearn.preprocessing import LabelBinarizer\n'), (334, 'sklearn.preprocessing.LabelBinarizer', 'LabelBinarizer', ([], {}), False, 'from sklearn.preprocessing import LabelBinarizer\n'), (355, 'pandas.isna', 'pd.isna', (['f'], {}), True, 'import pandas as pd\n'), (356, 'pandas.read_csv', 'pd.read_csv', (['f'], {'sep': '"""\t"""', 'header': 'None', 'na_values': '"""NA"""'}), True, 'import pandas as pd\n'), (374, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['f', '(3, 3)'], {'strides': '(2)', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Conv2D\n'), (375, 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), False, 'from tensorflow.keras.layers import LeakyReLU\n'), (376, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'chanDim'}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (389, 'numpy.prod', 'np.prod', (['volumeSize[1:]'], {}), True, 'import numpy as np\n'), (396, 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['f', '(3, 3)'], {'strides': '(2)', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Conv2DTranspose\n'), (397, 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), False, 'from tensorflow.keras.layers import LeakyReLU\n'), (398, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'chanDim'}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (168, 'cv2.resize', 'cv2.resize', (['img', '(image_size, image_size)'], {}), False, 'import cv2\n'), (190, 'cv2.resize', 'cv2.resize', (['img', '(image_size, image_size)'], {}), False, 'import cv2\n'), (277, 'numpy.array', 'np.array', (['geno_data.iloc[:, (0)]'], {}), True, 'import numpy as np\n'), (286, 'numpy.array', 'np.array', (['geno_data.iloc[:, (0)]'], {}), True, 'import numpy as np\n'), (357, 'numpy.array', 'np.array', (['geno_data.iloc[:, (0)]'], {}), True, 'import numpy as np\n')]
kct22aws/transformers | 28e091430eea9e0d40839e56fd0d57aec262f5f9 | # coding=utf-8
# Copyright 2021 The HuggingFace Inc. team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" TF 2.0 ConvBERT model."""
import tensorflow as tf
from ...activations_tf import get_tf_activation
from ...file_utils import (
MULTIPLE_CHOICE_DUMMY_INPUTS,
add_code_sample_docstrings,
add_start_docstrings,
add_start_docstrings_to_model_forward,
)
from ...modeling_tf_outputs import (
TFBaseModelOutput,
TFMaskedLMOutput,
TFMultipleChoiceModelOutput,
TFQuestionAnsweringModelOutput,
TFSequenceClassifierOutput,
TFTokenClassifierOutput,
)
from ...modeling_tf_utils import (
TFMaskedLanguageModelingLoss,
TFMultipleChoiceLoss,
TFPreTrainedModel,
TFQuestionAnsweringLoss,
TFSequenceClassificationLoss,
TFSequenceSummary,
TFTokenClassificationLoss,
get_initializer,
input_processing,
keras_serializable,
shape_list,
)
from ...utils import logging
from .configuration_convbert import ConvBertConfig
logger = logging.get_logger(__name__)
_CHECKPOINT_FOR_DOC = "YituTech/conv-bert-base"
_CONFIG_FOR_DOC = "ConvBertConfig"
_TOKENIZER_FOR_DOC = "ConvBertTokenizer"
TF_CONVBERT_PRETRAINED_MODEL_ARCHIVE_LIST = [
"YituTech/conv-bert-base",
"YituTech/conv-bert-medium-small",
"YituTech/conv-bert-small",
# See all ConvBERT models at https://huggingface.co/models?filter=convbert
]
# Copied from transformers.models.albert.modeling_tf_albert.TFAlbertEmbeddings with Albert->ConvBert
class TFConvBertEmbeddings(tf.keras.layers.Layer):
"""Construct the embeddings from word, position and token_type embeddings."""
def __init__(self, config: ConvBertConfig, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.type_vocab_size = config.type_vocab_size
self.embedding_size = config.embedding_size
self.max_position_embeddings = config.max_position_embeddings
self.initializer_range = config.initializer_range
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(rate=config.hidden_dropout_prob)
def build(self, input_shape: tf.TensorShape):
with tf.name_scope("word_embeddings"):
self.weight = self.add_weight(
name="weight",
shape=[self.vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("token_type_embeddings"):
self.token_type_embeddings = self.add_weight(
name="embeddings",
shape=[self.type_vocab_size, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
with tf.name_scope("position_embeddings"):
self.position_embeddings = self.add_weight(
name="embeddings",
shape=[self.max_position_embeddings, self.embedding_size],
initializer=get_initializer(self.initializer_range),
)
super().build(input_shape)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertEmbeddings.call
def call(
self,
input_ids: tf.Tensor = None,
position_ids: tf.Tensor = None,
token_type_ids: tf.Tensor = None,
inputs_embeds: tf.Tensor = None,
past_key_values_length=0,
training: bool = False,
) -> tf.Tensor:
"""
Applies embedding based on inputs tensor.
Returns:
final_embeddings (`tf.Tensor`): output embedding tensor.
"""
if input_ids is None and inputs_embeds is None:
            raise ValueError("Need to provide either `input_ids` or `inputs_embeds`.")
if input_ids is not None:
inputs_embeds = tf.gather(params=self.weight, indices=input_ids)
input_shape = shape_list(inputs_embeds)[:-1]
if token_type_ids is None:
token_type_ids = tf.fill(dims=input_shape, value=0)
if position_ids is None:
position_ids = tf.expand_dims(
tf.range(start=past_key_values_length, limit=input_shape[1] + past_key_values_length), axis=0
)
position_embeds = tf.gather(params=self.position_embeddings, indices=position_ids)
token_type_embeds = tf.gather(params=self.token_type_embeddings, indices=token_type_ids)
final_embeddings = inputs_embeds + position_embeds + token_type_embeds
final_embeddings = self.LayerNorm(inputs=final_embeddings)
final_embeddings = self.dropout(inputs=final_embeddings, training=training)
return final_embeddings
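    # Shape sketch (illustrative, not in the original source): for input_ids of
    # shape [batch, seq], the gather plus position/token-type embeddings yields
    # final_embeddings of shape [batch, seq, embedding_size].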
class TFConvBertSelfAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.hidden_size % config.num_attention_heads != 0:
raise ValueError(
f"The hidden size ({config.hidden_size}) is not a multiple of the number of attention "
f"heads ({config.num_attention_heads})"
)
new_num_attention_heads = int(config.num_attention_heads / config.head_ratio)
if new_num_attention_heads < 1:
self.head_ratio = config.num_attention_heads
num_attention_heads = 1
else:
num_attention_heads = new_num_attention_heads
self.head_ratio = config.head_ratio
self.num_attention_heads = num_attention_heads
self.conv_kernel_size = config.conv_kernel_size
assert (
config.hidden_size % self.num_attention_heads == 0
), "hidden_size should be divisible by num_attention_heads"
self.attention_head_size = config.hidden_size // config.num_attention_heads
self.all_head_size = self.num_attention_heads * self.attention_head_size
self.query = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="query"
)
self.key = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="key"
)
self.value = tf.keras.layers.Dense(
self.all_head_size, kernel_initializer=get_initializer(config.initializer_range), name="value"
)
self.key_conv_attn_layer = tf.keras.layers.SeparableConv1D(
self.all_head_size,
self.conv_kernel_size,
padding="same",
activation=None,
depthwise_initializer=get_initializer(1 / self.conv_kernel_size),
pointwise_initializer=get_initializer(config.initializer_range),
name="key_conv_attn_layer",
)
self.conv_kernel_layer = tf.keras.layers.Dense(
self.num_attention_heads * self.conv_kernel_size,
activation=None,
name="conv_kernel_layer",
kernel_initializer=get_initializer(config.initializer_range),
)
self.conv_out_layer = tf.keras.layers.Dense(
self.all_head_size,
activation=None,
name="conv_out_layer",
kernel_initializer=get_initializer(config.initializer_range),
)
self.dropout = tf.keras.layers.Dropout(config.attention_probs_dropout_prob)
def transpose_for_scores(self, x, batch_size):
# Reshape from [batch_size, seq_length, all_head_size] to [batch_size, seq_length, num_attention_heads, attention_head_size]
x = tf.reshape(x, (batch_size, -1, self.num_attention_heads, self.attention_head_size))
return tf.transpose(x, perm=[0, 2, 1, 3])
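    # Illustrative shape walk-through (not in the original source), assuming
    # batch=2, seq=8, num_attention_heads=4, attention_head_size=64:
    #   x: [2, 8, 256] -> reshape -> [2, 8, 4, 64] -> transpose -> [2, 4, 8, 64]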
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
batch_size = shape_list(hidden_states)[0]
mixed_query_layer = self.query(hidden_states)
mixed_key_layer = self.key(hidden_states)
mixed_value_layer = self.value(hidden_states)
mixed_key_conv_attn_layer = self.key_conv_attn_layer(hidden_states)
query_layer = self.transpose_for_scores(mixed_query_layer, batch_size)
key_layer = self.transpose_for_scores(mixed_key_layer, batch_size)
conv_attn_layer = tf.multiply(mixed_key_conv_attn_layer, mixed_query_layer)
conv_kernel_layer = self.conv_kernel_layer(conv_attn_layer)
conv_kernel_layer = tf.reshape(conv_kernel_layer, [-1, self.conv_kernel_size, 1])
conv_kernel_layer = tf.nn.softmax(conv_kernel_layer, axis=1)
paddings = tf.constant(
[
[
0,
0,
],
[int((self.conv_kernel_size - 1) / 2), int((self.conv_kernel_size - 1) / 2)],
[0, 0],
]
)
conv_out_layer = self.conv_out_layer(hidden_states)
conv_out_layer = tf.reshape(conv_out_layer, [batch_size, -1, self.all_head_size])
conv_out_layer = tf.pad(conv_out_layer, paddings, "CONSTANT")
unfold_conv_out_layer = tf.stack(
[
tf.slice(conv_out_layer, [0, i, 0], [batch_size, shape_list(mixed_query_layer)[1], self.all_head_size])
for i in range(self.conv_kernel_size)
],
axis=-1,
)
conv_out_layer = tf.reshape(unfold_conv_out_layer, [-1, self.attention_head_size, self.conv_kernel_size])
conv_out_layer = tf.matmul(conv_out_layer, conv_kernel_layer)
conv_out_layer = tf.reshape(conv_out_layer, [-1, self.all_head_size])
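        # Shape sketch of the span-based dynamic convolution above (illustrative,
        # not in the original source):
        #   conv_kernel_layer: [batch*seq*heads, conv_kernel_size, 1] softmax weights
        #   conv_out_layer:    [batch*seq*heads, attention_head_size, conv_kernel_size]
        # The matmul collapses the kernel axis, giving one convolved value vector per
        # head and position, flattened back to [batch*seq, all_head_size].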
# Take the dot product between "query" and "key" to get the raw attention scores.
attention_scores = tf.matmul(
query_layer, key_layer, transpose_b=True
) # (batch size, num_heads, seq_len_q, seq_len_k)
dk = tf.cast(shape_list(key_layer)[-1], attention_scores.dtype) # scale attention_scores
attention_scores = attention_scores / tf.math.sqrt(dk)
if attention_mask is not None:
            # Apply the attention mask (precomputed for all layers in the TFConvBertMainLayer call() function)
attention_scores = attention_scores + attention_mask
# Normalize the attention scores to probabilities.
attention_probs = tf.nn.softmax(attention_scores, axis=-1)
# This is actually dropping out entire tokens to attend to, which might
# seem a bit unusual, but is taken from the original Transformer paper.
attention_probs = self.dropout(attention_probs, training=training)
# Mask heads if we want to
if head_mask is not None:
attention_probs = attention_probs * head_mask
value_layer = tf.reshape(
mixed_value_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size]
)
value_layer = tf.transpose(value_layer, [0, 2, 1, 3])
context_layer = tf.matmul(attention_probs, value_layer)
context_layer = tf.transpose(context_layer, perm=[0, 2, 1, 3])
conv_out = tf.reshape(conv_out_layer, [batch_size, -1, self.num_attention_heads, self.attention_head_size])
context_layer = tf.concat([context_layer, conv_out], 2)
context_layer = tf.reshape(
context_layer, (batch_size, -1, self.head_ratio * self.all_head_size)
) # (batch_size, seq_len_q, all_head_size)
outputs = (context_layer, attention_probs) if output_attentions else (context_layer,)
return outputs
class TFConvBertSelfOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, input_tensor, training=False):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFConvBertAttention(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.self_attention = TFConvBertSelfAttention(config, name="self")
self.dense_output = TFConvBertSelfOutput(config, name="output")
def prune_heads(self, heads):
raise NotImplementedError
def call(self, input_tensor, attention_mask, head_mask, output_attentions, training=False):
self_outputs = self.self_attention(
input_tensor, attention_mask, head_mask, output_attentions, training=training
)
attention_output = self.dense_output(self_outputs[0], input_tensor, training=training)
outputs = (attention_output,) + self_outputs[1:] # add attentions if we output them
return outputs
class GroupedLinearLayer(tf.keras.layers.Layer):
def __init__(self, input_size, output_size, num_groups, kernel_initializer, **kwargs):
super().__init__(**kwargs)
self.input_size = input_size
self.output_size = output_size
self.num_groups = num_groups
self.kernel_initializer = kernel_initializer
self.group_in_dim = self.input_size // self.num_groups
self.group_out_dim = self.output_size // self.num_groups
def build(self, input_shape):
self.kernel = self.add_weight(
"kernel",
shape=[self.group_out_dim, self.group_in_dim, self.num_groups],
initializer=self.kernel_initializer,
trainable=True,
)
self.bias = self.add_weight(
"bias", shape=[self.output_size], initializer=self.kernel_initializer, dtype=self.dtype, trainable=True
)
def call(self, hidden_states):
batch_size = shape_list(hidden_states)[0]
x = tf.transpose(tf.reshape(hidden_states, [-1, self.num_groups, self.group_in_dim]), [1, 0, 2])
x = tf.matmul(x, tf.transpose(self.kernel, [2, 1, 0]))
x = tf.transpose(x, [1, 0, 2])
x = tf.reshape(x, [batch_size, -1, self.output_size])
x = tf.nn.bias_add(value=x, bias=self.bias)
return x
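    # Hedged sketch (not in the original source): GroupedLinearLayer acts as a
    # block-diagonal Dense layer. With num_groups=g, the input splits into g chunks
    # of size input_size // g, each mapped by its own [group_in_dim, group_out_dim]
    # kernel slice; with g=1 it reduces to an ordinary Dense layer.
    #
    #   layer = GroupedLinearLayer(768, 3072, num_groups=2,
    #                              kernel_initializer="glorot_uniform")
    #   out = layer(tf.ones((2, 8, 768)))   # -> shape [2, 8, 3072]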
class TFConvBertIntermediate(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.num_groups == 1:
self.dense = tf.keras.layers.Dense(
config.intermediate_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
else:
self.dense = GroupedLinearLayer(
config.hidden_size,
config.intermediate_size,
num_groups=config.num_groups,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
if isinstance(config.hidden_act, str):
self.intermediate_act_fn = get_tf_activation(config.hidden_act)
else:
self.intermediate_act_fn = config.hidden_act
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.intermediate_act_fn(hidden_states)
return hidden_states
class TFConvBertOutput(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
if config.num_groups == 1:
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
else:
self.dense = GroupedLinearLayer(
config.intermediate_size,
config.hidden_size,
num_groups=config.num_groups,
kernel_initializer=get_initializer(config.initializer_range),
name="dense",
)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dropout = tf.keras.layers.Dropout(config.hidden_dropout_prob)
def call(self, hidden_states, input_tensor, training=False):
hidden_states = self.dense(hidden_states)
hidden_states = self.dropout(hidden_states, training=training)
hidden_states = self.LayerNorm(hidden_states + input_tensor)
return hidden_states
class TFConvBertLayer(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.attention = TFConvBertAttention(config, name="attention")
self.intermediate = TFConvBertIntermediate(config, name="intermediate")
self.bert_output = TFConvBertOutput(config, name="output")
def call(self, hidden_states, attention_mask, head_mask, output_attentions, training=False):
attention_outputs = self.attention(
hidden_states, attention_mask, head_mask, output_attentions, training=training
)
attention_output = attention_outputs[0]
intermediate_output = self.intermediate(attention_output)
layer_output = self.bert_output(intermediate_output, attention_output, training=training)
outputs = (layer_output,) + attention_outputs[1:] # add attentions if we output them
return outputs
class TFConvBertEncoder(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.layer = [TFConvBertLayer(config, name=f"layer_._{i}") for i in range(config.num_hidden_layers)]
def call(
self,
hidden_states,
attention_mask,
head_mask,
output_attentions,
output_hidden_states,
return_dict,
training=False,
):
all_hidden_states = () if output_hidden_states else None
all_attentions = () if output_attentions else None
for i, layer_module in enumerate(self.layer):
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
layer_outputs = layer_module(
hidden_states, attention_mask, head_mask[i], output_attentions, training=training
)
hidden_states = layer_outputs[0]
if output_attentions:
all_attentions = all_attentions + (layer_outputs[1],)
# Add last layer
if output_hidden_states:
all_hidden_states = all_hidden_states + (hidden_states,)
if not return_dict:
return tuple(v for v in [hidden_states, all_hidden_states, all_attentions] if v is not None)
return TFBaseModelOutput(
last_hidden_state=hidden_states, hidden_states=all_hidden_states, attentions=all_attentions
)
class TFConvBertPredictionHeadTransform(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.embedding_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
if isinstance(config.hidden_act, str):
self.transform_act_fn = get_tf_activation(config.hidden_act)
else:
self.transform_act_fn = config.hidden_act
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
def call(self, hidden_states):
hidden_states = self.dense(hidden_states)
hidden_states = self.transform_act_fn(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
@keras_serializable
class TFConvBertMainLayer(tf.keras.layers.Layer):
config_class = ConvBertConfig
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.embeddings = TFConvBertEmbeddings(config, name="embeddings")
if config.embedding_size != config.hidden_size:
self.embeddings_project = tf.keras.layers.Dense(config.hidden_size, name="embeddings_project")
self.encoder = TFConvBertEncoder(config, name="encoder")
self.config = config
def get_input_embeddings(self):
return self.embeddings
def set_input_embeddings(self, value):
self.embeddings.weight = value
self.embeddings.vocab_size = value.shape[0]
def _prune_heads(self, heads_to_prune):
"""
Prunes heads of the model. heads_to_prune: dict of {layer_num: list of heads to prune in this layer} See base
class PreTrainedModel
"""
raise NotImplementedError
def get_extended_attention_mask(self, attention_mask, input_shape, dtype):
if attention_mask is None:
attention_mask = tf.fill(input_shape, 1)
# We create a 3D attention mask from a 2D tensor mask.
# Sizes are [batch_size, 1, 1, to_seq_length]
# So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
# this attention mask is more simple than the triangular masking of causal attention
# used in OpenAI GPT, we just need to prepare the broadcast dimension here.
extended_attention_mask = tf.reshape(attention_mask, (input_shape[0], 1, 1, input_shape[1]))
# Since attention_mask is 1.0 for positions we want to attend and 0.0 for
# masked positions, this operation will create a tensor which is 0.0 for
# positions we want to attend and -10000.0 for masked positions.
# Since we are adding it to the raw scores before the softmax, this is
# effectively the same as removing these entirely.
extended_attention_mask = tf.cast(extended_attention_mask, dtype)
extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0
return extended_attention_mask
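        # Worked example (illustrative, not in the original source): a mask of
        # [[1, 1, 0]] reshapes to [[[[1, 1, 0]]]] and maps to [[[[0.0, 0.0, -10000.0]]]],
        # so masked positions receive a large negative bias before the softmax.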
def get_head_mask(self, head_mask):
if head_mask is not None:
raise NotImplementedError
else:
head_mask = [None] * self.config.num_hidden_layers
return head_mask
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None and inputs["inputs_embeds"] is not None:
raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
elif inputs["input_ids"] is not None:
input_shape = shape_list(inputs["input_ids"])
elif inputs["inputs_embeds"] is not None:
input_shape = shape_list(inputs["inputs_embeds"])[:-1]
else:
raise ValueError("You have to specify either input_ids or inputs_embeds")
if inputs["attention_mask"] is None:
inputs["attention_mask"] = tf.fill(input_shape, 1)
if inputs["token_type_ids"] is None:
inputs["token_type_ids"] = tf.fill(input_shape, 0)
hidden_states = self.embeddings(
inputs["input_ids"],
inputs["position_ids"],
inputs["token_type_ids"],
inputs["inputs_embeds"],
training=inputs["training"],
)
extended_attention_mask = self.get_extended_attention_mask(
inputs["attention_mask"], input_shape, hidden_states.dtype
)
inputs["head_mask"] = self.get_head_mask(inputs["head_mask"])
if hasattr(self, "embeddings_project"):
hidden_states = self.embeddings_project(hidden_states, training=inputs["training"])
hidden_states = self.encoder(
hidden_states,
extended_attention_mask,
inputs["head_mask"],
inputs["output_attentions"],
inputs["output_hidden_states"],
inputs["return_dict"],
training=inputs["training"],
)
return hidden_states
class TFConvBertPreTrainedModel(TFPreTrainedModel):
"""
An abstract class to handle weights initialization and a simple interface for downloading and loading pretrained
models.
"""
config_class = ConvBertConfig
base_model_prefix = "convbert"
CONVBERT_START_DOCSTRING = r"""
    This model inherits from [`TFPreTrainedModel`]. Check the superclass documentation for the generic methods the
    library implements for all its models (such as downloading or saving, resizing the input embeddings, pruning heads,
    etc.)
This model is also a [tf.keras.Model](https://www.tensorflow.org/api_docs/python/tf/keras/Model) subclass. Use it
as a regular TF 2.0 Keras Model and refer to the TF 2.0 documentation for all matter related to general usage and
behavior.
<Tip>
    TF 2.0 models accept two formats as inputs:
    - having all inputs as keyword arguments (like PyTorch models), or
    - having all inputs as a list, tuple or dict in the first positional arguments.
    This second option is useful when using the [`tf.keras.Model.fit`] method, which currently requires having all the
    tensors in the first argument of the model call function: `model(inputs)`.
    If you choose this second option, there are three possibilities you can use to gather all the input Tensors in the
    first positional argument:
- a single Tensor with `input_ids` only and nothing else: `model(inputs_ids)`
- a list of varying length with one or several input Tensors IN THE ORDER given in the docstring:
`model([input_ids, attention_mask])` or `model([input_ids, attention_mask, token_type_ids])`
- a dictionary with one or several input Tensors associated to the input names given in the docstring:
`model({"input_ids": input_ids, "token_type_ids": token_type_ids})`
</Tip>
Args:
config ([`ConvBertConfig`]): Model configuration class with all the parameters of the model.
Initializing with a config file does not load the weights associated with the model, only the
configuration. Check out the [`~PreTrainedModel.from_pretrained`] method to load the model weights.
"""
CONVBERT_INPUTS_DOCSTRING = r"""
Args:
input_ids (`Numpy array` or `tf.Tensor` of shape `({0})`):
Indices of input sequence tokens in the vocabulary.
Indices can be obtained using [`ConvBertTokenizer`]. See [`PreTrainedTokenizer.__call__`] and
[`PreTrainedTokenizer.encode`] for details.
[What are input IDs?](../glossary#input-ids)
attention_mask (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Mask to avoid performing attention on padding token indices. Mask values selected in `[0, 1]`:
- 1 for tokens that are **not masked**,
- 0 for tokens that are **masked**.
[What are attention masks?](../glossary#attention-mask)
token_type_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Segment token indices to indicate first and second portions of the inputs. Indices are selected in `[0,
1]`:
- 0 corresponds to a *sentence A* token,
- 1 corresponds to a *sentence B* token.
[What are token type IDs?](../glossary#token-type-ids)
position_ids (`Numpy array` or `tf.Tensor` of shape `({0})`, *optional*):
Indices of positions of each input sequence tokens in the position embeddings. Selected in the range `[0,
config.max_position_embeddings - 1]`.
[What are position IDs?](../glossary#position-ids)
head_mask (`Numpy array` or `tf.Tensor` of shape `(num_heads,)` or `(num_layers, num_heads)`, *optional*):
Mask to nullify selected heads of the self-attention modules. Mask values selected in `[0, 1]`:
- 1 indicates the head is **not masked**,
- 0 indicates the head is **masked**.
inputs_embeds (`tf.Tensor` of shape `({0}, hidden_size)`, *optional*):
Optionally, instead of passing `input_ids` you can choose to directly pass an embedded representation. This
is useful if you want more control over how to convert `input_ids` indices into associated vectors than the
model's internal embedding lookup matrix.
        output_attentions (`bool`, *optional*):
            Whether or not to return the attentions tensors of all attention layers. See `attentions` under returned
            tensors for more detail. This argument can be used only in eager mode; in graph mode the value in the
            config will be used instead.
        output_hidden_states (`bool`, *optional*):
            Whether or not to return the hidden states of all layers. See `hidden_states` under returned tensors for
            more detail. This argument can be used only in eager mode; in graph mode the value in the config will be
            used instead.
        return_dict (`bool`, *optional*):
            Whether or not to return a [`~file_utils.ModelOutput`] instead of a plain tuple. This argument can be used
            in eager mode; in graph mode the value will always be set to `True`.
training (`bool`, *optional*, defaults to `False`):
Whether or not to use the model in training mode (some modules like dropout modules have different
behaviors between training and evaluation).
"""
@add_start_docstrings(
"The bare ConvBERT Model transformer outputting raw hidden-states without any specific head on top.",
CONVBERT_START_DOCSTRING,
)
class TFConvBertModel(TFConvBertPreTrainedModel):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.convbert = TFConvBertMainLayer(config, name="convbert")
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFBaseModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
training=False,
**kwargs,
):
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
return outputs
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFBaseModelOutput(last_hidden_state=output.last_hidden_state, hidden_states=hs, attentions=attns)
class TFConvBertMaskedLMHead(tf.keras.layers.Layer):
def __init__(self, config, input_embeddings, **kwargs):
super().__init__(**kwargs)
self.vocab_size = config.vocab_size
self.embedding_size = config.embedding_size
self.input_embeddings = input_embeddings
def build(self, input_shape):
self.bias = self.add_weight(shape=(self.vocab_size,), initializer="zeros", trainable=True, name="bias")
super().build(input_shape)
def get_output_embeddings(self):
return self.input_embeddings
def set_output_embeddings(self, value):
self.input_embeddings.weight = value
self.input_embeddings.vocab_size = shape_list(value)[0]
def get_bias(self):
return {"bias": self.bias}
def set_bias(self, value):
self.bias = value["bias"]
self.vocab_size = shape_list(value["bias"])[0]
def call(self, hidden_states):
seq_length = shape_list(tensor=hidden_states)[1]
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, self.embedding_size])
hidden_states = tf.matmul(a=hidden_states, b=self.input_embeddings.weight, transpose_b=True)
hidden_states = tf.reshape(tensor=hidden_states, shape=[-1, seq_length, self.vocab_size])
hidden_states = tf.nn.bias_add(value=hidden_states, bias=self.bias)
return hidden_states
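    # Weight tying (descriptive note, not in the original source): the LM head
    # reuses the input embedding matrix via transpose_b=True, so the logits are a
    # projection of each hidden state onto every vocabulary embedding, plus a bias.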
class TFConvBertGeneratorPredictions(tf.keras.layers.Layer):
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.LayerNorm = tf.keras.layers.LayerNormalization(epsilon=config.layer_norm_eps, name="LayerNorm")
self.dense = tf.keras.layers.Dense(config.embedding_size, name="dense")
def call(self, generator_hidden_states, training=False):
hidden_states = self.dense(generator_hidden_states)
hidden_states = get_tf_activation("gelu")(hidden_states)
hidden_states = self.LayerNorm(hidden_states)
return hidden_states
@add_start_docstrings("""ConvBERT Model with a `language modeling` head on top.""", CONVBERT_START_DOCSTRING)
class TFConvBertForMaskedLM(TFConvBertPreTrainedModel, TFMaskedLanguageModelingLoss):
def __init__(self, config, *inputs, **kwargs):
        super().__init__(config, *inputs, **kwargs)
self.vocab_size = config.vocab_size
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.generator_predictions = TFConvBertGeneratorPredictions(config, name="generator_predictions")
if isinstance(config.hidden_act, str):
self.activation = get_tf_activation(config.hidden_act)
else:
self.activation = config.hidden_act
self.generator_lm_head = TFConvBertMaskedLMHead(config, self.convbert.embeddings, name="generator_lm_head")
def get_lm_head(self):
return self.generator_lm_head
def get_prefix_bias_name(self):
return self.name + "/" + self.generator_lm_head.name
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMaskedLMOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the masked language modeling loss. Indices should be in `[-100, 0, ...,
config.vocab_size]` (see `input_ids` docstring) Tokens with indices set to `-100` are ignored (masked), the
loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
generator_hidden_states = self.convbert(
input_ids=inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
generator_sequence_output = generator_hidden_states[0]
prediction_scores = self.generator_predictions(generator_sequence_output, training=inputs["training"])
prediction_scores = self.generator_lm_head(prediction_scores, training=inputs["training"])
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], prediction_scores)
if not inputs["return_dict"]:
output = (prediction_scores,) + generator_hidden_states[1:]
return ((loss,) + output) if loss is not None else output
return TFMaskedLMOutput(
loss=loss,
logits=prediction_scores,
hidden_states=generator_hidden_states.hidden_states,
attentions=generator_hidden_states.attentions,
)
# Copied from transformers.models.bert.modeling_tf_bert.TFBertForMaskedLM.serving_output
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMaskedLMOutput(logits=output.logits, hidden_states=hs, attentions=attns)
class TFConvBertClassificationHead(tf.keras.layers.Layer):
"""Head for sentence-level classification tasks."""
def __init__(self, config, **kwargs):
super().__init__(**kwargs)
self.dense = tf.keras.layers.Dense(
config.hidden_size, kernel_initializer=get_initializer(config.initializer_range), name="dense"
)
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = tf.keras.layers.Dropout(classifier_dropout)
self.out_proj = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="out_proj"
)
self.config = config
def call(self, hidden_states, **kwargs):
x = hidden_states[:, 0, :] # take <s> token (equiv. to [CLS])
x = self.dropout(x)
x = self.dense(x)
x = get_tf_activation(self.config.hidden_act)(x)
x = self.dropout(x)
x = self.out_proj(x)
return x
@add_start_docstrings(
"""
ConvBERT Model transformer with a sequence classification/regression head on top e.g., for GLUE tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForSequenceClassification(TFConvBertPreTrainedModel, TFSequenceClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.classifier = TFConvBertClassificationHead(config, name="classifier")
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFSequenceClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
`config.num_labels > 1` a classification loss is computed (Cross-Entropy).
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.classifier(outputs[0], training=inputs["training"])
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFSequenceClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFSequenceClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
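# Hedged usage sketch (not part of the original file); num_labels and the input
# text are illustrative assumptions, reusing the tokenizer sketched earlier:
#
#   model = TFConvBertForSequenceClassification.from_pretrained(
#       "YituTech/conv-bert-base", num_labels=2)
#   enc = tokenizer("a crisp, well-made film", return_tensors="tf")
#   logits = model(enc).logits         # shape (1, 2)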
@add_start_docstrings(
"""
ConvBERT Model with a multiple choice classification head on top (a linear layer on top of the pooled output and a
softmax) e.g. for RocStories/SWAG tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForMultipleChoice(TFConvBertPreTrainedModel, TFMultipleChoiceLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.sequence_summary = TFSequenceSummary(
config, initializer_range=config.initializer_range, name="sequence_summary"
)
self.classifier = tf.keras.layers.Dense(
1, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@property
def dummy_inputs(self):
"""
Dummy inputs to build the network.
Returns:
tf.Tensor with dummy inputs
"""
return {"input_ids": tf.convert_to_tensor(MULTIPLE_CHOICE_DUMMY_INPUTS)}
@add_start_docstrings_to_model_forward(
CONVBERT_INPUTS_DOCSTRING.format("batch_size, num_choices, sequence_length")
)
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFMultipleChoiceModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for computing the multiple choice classification loss. Indices should be in `[0, ..., num_choices]`
where `num_choices` is the size of the second dimension of the input tensors. (See `input_ids` above)
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
if inputs["input_ids"] is not None:
num_choices = shape_list(inputs["input_ids"])[1]
seq_length = shape_list(inputs["input_ids"])[2]
else:
num_choices = shape_list(inputs["inputs_embeds"])[1]
seq_length = shape_list(inputs["inputs_embeds"])[2]
flat_input_ids = tf.reshape(inputs["input_ids"], (-1, seq_length)) if inputs["input_ids"] is not None else None
flat_attention_mask = (
tf.reshape(inputs["attention_mask"], (-1, seq_length)) if inputs["attention_mask"] is not None else None
)
flat_token_type_ids = (
tf.reshape(inputs["token_type_ids"], (-1, seq_length)) if inputs["token_type_ids"] is not None else None
)
flat_position_ids = (
tf.reshape(inputs["position_ids"], (-1, seq_length)) if inputs["position_ids"] is not None else None
)
flat_inputs_embeds = (
tf.reshape(inputs["inputs_embeds"], (-1, seq_length, shape_list(inputs["inputs_embeds"])[3]))
if inputs["inputs_embeds"] is not None
else None
)
outputs = self.convbert(
flat_input_ids,
flat_attention_mask,
flat_token_type_ids,
flat_position_ids,
inputs["head_mask"],
flat_inputs_embeds,
inputs["output_attentions"],
inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
logits = self.sequence_summary(outputs[0], training=inputs["training"])
logits = self.classifier(logits)
reshaped_logits = tf.reshape(logits, (-1, num_choices))
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], reshaped_logits)
if not inputs["return_dict"]:
output = (reshaped_logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFMultipleChoiceModelOutput(
loss=loss,
logits=reshaped_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None, None), tf.int32, name="attention_mask"),
"token_type_ids": tf.TensorSpec((None, None, None), tf.int32, name="token_type_ids"),
}
]
)
def serving(self, inputs):
output = self.call(inputs)
return self.serving_output(output)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFMultipleChoiceModelOutput(logits=output.logits, hidden_states=hs, attentions=attns)
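# ---------------------------------------------------------------------------
# Illustrative shape walk-through for the multiple-choice head above
# (hypothetical sizes, added for clarity):
#   input_ids:       (batch=2, num_choices=4, seq_len=128)
#   flat_input_ids:  (8, 128)  after tf.reshape(..., (-1, seq_length))
#   logits:          (8, 1)    one score per (example, choice) pair
#   reshaped_logits: (2, 4)    via tf.reshape(logits, (-1, num_choices)),
#                              scored against labels in [0, num_choices - 1]
# ---------------------------------------------------------------------------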
@add_start_docstrings(
"""
ConvBERT Model with a token classification head on top (a linear layer on top of the hidden-states output) e.g. for
Named-Entity-Recognition (NER) tasks.
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForTokenClassification(TFConvBertPreTrainedModel, TFTokenClassificationLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
classifier_dropout = (
config.classifier_dropout if config.classifier_dropout is not None else config.hidden_dropout_prob
)
self.dropout = tf.keras.layers.Dropout(classifier_dropout)
self.classifier = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="classifier"
)
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFTokenClassifierOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
labels=None,
training=False,
**kwargs,
):
r"""
labels (`tf.Tensor` of shape `(batch_size, sequence_length)`, *optional*):
Labels for computing the token classification loss. Indices should be in `[0, ..., config.num_labels - 1]`.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
labels=labels,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
sequence_output = self.dropout(sequence_output, training=inputs["training"])
logits = self.classifier(sequence_output)
loss = None if inputs["labels"] is None else self.compute_loss(inputs["labels"], logits)
if not inputs["return_dict"]:
output = (logits,) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFTokenClassifierOutput(
loss=loss,
logits=logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFTokenClassifierOutput(logits=output.logits, hidden_states=hs, attentions=attns)
@add_start_docstrings(
"""
ConvBERT Model with a span classification head on top for extractive question-answering tasks like SQuAD (a linear
layer on top of the hidden-states output to compute `span start logits` and `span end logits`).
""",
CONVBERT_START_DOCSTRING,
)
class TFConvBertForQuestionAnswering(TFConvBertPreTrainedModel, TFQuestionAnsweringLoss):
def __init__(self, config, *inputs, **kwargs):
super().__init__(config, *inputs, **kwargs)
self.num_labels = config.num_labels
self.convbert = TFConvBertMainLayer(config, name="convbert")
self.qa_outputs = tf.keras.layers.Dense(
config.num_labels, kernel_initializer=get_initializer(config.initializer_range), name="qa_outputs"
)
@add_start_docstrings_to_model_forward(CONVBERT_INPUTS_DOCSTRING.format("batch_size, sequence_length"))
@add_code_sample_docstrings(
processor_class=_TOKENIZER_FOR_DOC,
checkpoint=_CHECKPOINT_FOR_DOC,
output_type=TFQuestionAnsweringModelOutput,
config_class=_CONFIG_FOR_DOC,
)
def call(
self,
input_ids=None,
attention_mask=None,
token_type_ids=None,
position_ids=None,
head_mask=None,
inputs_embeds=None,
output_attentions=None,
output_hidden_states=None,
return_dict=None,
start_positions=None,
end_positions=None,
training=False,
**kwargs,
):
r"""
start_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for the position (index) of the start of the labelled span, used to compute the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account when computing the loss.
end_positions (`tf.Tensor` of shape `(batch_size,)`, *optional*):
Labels for the position (index) of the end of the labelled span, used to compute the token classification loss.
Positions are clamped to the length of the sequence (`sequence_length`). Positions outside of the sequence
are not taken into account when computing the loss.
"""
inputs = input_processing(
func=self.call,
config=self.config,
input_ids=input_ids,
attention_mask=attention_mask,
token_type_ids=token_type_ids,
position_ids=position_ids,
head_mask=head_mask,
inputs_embeds=inputs_embeds,
output_attentions=output_attentions,
output_hidden_states=output_hidden_states,
return_dict=return_dict,
start_positions=start_positions,
end_positions=end_positions,
training=training,
kwargs_call=kwargs,
)
outputs = self.convbert(
inputs["input_ids"],
attention_mask=inputs["attention_mask"],
token_type_ids=inputs["token_type_ids"],
position_ids=inputs["position_ids"],
head_mask=inputs["head_mask"],
inputs_embeds=inputs["inputs_embeds"],
output_attentions=inputs["output_attentions"],
output_hidden_states=inputs["output_hidden_states"],
return_dict=inputs["return_dict"],
training=inputs["training"],
)
sequence_output = outputs[0]
logits = self.qa_outputs(sequence_output)
start_logits, end_logits = tf.split(logits, 2, axis=-1)
start_logits = tf.squeeze(start_logits, axis=-1)
end_logits = tf.squeeze(end_logits, axis=-1)
loss = None
if inputs["start_positions"] is not None and inputs["end_positions"] is not None:
labels = {"start_position": inputs["start_positions"]}
labels["end_position"] = inputs["end_positions"]
loss = self.compute_loss(labels, (start_logits, end_logits))
if not inputs["return_dict"]:
output = (start_logits, end_logits) + outputs[1:]
return ((loss,) + output) if loss is not None else output
return TFQuestionAnsweringModelOutput(
loss=loss,
start_logits=start_logits,
end_logits=end_logits,
hidden_states=outputs.hidden_states,
attentions=outputs.attentions,
)
def serving_output(self, output):
hs = tf.convert_to_tensor(output.hidden_states) if self.config.output_hidden_states else None
attns = tf.convert_to_tensor(output.attentions) if self.config.output_attentions else None
return TFQuestionAnsweringModelOutput(
start_logits=output.start_logits, end_logits=output.end_logits, hidden_states=hs, attentions=attns
)
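# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration, not part of the original
# module): extracting an answer span with the QA head above. The checkpoint
# name is an assumption; note that the QA head is randomly initialized unless
# a checkpoint finetuned for question answering is loaded.
if __name__ == "__main__":
from transformers import ConvBertTokenizer
tokenizer = ConvBertTokenizer.from_pretrained("YituTech/conv-bert-base")
model = TFConvBertForQuestionAnswering.from_pretrained("YituTech/conv-bert-base")
enc = tokenizer("What does the head predict?", "The head predicts start and end logits.", return_tensors="tf")
out = model(enc)
start = int(tf.argmax(out.start_logits, axis=-1)[0])
end = int(tf.argmax(out.end_logits, axis=-1)[0])
print(tokenizer.decode(enc["input_ids"][0, start : end + 1]))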
| [
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.convert_to_tensor",
"tensorflow.concat",
"tensorflow.cast",
"tensorflow.pad",
"tensorflow.squeeze",
"tensorflow.gather",
"tensorflow.name_scope",
"tensorflow.matmul",
"tensorflow.fill",
"tensorflow.keras.layers.Dense",
"tensorflow.split",
"tensorflow.nn.bias_add",
"tensorflow.multiply",
"tensorflow.transpose",
"tensorflow.nn.softmax",
"tensorflow.math.sqrt",
"tensorflow.range",
"tensorflow.reshape",
"tensorflow.keras.layers.Dropout",
"tensorflow.TensorSpec"
] | src/transformers/models/convbert/modeling_tf_convbert.py | [(78, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': 'config.layer_norm_eps', 'name': '"""LayerNorm"""'}), True, 'import tensorflow as tf\n'), (79, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': 'config.hidden_dropout_prob'}), True, 'import tensorflow as tf\n'), (137, 'tensorflow.gather', 'tf.gather', ([], {'params': 'self.position_embeddings', 'indices': 'position_ids'}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.gather', 'tf.gather', ([], {'params': 'self.token_type_embeddings', 'indices': 'token_type_ids'}), True, 'import tensorflow as tf\n'), (207, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.attention_probs_dropout_prob'], {}), True, 'import tensorflow as tf\n'), (211, 'tensorflow.reshape', 'tf.reshape', (['x', '(batch_size, -1, self.num_attention_heads, self.attention_head_size)'], {}), True, 'import tensorflow as tf\n'), (212, 'tensorflow.transpose', 'tf.transpose', (['x'], {'perm': '[0, 2, 1, 3]'}), True, 'import tensorflow as tf\n'), (224, 'tensorflow.multiply', 'tf.multiply', (['mixed_key_conv_attn_layer', 'mixed_query_layer'], {}), True, 'import tensorflow as tf\n'), (227, 'tensorflow.reshape', 'tf.reshape', (['conv_kernel_layer', '[-1, self.conv_kernel_size, 1]'], {}), True, 'import tensorflow as tf\n'), (228, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['conv_kernel_layer'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (242, 'tensorflow.reshape', 'tf.reshape', (['conv_out_layer', '[batch_size, -1, self.all_head_size]'], {}), True, 'import tensorflow as tf\n'), (243, 'tensorflow.pad', 'tf.pad', (['conv_out_layer', 'paddings', '"""CONSTANT"""'], {}), True, 'import tensorflow as tf\n'), (253, 'tensorflow.reshape', 'tf.reshape', (['unfold_conv_out_layer', '[-1, self.attention_head_size, self.conv_kernel_size]'], {}), True, 'import tensorflow as tf\n'), (255, 'tensorflow.matmul', 'tf.matmul', (['conv_out_layer', 'conv_kernel_layer'], {}), True, 'import tensorflow as tf\n'), (256, 'tensorflow.reshape', 'tf.reshape', (['conv_out_layer', '[-1, self.all_head_size]'], {}), True, 'import tensorflow as tf\n'), (259, 'tensorflow.matmul', 'tf.matmul', (['query_layer', 'key_layer'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (270, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['attention_scores'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (280, 'tensorflow.reshape', 'tf.reshape', (['mixed_value_layer', '[batch_size, -1, self.num_attention_heads, self.attention_head_size]'], {}), True, 'import tensorflow as tf\n'), (283, 'tensorflow.transpose', 'tf.transpose', (['value_layer', '[0, 2, 1, 3]'], {}), True, 'import tensorflow as tf\n'), (285, 'tensorflow.matmul', 'tf.matmul', (['attention_probs', 'value_layer'], {}), True, 'import tensorflow as tf\n'), (286, 'tensorflow.transpose', 'tf.transpose', (['context_layer'], {'perm': '[0, 2, 1, 3]'}), True, 'import tensorflow as tf\n'), (288, 'tensorflow.reshape', 'tf.reshape', (['conv_out_layer', '[batch_size, -1, self.num_attention_heads, self.attention_head_size]'], {}), True, 'import tensorflow as tf\n'), (289, 'tensorflow.concat', 'tf.concat', (['[context_layer, conv_out]', '(2)'], {}), True, 'import tensorflow as tf\n'), (290, 'tensorflow.reshape', 'tf.reshape', (['context_layer', '(batch_size, -1, self.head_ratio * self.all_head_size)'], {}), True, 'import tensorflow as tf\n'), (305, 
'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': 'config.layer_norm_eps', 'name': '"""LayerNorm"""'}), True, 'import tensorflow as tf\n'), (306, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.hidden_dropout_prob'], {}), True, 'import tensorflow as tf\n'), (362, 'tensorflow.transpose', 'tf.transpose', (['x', '[1, 0, 2]'], {}), True, 'import tensorflow as tf\n'), (363, 'tensorflow.reshape', 'tf.reshape', (['x', '[batch_size, -1, self.output_size]'], {}), True, 'import tensorflow as tf\n'), (364, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', ([], {'value': 'x', 'bias': 'self.bias'}), True, 'import tensorflow as tf\n'), (412, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': 'config.layer_norm_eps', 'name': '"""LayerNorm"""'}), True, 'import tensorflow as tf\n'), (413, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.hidden_dropout_prob'], {}), True, 'import tensorflow as tf\n'), (499, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': 'config.layer_norm_eps', 'name': '"""LayerNorm"""'}), True, 'import tensorflow as tf\n'), (547, 'tensorflow.reshape', 'tf.reshape', (['attention_mask', '(input_shape[0], 1, 1, input_shape[1])'], {}), True, 'import tensorflow as tf\n'), (554, 'tensorflow.cast', 'tf.cast', (['extended_attention_mask', 'dtype'], {}), True, 'import tensorflow as tf\n'), (840, 'tensorflow.reshape', 'tf.reshape', ([], {'tensor': 'hidden_states', 'shape': '[-1, self.embedding_size]'}), True, 'import tensorflow as tf\n'), (841, 'tensorflow.matmul', 'tf.matmul', ([], {'a': 'hidden_states', 'b': 'self.input_embeddings.weight', 'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (842, 'tensorflow.reshape', 'tf.reshape', ([], {'tensor': 'hidden_states', 'shape': '[-1, seq_length, self.vocab_size]'}), True, 'import tensorflow as tf\n'), (843, 'tensorflow.nn.bias_add', 'tf.nn.bias_add', ([], {'value': 'hidden_states', 'bias': 'self.bias'}), True, 'import tensorflow as tf\n'), (852, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {'epsilon': 'config.layer_norm_eps', 'name': '"""LayerNorm"""'}), True, 'import tensorflow as tf\n'), (853, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['config.embedding_size'], {'name': '"""dense"""'}), True, 'import tensorflow as tf\n'), (978, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['classifier_dropout'], {}), True, 'import tensorflow as tf\n'), (1198, 'tensorflow.reshape', 'tf.reshape', (['logits', '(-1, num_choices)'], {}), True, 'import tensorflow as tf\n'), (1250, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['classifier_dropout'], {}), True, 'import tensorflow as tf\n'), (1413, 'tensorflow.split', 'tf.split', (['logits', '(2)'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (1414, 'tensorflow.squeeze', 'tf.squeeze', (['start_logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (1415, 'tensorflow.squeeze', 'tf.squeeze', (['end_logits'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (82, 'tensorflow.name_scope', 'tf.name_scope', (['"""word_embeddings"""'], {}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.name_scope', 'tf.name_scope', (['"""token_type_embeddings"""'], {}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.name_scope', 'tf.name_scope', (['"""position_embeddings"""'], {}), True, 'import tensorflow as 
tf\n'), (125, 'tensorflow.gather', 'tf.gather', ([], {'params': 'self.weight', 'indices': 'input_ids'}), True, 'import tensorflow as tf\n'), (130, 'tensorflow.fill', 'tf.fill', ([], {'dims': 'input_shape', 'value': '(0)'}), True, 'import tensorflow as tf\n'), (263, 'tensorflow.math.sqrt', 'tf.math.sqrt', (['dk'], {}), True, 'import tensorflow as tf\n'), (360, 'tensorflow.reshape', 'tf.reshape', (['hidden_states', '[-1, self.num_groups, self.group_in_dim]'], {}), True, 'import tensorflow as tf\n'), (361, 'tensorflow.transpose', 'tf.transpose', (['self.kernel', '[2, 1, 0]'], {}), True, 'import tensorflow as tf\n'), (519, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['config.hidden_size'], {'name': '"""embeddings_project"""'}), True, 'import tensorflow as tf\n'), (540, 'tensorflow.fill', 'tf.fill', (['input_shape', '(1)'], {}), True, 'import tensorflow as tf\n'), (607, 'tensorflow.fill', 'tf.fill', (['input_shape', '(1)'], {}), True, 'import tensorflow as tf\n'), (610, 'tensorflow.fill', 'tf.fill', (['input_shape', '(0)'], {}), True, 'import tensorflow as tf\n'), (805, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.hidden_states'], {}), True, 'import tensorflow as tf\n'), (806, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.attentions'], {}), True, 'import tensorflow as tf\n'), (960, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.hidden_states'], {}), True, 'import tensorflow as tf\n'), (961, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.attentions'], {}), True, 'import tensorflow as tf\n'), (1081, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.hidden_states'], {}), True, 'import tensorflow as tf\n'), (1082, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.attentions'], {}), True, 'import tensorflow as tf\n'), (1114, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['MULTIPLE_CHOICE_DUMMY_INPUTS'], {}), True, 'import tensorflow as tf\n'), (1169, 'tensorflow.reshape', 'tf.reshape', (["inputs['input_ids']", '(-1, seq_length)'], {}), True, 'import tensorflow as tf\n'), (1171, 'tensorflow.reshape', 'tf.reshape', (["inputs['attention_mask']", '(-1, seq_length)'], {}), True, 'import tensorflow as tf\n'), (1174, 'tensorflow.reshape', 'tf.reshape', (["inputs['token_type_ids']", '(-1, seq_length)'], {}), True, 'import tensorflow as tf\n'), (1177, 'tensorflow.reshape', 'tf.reshape', (["inputs['position_ids']", '(-1, seq_length)'], {}), True, 'import tensorflow as tf\n'), (1228, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.hidden_states'], {}), True, 'import tensorflow as tf\n'), (1229, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.attentions'], {}), True, 'import tensorflow as tf\n'), (1326, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.hidden_states'], {}), True, 'import tensorflow as tf\n'), (1327, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.attentions'], {}), True, 'import tensorflow as tf\n'), (1436, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.hidden_states'], {}), True, 'import tensorflow as tf\n'), (1437, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['output.attentions'], {}), True, 'import tensorflow as tf\n'), (134, 'tensorflow.range', 'tf.range', ([], {'start': 'past_key_values_length', 'limit': '(input_shape[1] + past_key_values_length)'}), True, 'import tensorflow as tf\n'), (1216, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None, 
None)', 'tf.int32'], {'name': '"""input_ids"""'}), True, 'import tensorflow as tf\n'), (1217, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None, None)', 'tf.int32'], {'name': '"""attention_mask"""'}), True, 'import tensorflow as tf\n'), (1218, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None, None)', 'tf.int32'], {'name': '"""token_type_ids"""'}), True, 'import tensorflow as tf\n')] |
Yonder-OSS/D3M-Primitives | b5f2c14d2afdadc6e97316aae5dd33fe4b874b09 | '''
Bootstrapped from https://github.com/NewKnowledge/imagenet and refined for D3M purposes
Original implementation from Craig Corcoran
'''
import os
import math
import numpy as np
import tensorflow as tf
from tensorflow.keras.applications import inception_v3, mobilenet_v2, xception
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, GlobalMaxPooling2D
from tensorflow.keras.utils import to_categorical, Sequence
import logging
logger = logging.getLogger(__name__)
#logger.setLevel(logging.INFO)
class ImagenetModel:
''' A class for featurizing images using pre-trained neural nets on ImageNet
and finetuning those nets for downstream classification
'''
def __init__(self,
model='inception_v3',
weights = 'imagenet',
include_top = False,
pooling=None,
n_channels=None,
clf_head_dense_dim = 1024,
):
''' Creates ImageNet base model for featurization or classification and corresponding image
preprocessing function
:param model: options are xception, inception_v3, and mobilenet_v2
:param weights: 'imagenet' or filepath
:param include_top: whether to include original ImageNet classification head with 1000 classes
:param pooling: 'avg', 'max', or None
:param n_channels: number of channels to keep if performing featurization
:param clf_head_dense_dim: dimension of dense layer before softmax classification (only applies
if `include_top` is false)
'''
self.include_top = include_top # determines if used for classification or featurization
self.n_channels = n_channels
self.pooling = pooling
self.clf_head_dense_dim = clf_head_dense_dim
if model == 'xception':
self.model = xception.Xception(weights=weights, include_top=include_top, pooling=pooling)
self.preprocess = xception.preprocess_input
self.target_size = (299, 299)
if include_top:
self.decode = xception.decode_predictions
else:
self.output_dim = (n_channels if n_channels else 2048) * (1 if pooling else 10**2)
elif model == 'inception_v3':
self.model = inception_v3.InceptionV3(weights=weights, include_top=include_top, pooling=pooling)
self.preprocess = inception_v3.preprocess_input
self.target_size = (299, 299)
if include_top:
self.decode = inception_v3.decode_predictions
else:
self.output_dim = (n_channels if n_channels else 2048) * (1 if pooling else 8**2)
elif model == 'mobilenet_v2':
self.model = mobilenet_v2.MobileNetV2(weights=weights, include_top=include_top, pooling=pooling)
self.preprocess = mobilenet_v2.preprocess_input
self.target_size = (224, 224)
if include_top:
self.decode = mobilenet_v2.decode_predictions
else:
self.output_dim = (n_channels if n_channels else 1280) * (1 if pooling else 7**2)
else:
raise Exception('model option not implemented')
def _load_finetune_model(
self,
nclasses = 2,
weights_path = None,
):
''' Constructs finetuning model architecture and optionally loads weights
:param nclasses: number of classes on which to softmax over
:param weights_path: optional filepath from which to try to load weights
'''
out = self.model.output
if self.pooling is None:
out = GlobalAveragePooling2D()(out)  # if self.pooling == 'avg' else GlobalMaxPooling2D()(out)
dense = Dense(self.clf_head_dense_dim, activation='relu')(out)
preds = Dense(nclasses, activation='softmax')(dense)
finetune_model = Model(inputs = self.model.input, outputs = preds)
# try to load weights
if weights_path is not None:
if os.path.isfile(weights_path):
finetune_model.load_weights(weights_path)
return finetune_model
def get_features(self, images_array):
''' takes a batch of images as a 4-d array and returns the (flattened) imagenet features for those images as a 2-d array '''
if self.include_top:
raise Exception('getting features from a classification model with include_top=True is currently not supported')
if images_array.ndim != 4:
raise Exception('invalid input shape for images_array, expects a 4d array')
# preprocess and compute image features
logger.debug(f'preprocessing {images_array.shape[0]} images')
images_array = self.preprocess(images_array)
logger.debug('computing image features')
image_features = self.model.predict(images_array)
# if n_channels is specified, only keep that number of channels
if self.n_channels:
logger.debug(f'truncating to first {self.n_channels} channels')
image_features = image_features.T[: self.n_channels].T
# reshape output array by flattening each image into a vector of features
shape = image_features.shape
return image_features.reshape(shape[0], np.prod(shape[1:]))
def predict(self, images_array):
''' alias for get_features to more closely match scikit-learn interface '''
return self.get_features(images_array)
def finetune(self,
train_dataset,
val_dataset = None,
nclasses = 2,
top_layer_epochs = 1,
unfreeze_proportions = [0.5],
all_layer_epochs = 5,
class_weight = None,
optimizer_top = 'rmsprop',
optimizer_full = 'sgd',
callbacks = None,
num_workers = 8,
load_weights_path = None,
save_weights_path = None,
):
''' Finetunes the ImageNet model iteratively on a smaller set of images with (potentially) a smaller set of classes.
First finetunes the last layer, then freezes the bottom N layers and retrains the rest.
:param train_dataset: tf.keras Sequence (e.g. ImageNetGen) yielding (X, y) batches of tf.constant tensors for training
:param val_dataset: tf.keras Sequence yielding (X, y) batches of tf.constant tensors for validation, optional
:param nclasses: number of classes
:param top_layer_epochs: how many epochs for which to finetune classification head (happens first)
:param unfreeze_proportions: list of proportions representing how much of the base ImageNet model one wants to
unfreeze (later layers unfrozen) for another round of finetuning
:param all_layer_epochs: how many epochs for which to finetune entire model (happens second)
:param class_weight: class weights (used for both training steps)
:param optimizer_top: optimizer to use for training of classification head
:param optimizer_full: optimizer to use for training full classification model
* suggest to use lower learning rate / more conservative optimizer for this step to
prevent catastrophic forgetting
:param callbacks: optional list of callbacks to use for each round of finetuning
:param num_workers: number of workers to use for multiprocess data loading
:param load_weights_path: optional filepath from which to try to load weights
:param save_weights_path: optional filepath to which to store weights
'''
finetune_model = self._load_finetune_model(
nclasses = nclasses,
weights_path=load_weights_path
)
fitting_histories = []
# freeze all convolutional InceptionV3 layers, retrain top layer
for layer in self.model.layers:
layer.trainable = False
finetune_model.compile(
optimizer=optimizer_top,
loss='categorical_crossentropy')
fitting_histories.append(
finetune_model.fit(
train_dataset,
validation_data = val_dataset,
epochs = top_layer_epochs,
class_weight = class_weight,
shuffle = True,
use_multiprocessing = True,
workers = num_workers,
callbacks = callbacks
)
)
# iteratively unfreeze specified proportion of later ImageNet base layers and finetune
finetune_model.compile(
# SGD(lr=0.0001, momentum=0.9)
optimizer=optimizer_full,
loss='categorical_crossentropy')
for p in unfreeze_proportions:
freeze_count = int(len(self.model.layers) * p)
for layer in finetune_model.layers[:freeze_count]:
layer.trainable = False
for layer in finetune_model.layers[freeze_count:]:
layer.trainable = True
fitting_histories.append(
finetune_model.fit(
train_dataset,
validation_data = val_dataset,
epochs = all_layer_epochs,
class_weight = class_weight,
shuffle = True,
use_multiprocessing = True,
workers = num_workers,
callbacks = callbacks
)
)
# save weights
if save_weights_path is not None:
finetune_model.save_weights(save_weights_path)
return fitting_histories
def finetune_classify(self,
test_dataset,
nclasses = 2,
num_workers = 8,
load_weights_path = None,
):
''' Uses the finetuned model to predict on a test dataset.
:param test_dataset: tf.keras Sequence (e.g. ImageNetGen) yielding batches of tf.constant image tensors for inference
:param nclasses: number of classes
:param num_workers: number of workers to use for multiprocess data loading
:param load_weights_path: optional filepath from which to try to load weights
:return: array of softmaxed prediction probabilities
'''
finetune_model = self._load_finetune_model(
nclasses = nclasses,
weights_path = load_weights_path
)
return finetune_model.predict_generator(test_dataset,
use_multiprocessing = True,
workers = num_workers
)
class ImageNetGen(Sequence):
""" Tf.Keras Sequence for ImageNet input data """
def __init__(self, X, y = None, batch_size = 32):
self.X = X
self.y = y
self.batch_size = batch_size
def __len__(self):
return math.ceil(self.X.shape[0] / self.batch_size)
def __getitem__(self, idx):
batch_x = self.X[idx * self.batch_size:(idx + 1) * self.batch_size]
if self.y is None:
return tf.constant(batch_x)
else:
batch_y = self.y[idx * self.batch_size:(idx + 1) * self.batch_size]
return tf.constant(batch_x), tf.constant(batch_y)
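# ---------------------------------------------------------------------------
# Hedged usage sketch (added for illustration; random dummy images stand in
# for real data, and the first run downloads ImageNet weights):
if __name__ == "__main__":
featurizer = ImagenetModel(model='inception_v3', pooling='avg', n_channels=512)
dummy_images = (np.random.rand(4, 299, 299, 3) * 255).astype(np.float32)
features = featurizer.get_features(dummy_images)
print(features.shape)  # expected (4, 512): 'avg' pooling yields 2048 channels, truncated to the first 512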
| [
"tensorflow.constant",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.applications.xception.Xception",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"numpy.prod",
"tensorflow.keras.applications.inception_v3.InceptionV3"
] | primitives/image_classification/utils/imagenet.py | [(17, 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), False, 'import logging\n'), (92, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'self.model.input', 'outputs': 'preds'}), False, 'from tensorflow.keras.models import Model\n'), (255, 'math.ceil', 'math.ceil', (['(self.X.shape[0] / self.batch_size)'], {}), False, 'import math\n'), (51, 'tensorflow.keras.applications.xception.Xception', 'xception.Xception', ([], {'weights': 'weights', 'include_top': 'include_top', 'pooling': 'pooling'}), False, 'from tensorflow.keras.applications import inception_v3, mobilenet_v2, xception\n'), (90, 'tensorflow.keras.layers.Dense', 'Dense', (['self.clf_head_dense_dim'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (91, 'tensorflow.keras.layers.Dense', 'Dense', (['nclasses'], {'activation': '"""softmax"""'}), False, 'from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (96, 'os.path.isfile', 'os.path.isfile', (['weights_path'], {}), False, 'import os\n'), (122, 'numpy.prod', 'np.prod', (['shape[1:]'], {}), True, 'import numpy as np\n'), (260, 'tensorflow.constant', 'tf.constant', (['batch_x'], {}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.keras.applications.inception_v3.InceptionV3', 'inception_v3.InceptionV3', ([], {'weights': 'weights', 'include_top': 'include_top', 'pooling': 'pooling'}), False, 'from tensorflow.keras.applications import inception_v3, mobilenet_v2, xception\n'), (89, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), False, 'from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (263, 'tensorflow.constant', 'tf.constant', (['batch_x'], {}), True, 'import tensorflow as tf\n'), (263, 'tensorflow.constant', 'tf.constant', (['batch_y'], {}), True, 'import tensorflow as tf\n')] |
csepreghy/spectral_analysis | 1cbd9770347a71721164a7daf7b133ad0eeba8e4 | import numpy as np
import pandas as pd
import os
import matplotlib.pyplot as plt
import time
from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.callbacks import TensorBoard, History, EarlyStopping, ModelCheckpoint
from tensorflow.keras.optimizers import Adam, Nadam, RMSprop
from tensorflow.keras.callbacks import EarlyStopping
from kerastuner.engine.hyperparameters import HyperParameters
from kerastuner.tuners import RandomSearch
from sklearn.preprocessing import MinMaxScaler, StandardScaler
import seaborn as sns
from spectral_analysis.classifiers.neural_network.helper_functions import train_test_split
from spectral_analysis.plotify import Plotify
class AutoEncoder():
def __init__(self, df_source_info, df_fluxes, df_wavelengths, load_model, weights_path=''):
self.load_model = load_model
self.weights_path = weights_path
X = self._prepare_data(df_source_info, df_fluxes, df_wavelengths)
indeces = list(range(len(X)))
X_train, X_test, self.i_train, self.i_test = train_test_split(X, 0.2, indeces=indeces)
X_train, X_val, self.i_train, self.i_val = train_test_split(X_train, 0.2, indeces=indeces)
self.scaler = StandardScaler()
X_train = self.scaler.fit_transform(X_train)
X_test = self.scaler.transform(X_test)
X_val = self.scaler.transform(X_val)
self.X_train = np.expand_dims(X_train, axis=2)
self.X_test = np.expand_dims(X_test, axis=2)
self.X_val = np.expand_dims(X_val, axis=2)
def _prepare_data(self, df_source_info, df_fluxes, df_wavelengths):
# self.df_source_info = df_source_info.loc[df_source_info['class'] == 'QSO']
self.df_source_info = df_source_info
self.objids = self.df_source_info['objid'].to_numpy()
fluxes = df_fluxes.loc[df_fluxes['objid'].isin(self.objids)]
X = np.delete(fluxes.values, 0, axis=1)  # drop the objid column, keep fluxes only
X = X[:, 0::2]  # keep every second flux value to halve the spectrum length
print(f'X.shape = {X.shape}')
X = X[:, np.mod(np.arange(X[0].size), 25) != 0]  # drop every 25th remaining value
X = X[:, :1792]  # truncate so the length divides cleanly through the pooling stack (1792 / 256 = 7)
print(f'X.shape = {X.shape}')
wavelengths = df_wavelengths.to_numpy()
wavelengths = wavelengths[::2]  # subsample wavelengths to match the flux downsampling
self.wavelengths = wavelengths[0:1792]
# plot_spectrum(X[0], wavelengths)
return X
def build_model(self):
# ================================================================================== #
# ==================================== ENCODER ===================================== #
# ================================================================================== #
input_layer = Input(shape=(self.X_train.shape[1], 1))
# encoder
x = Conv1D(filters=256,
kernel_size=7,
activation='relu',
padding='same')(input_layer)
x = MaxPooling1D(4)(x)
x = Conv1D(filters=128,
kernel_size=5,
activation='relu',
padding='same')(x)
x = MaxPooling1D(4)(x)
x = Conv1D(filters=64,
kernel_size=5,
activation='relu',
padding='same')(x)
x = MaxPooling1D(2)(x)
x = Conv1D(filters=32,
kernel_size=3,
activation='relu',
padding='same')(x)
x = MaxPooling1D(2)(x)
x = Conv1D(filters=32,
kernel_size=3,
activation='relu',
padding='same')(x)
x = MaxPooling1D(2)(x)
x = Conv1D(filters=1,
kernel_size=3,
activation='relu',
padding='same')(x)
encoded = MaxPooling1D(2, padding='same')(x)
# ================================================================================== #
# ==================================== DECODER ===================================== #
# ================================================================================== #
x = Conv1D(filters=1,
kernel_size=3,
activation='relu',
padding='same')(encoded)
x = UpSampling1D(2)(x)
x = Conv1D(filters=32,
kernel_size=3,
activation='relu',
padding='same')(x)
x = UpSampling1D(2)(x)
x = Conv1D(filters=32,
kernel_size=3,
activation='relu',
padding='same')(x)
x = UpSampling1D(2)(x)
x = Conv1D(filters=64,
kernel_size=5,
activation='relu',
padding='same')(x)
x = UpSampling1D(2)(x)
x = Conv1D(filters=128,
kernel_size=5,
activation='relu',
padding='same')(x)
x = UpSampling1D(4)(x)
x = Conv1D(filters=256,
kernel_size=7,
activation='relu',
padding='same')(x)
x = UpSampling1D(4)(x)
decoded = Conv1D(1, 1, activation='tanh', padding='same')(x)
self.autoencoder = Model(input_layer, decoded)
self.autoencoder.summary()
self.autoencoder.compile(loss='mse', optimizer='adam')
return self.autoencoder
def train_model(self, epochs, batch_size=32):
model = self.build_model()
if not self.load_model:
modelcheckpoint = ModelCheckpoint(filepath='logs/1-14_autoencoder.epoch{epoch:02d}.h5',
monitor='val_loss',
save_best_only=True)
history = model.fit(x=self.X_train,
y=self.X_train,
epochs=epochs,
batch_size=batch_size,
validation_data=(self.X_val, self.X_val),
callbacks=[EarlyStopping('val_loss', patience=8), modelcheckpoint])
self.evaluate_model(model)
else:
model.load_weights(self.weights_path)
print(f'model = {model}')
# self.evaluate_model(model)
self.get_bottleneck_values(model)
return model
def get_bottleneck_values(self, model):
bottleneck = model.get_layer('conv1d_5')
extractor = Model(inputs=model.inputs, outputs=[bottleneck.output])
features = extractor(self.X_test)
features = np.squeeze(features, axis=2)
df_source_info_test = pd.DataFrame({'class': self.df_source_info.iloc[self.i_test]['class'].values})
print(f'df_source_info_test = {df_source_info_test}')
df = pd.DataFrame(features)
df = df.join(df_source_info_test)
print(f'df = {df}')
sns.set(style="ticks", color_codes=True)
sns.pairplot(df, hue='class')
plt.savefig('plots/autoencoder_pairplot', dpi=100)
def evaluate_model(self, model):
preds = model.predict(self.X_test)
print(self.X_test.shape)
self.X_test = np.squeeze(self.X_test, axis=2)
preds = np.squeeze(preds, axis=2)
print(self.X_test.shape)
self.X_test = self.scaler.inverse_transform(self.X_test)
preds = self.scaler.inverse_transform(preds)
for i in range(100):
qso_ra = self.df_source_info.iloc[self.i_test[i]]['ra']
qso_dec = self.df_source_info.iloc[self.i_test[i]]['dec']
qso_plate = self.df_source_info.iloc[self.i_test[i]]['plate']
qso_z = self.df_source_info.iloc[self.i_test[i]]['z']
qso_class = self.df_source_info.iloc[self.i_test[i]]['class']
plotify = Plotify(theme='ugly')
_, axs = plotify.get_figax(nrows=2, figsize=(5.8, 8))
axs[0].plot(self.wavelengths, self.X_test[i], color=plotify.c_orange)
axs[1].plot(self.wavelengths, preds[i], color=plotify.c_orange)
axs[0].set_title(f'ra = {qso_ra}, dec = {qso_dec}, \n z = {qso_z}, plate = {qso_plate}, class = {qso_class} \n', fontsize=14)
axs[1].set_title(f'Autoencoder recreation \n')
axs[0].set_ylabel(r'$F_{\lambda[10^{-17} erg \: cm^{-2}s^{-1} Å^{-1}]}$', fontsize=14)
axs[1].set_ylabel(r'$F_{\lambda[10^{-17} erg \: cm^{-2}s^{-1} Å^{-1}]}$', fontsize=14)
axs[1].set_xlabel('Wavelength (Å)')
plt.subplots_adjust(hspace=0.4)
plt.savefig(f'plots/autoencoder/__all_sources/_autoencoder_{i}', dpi=160)
return preds
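# ---------------------------------------------------------------------------
# Hedged helper sketch mirroring get_bottleneck_values() above (added for
# illustration, not part of the original pipeline). The default layer name
# 'conv1d_5' comes from Keras' automatic layer naming and can shift if the
# architecture changes, so confirm it with model.summary() first.
def extract_latent_features(model, X, layer_name='conv1d_5'):
"""Returns bottleneck activations for already-scaled spectra X."""
extractor = Model(inputs=model.inputs, outputs=[model.get_layer(layer_name).output])
return np.squeeze(extractor(X), axis=2)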
def main():
df_fluxes = pd.read_hdf('data/sdss/preprocessed/balanced.h5', key='fluxes').head(5000)
df_source_info = pd.read_hdf('data/sdss/preprocessed/balanced.h5', key='source_info').head(5000)
df_wavelengths = pd.read_hdf('data/sdss/preprocessed/balanced.h5', key='wavelengths')
ae = AutoEncoder(df_source_info, df_fluxes, df_wavelengths, load_model=False, weights_path='logs/colab-logs/_all_sources1-14_autoencoder.epoch30.h5')
ae.train_model(epochs=12, batch_size=64)
if __name__ == "__main__":
main()
| [
"tensorflow.keras.callbacks.ModelCheckpoint",
"pandas.read_hdf",
"numpy.expand_dims",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.UpSampling1D",
"numpy.arange",
"numpy.squeeze",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.layers.MaxPooling1D",
"pandas.DataFrame",
"matplotlib.pyplot.savefig",
"numpy.delete",
"matplotlib.pyplot.subplots_adjust",
"sklearn.preprocessing.StandardScaler",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Input"
] | spectral_analysis/unsupervised_learning/autoencoder/autoencoder_bestmodel.py | [(240, 'pandas.read_hdf', 'pd.read_hdf', (['"""data/sdss/preprocessed/balanced.h5"""'], {'key': '"""wavelengths"""'}), True, 'import pandas as pd\n'), (29, 'spectral_analysis.classifiers.neural_network.helper_functions.train_test_split', 'train_test_split', (['X', '(0.2)'], {'indeces': 'indeces'}), False, 'from spectral_analysis.classifiers.neural_network.helper_functions import train_test_split\n'), (30, 'spectral_analysis.classifiers.neural_network.helper_functions.train_test_split', 'train_test_split', (['X_train', '(0.2)'], {'indeces': 'indeces'}), False, 'from spectral_analysis.classifiers.neural_network.helper_functions import train_test_split\n'), (32, 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), False, 'from sklearn.preprocessing import MinMaxScaler, StandardScaler\n'), (37, 'numpy.expand_dims', 'np.expand_dims', (['X_train'], {'axis': '(2)'}), True, 'import numpy as np\n'), (38, 'numpy.expand_dims', 'np.expand_dims', (['X_test'], {'axis': '(2)'}), True, 'import numpy as np\n'), (39, 'numpy.expand_dims', 'np.expand_dims', (['X_val'], {'axis': '(2)'}), True, 'import numpy as np\n'), (47, 'numpy.delete', 'np.delete', (['fluxes.values', '(0)'], {'axis': '(1)'}), True, 'import numpy as np\n'), (66, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(self.X_train.shape[1], 1)'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (153, 'tensorflow.keras.models.Model', 'Model', (['input_layer', 'decoded'], {}), False, 'from tensorflow.keras.models import Model, Sequential\n'), (187, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'model.inputs', 'outputs': '[bottleneck.output]'}), False, 'from tensorflow.keras.models import Model, Sequential\n'), (189, 'numpy.squeeze', 'np.squeeze', (['features'], {'axis': '(2)'}), True, 'import numpy as np\n'), (191, 'pandas.DataFrame', 'pd.DataFrame', (["{'class': self.df_source_info.iloc[self.i_test]['class'].values}"], {}), True, 'import pandas as pd\n'), (195, 'pandas.DataFrame', 'pd.DataFrame', (['features'], {}), True, 'import pandas as pd\n'), (200, 'seaborn.set', 'sns.set', ([], {'style': '"""ticks"""', 'color_codes': '(True)'}), True, 'import seaborn as sns\n'), (201, 'seaborn.pairplot', 'sns.pairplot', (['df'], {'hue': '"""class"""'}), True, 'import seaborn as sns\n'), (202, 'matplotlib.pyplot.savefig', 'plt.savefig', (['"""plots/autoencoder_pairplot"""'], {'dpi': '(100)'}), True, 'import matplotlib.pyplot as plt\n'), (208, 'numpy.squeeze', 'np.squeeze', (['self.X_test'], {'axis': '(2)'}), True, 'import numpy as np\n'), (209, 'numpy.squeeze', 'np.squeeze', (['preds'], {'axis': '(2)'}), True, 'import numpy as np\n'), (69, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(256)', 'kernel_size': '(7)', 'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (73, 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['(4)'], {}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (75, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(128)', 'kernel_size': '(5)', 'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, 
MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (80, 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['(4)'], {}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (81, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(64)', 'kernel_size': '(5)', 'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (85, 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['(2)'], {}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (87, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (91, 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['(2)'], {}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (93, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (97, 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['(2)'], {}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (99, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(1)', 'kernel_size': '(3)', 'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (104, 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['(2)'], {'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (110, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(1)', 'kernel_size': '(3)', 'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (115, 'tensorflow.keras.layers.UpSampling1D', 'UpSampling1D', (['(2)'], {}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (117, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (122, 'tensorflow.keras.layers.UpSampling1D', 'UpSampling1D', (['(2)'], {}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (124, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, 
BatchNormalization, Reshape\n'), (129, 'tensorflow.keras.layers.UpSampling1D', 'UpSampling1D', (['(2)'], {}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (131, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(64)', 'kernel_size': '(5)', 'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (136, 'tensorflow.keras.layers.UpSampling1D', 'UpSampling1D', (['(2)'], {}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (138, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(128)', 'kernel_size': '(5)', 'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (143, 'tensorflow.keras.layers.UpSampling1D', 'UpSampling1D', (['(4)'], {}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (145, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(256)', 'kernel_size': '(7)', 'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (149, 'tensorflow.keras.layers.UpSampling1D', 'UpSampling1D', (['(4)'], {}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (151, 'tensorflow.keras.layers.Conv1D', 'Conv1D', (['(1)', '(1)'], {'activation': '"""tanh"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten, Conv1D, MaxPooling1D, UpSampling1D, BatchNormalization, Reshape\n'), (163, 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': '"""logs/1-14_autoencoder.epoch{epoch:02d}.h5"""', 'monitor': '"""val_loss"""', 'save_best_only': '(True)'}), False, 'from tensorflow.keras.callbacks import TensorBoard, History, EarlyStopping, ModelCheckpoint\n'), (222, 'spectral_analysis.plotify.Plotify', 'Plotify', ([], {'theme': '"""ugly"""'}), False, 'from spectral_analysis.plotify import Plotify\n'), (232, 'matplotlib.pyplot.subplots_adjust', 'plt.subplots_adjust', ([], {'hspace': '(0.4)'}), True, 'import matplotlib.pyplot as plt\n'), (233, 'matplotlib.pyplot.savefig', 'plt.savefig', (['f"""plots/autoencoder/__all_sources/_autoencoder_{i}"""'], {'dpi': '(160)'}), True, 'import matplotlib.pyplot as plt\n'), (238, 'pandas.read_hdf', 'pd.read_hdf', (['"""data/sdss/preprocessed/balanced.h5"""'], {'key': '"""fluxes"""'}), True, 'import pandas as pd\n'), (239, 'pandas.read_hdf', 'pd.read_hdf', (['"""data/sdss/preprocessed/balanced.h5"""'], {'key': '"""source_info"""'}), True, 'import pandas as pd\n'), (172, 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', (['"""val_loss"""'], {'patience': '(8)'}), False, 'from tensorflow.keras.callbacks import EarlyStopping\n'), (50, 'numpy.arange', 'np.arange', (['X[0].size'], {}), True, 'import numpy as np\n')] |
non778/examples | d1eed1a6a987b0ebbb0341925a480dc3e60489ee | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""End-to-end tests that check model correctness."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import tempfile
import unittest
import numpy as np
import tensorflow as tf
from tensorflow.compat import v1 as tfv1
# pylint: disable=g-bad-import-order
from tfltransfer import bases
from tfltransfer import optimizers
from tfltransfer import heads
from tfltransfer import tflite_transfer_converter
# pylint: enable=g-bad-import-order
IMAGE_SIZE = 224
BATCH_SIZE = 128
NUM_CLASSES = 5
VALIDATION_SPLIT = 0.2
LEARNING_RATE = 0.001
BOTTLENECK_SHAPE = (7, 7, 1280)
DATASET_URL = 'https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz'
class TransferModel(object):
"""Test consumer of models generated by the converter."""
def __init__(self, dataset_dir, base_model, head_model, optimizer):
"""Creates a wrapper for a set of models and a data set."""
self.dataset_dir = dataset_dir
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1. / 255, validation_split=VALIDATION_SPLIT)
self.train_img_generator = datagen.flow_from_directory(
self.dataset_dir,
target_size=(IMAGE_SIZE, IMAGE_SIZE),
batch_size=BATCH_SIZE,
subset='training')
self.val_img_generator = datagen.flow_from_directory(
self.dataset_dir,
target_size=(IMAGE_SIZE, IMAGE_SIZE),
batch_size=BATCH_SIZE,
subset='validation')
converter = tflite_transfer_converter.TFLiteTransferConverter(
NUM_CLASSES, base_model, head_model, optimizer, BATCH_SIZE)
models = converter._convert()
self.initialize_model = models['initialize']
self.bottleneck_model = models['bottleneck']
self.train_head_model = models['train_head']
self.inference_model = models['inference']
self.optimizer_model = models['optimizer']
self.variables = self._generate_initial_variables()
optim_state_shapes = self._optimizer_state_shapes()
self.optim_state = [
np.zeros(shape, dtype=np.float32) for shape in optim_state_shapes
]
def _generate_initial_variables(self):
"""Generates the initial model variables."""
interpreter = tf.lite.Interpreter(model_content=self.initialize_model)
zero_in = interpreter.get_input_details()[0]
variable_outs = interpreter.get_output_details()
interpreter.allocate_tensors()
interpreter.set_tensor(zero_in['index'], np.float32(0.))
interpreter.invoke()
return [interpreter.get_tensor(var['index']) for var in variable_outs]
def _optimizer_state_shapes(self):
"""Reads the shapes of the optimizer parameters (mutable state)."""
interpreter = tf.lite.Interpreter(model_content=self.optimizer_model)
num_variables = len(self.variables)
optim_state_inputs = interpreter.get_input_details()[num_variables * 2:]
return [input_['shape'] for input_ in optim_state_inputs]
def prepare_bottlenecks(self):
"""Passes all images through the base model and save the bottlenecks.
This method has to be called before any training or inference.
"""
self.train_bottlenecks, self.train_labels = (
self._collect_and_generate_bottlenecks(self.train_img_generator))
self.val_bottlenecks, self.val_labels = (
self._collect_and_generate_bottlenecks(self.val_img_generator))
def _collect_and_generate_bottlenecks(self, image_gen):
"""Consumes a generator and converts all images to bottlenecks.
Args:
image_gen: A Keras data generator for images to process
Returns:
Two NumPy arrays: (bottlenecks, labels).
"""
collected_bottlenecks = np.zeros(
(image_gen.samples,) + BOTTLENECK_SHAPE, dtype=np.float32)
collected_labels = np.zeros((image_gen.samples, NUM_CLASSES),
dtype=np.float32)
next_idx = 0
for bottlenecks, truth in self._generate_bottlenecks(
make_finite(image_gen)):
batch_size = bottlenecks.shape[0]
collected_bottlenecks[next_idx:next_idx + batch_size] = bottlenecks
collected_labels[next_idx:next_idx + batch_size] = truth
next_idx += batch_size
return collected_bottlenecks, collected_labels
def _generate_bottlenecks(self, image_gen):
"""Generator adapter that passes images through the bottleneck model.
Args:
image_gen: A generator that returns images to be processed. Images are
paired with ground truth labels.
Yields:
Bottlenecks from input images, paired with ground truth labels.
"""
interpreter = tf.lite.Interpreter(model_content=self.bottleneck_model)
[x_in] = interpreter.get_input_details()
[bottleneck_out] = interpreter.get_output_details()
for (x, y) in image_gen:
batch_size = x.shape[0]
interpreter.resize_tensor_input(x_in['index'],
(batch_size, IMAGE_SIZE, IMAGE_SIZE, 3))
interpreter.allocate_tensors()
interpreter.set_tensor(x_in['index'], x)
interpreter.invoke()
bottleneck = interpreter.get_tensor(bottleneck_out['index'])
yield bottleneck, y
def train_head(self, num_epochs):
"""Trains the head model for a given number of epochs.
SGD is used as an optimizer.
Args:
num_epochs: how many epochs should be trained
Returns:
A list of train_loss values after every epoch trained.
Raises:
RuntimeError: when prepare_bottlenecks() has not been called.
"""
if not hasattr(self, 'train_bottlenecks'):
raise RuntimeError('prepare_bottlenecks has not been called')
results = []
for _ in range(num_epochs):
loss = self._train_one_epoch(
self._generate_batches(self.train_bottlenecks, self.train_labels))
results.append(loss)
return results
def _generate_batches(self, x, y):
"""Creates a generator that iterates over the data in batches."""
num_total = x.shape[0]
for begin in range(0, num_total, BATCH_SIZE):
end = min(begin + BATCH_SIZE, num_total)
yield x[begin:end], y[begin:end]
def _train_one_epoch(self, train_gen):
"""Performs one training epoch."""
interpreter = tf.lite.Interpreter(model_content=self.train_head_model)
interpreter.allocate_tensors()
x_in, y_in = interpreter.get_input_details()[:2]
variable_ins = interpreter.get_input_details()[2:]
loss_out = interpreter.get_output_details()[0]
gradient_outs = interpreter.get_output_details()[1:]
epoch_loss = 0.
num_processed = 0
for bottlenecks, truth in train_gen:
batch_size = bottlenecks.shape[0]
if batch_size < BATCH_SIZE:
bottlenecks = pad_batch(bottlenecks, BATCH_SIZE)
truth = pad_batch(truth, BATCH_SIZE)
interpreter.set_tensor(x_in['index'], bottlenecks)
interpreter.set_tensor(y_in['index'], truth)
for variable_in, variable_value in zip(variable_ins, self.variables):
interpreter.set_tensor(variable_in['index'], variable_value)
interpreter.invoke()
loss = interpreter.get_tensor(loss_out['index'])
gradients = [
interpreter.get_tensor(gradient_out['index'])
for gradient_out in gradient_outs
]
self._apply_gradients(gradients)
epoch_loss += loss * batch_size
num_processed += batch_size
epoch_loss /= num_processed
return epoch_loss
def _apply_gradients(self, gradients):
"""Applies the optimizer to the model parameters."""
interpreter = tf.lite.Interpreter(model_content=self.optimizer_model)
interpreter.allocate_tensors()
num_variables = len(self.variables)
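    # Input layout: current variable values, then gradients, then optimizer
    # state; outputs are the updated variables followed by the updated state.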
variable_ins = interpreter.get_input_details()[:num_variables]
gradient_ins = interpreter.get_input_details()[num_variables:num_variables *
2]
state_ins = interpreter.get_input_details()[num_variables * 2:]
variable_outs = interpreter.get_output_details()[:num_variables]
state_outs = interpreter.get_output_details()[num_variables:]
for variable, gradient, variable_in, gradient_in in zip(
self.variables, gradients, variable_ins, gradient_ins):
interpreter.set_tensor(variable_in['index'], variable)
interpreter.set_tensor(gradient_in['index'], gradient)
for optim_state_elem, state_in in zip(self.optim_state, state_ins):
interpreter.set_tensor(state_in['index'], optim_state_elem)
interpreter.invoke()
self.variables = [
interpreter.get_tensor(variable_out['index'])
for variable_out in variable_outs
]
self.optim_state = [
interpreter.get_tensor(state_out['index']) for state_out in state_outs
]
def measure_inference_accuracy(self):
"""Runs the inference model and measures accuracy on the validation set."""
interpreter = tf.lite.Interpreter(model_content=self.inference_model)
bottleneck_in = interpreter.get_input_details()[0]
variable_ins = interpreter.get_input_details()[1:]
[y_out] = interpreter.get_output_details()
inference_accuracy = 0.
num_processed = 0
for bottleneck, truth in self._generate_batches(self.val_bottlenecks,
self.val_labels):
batch_size = bottleneck.shape[0]
interpreter.resize_tensor_input(bottleneck_in['index'],
(batch_size,) + BOTTLENECK_SHAPE)
interpreter.allocate_tensors()
interpreter.set_tensor(bottleneck_in['index'], bottleneck)
for variable_in, variable_value in zip(variable_ins, self.variables):
interpreter.set_tensor(variable_in['index'], variable_value)
interpreter.invoke()
preds = interpreter.get_tensor(y_out['index'])
acc = (np.argmax(preds, axis=1) == np.argmax(truth,
axis=1)).sum() / batch_size
inference_accuracy += acc * batch_size
num_processed += batch_size
inference_accuracy /= num_processed
return inference_accuracy
def make_finite(data_gen):
"""An adapter for Keras data generators that makes them finite.
The default behavior in Keras is to keep looping infinitely through
the data.
Args:
data_gen: An infinite Keras data generator.
Yields:
Same values as the parameter generator.
"""
num_samples = data_gen.samples
num_processed = 0
for batch in data_gen:
batch_size = batch[0].shape[0]
if batch_size + num_processed > num_samples:
batch_size = num_samples - num_processed
should_stop = True
else:
should_stop = False
if batch_size == 0:
return
batch = tuple(x[:batch_size] for x in batch)
yield batch
num_processed += batch_size
if should_stop:
return
# TODO(b/135138207) investigate if we can get rid of this.
def pad_batch(batch, batch_size):
"""Resize batch to a given size, tiling present samples over missing.
Example:
Suppose batch_size is 5, batch is [1, 2].
Then the return value is [1, 2, 1, 2, 1].
Args:
batch: An ndarray with first dimension size <= batch_size.
batch_size: Desired size for first dimension.
Returns:
An ndarray of the same shape, except first dimension has
the desired size.
"""
padded = np.zeros((batch_size,) + batch.shape[1:], dtype=batch.dtype)
next_idx = 0
while next_idx < batch_size:
fill_len = min(batch.shape[0], batch_size - next_idx)
padded[next_idx:next_idx + fill_len] = batch[:fill_len]
next_idx += fill_len
return padded
class ModelCorrectnessTest(unittest.TestCase):
@classmethod
def setUpClass(cls):
super(ModelCorrectnessTest, cls).setUpClass()
zip_file = tf.keras.utils.get_file(
origin=DATASET_URL, fname='flower_photos.tgz', extract=True)
cls.dataset_dir = os.path.join(os.path.dirname(zip_file), 'flower_photos')
mobilenet_dir = tempfile.mkdtemp('tflite-transfer-test')
mobilenet_keras = tf.keras.applications.MobileNetV2(
input_shape=(IMAGE_SIZE, IMAGE_SIZE, 3),
include_top=False,
weights='imagenet')
tfv1.keras.experimental.export_saved_model(mobilenet_keras, mobilenet_dir)
cls.mobilenet_dir = mobilenet_dir
def setUp(self):
super(ModelCorrectnessTest, self).setUp()
self.mobilenet_dir = ModelCorrectnessTest.mobilenet_dir
self.dataset_dir = ModelCorrectnessTest.dataset_dir
def test_mobilenet_v2_saved_model_and_softmax_classifier(self):
base_model = bases.SavedModelBase(self.mobilenet_dir)
head_model = heads.SoftmaxClassifierHead(BATCH_SIZE, BOTTLENECK_SHAPE,
NUM_CLASSES)
optimizer = optimizers.SGD(LEARNING_RATE)
model = TransferModel(self.dataset_dir, base_model, head_model, optimizer)
self.assertModelAchievesAccuracy(model, 0.80)
def test_mobilenet_v2_saved_model_quantized_and_softmax_classifier(self):
base_model = bases.SavedModelBase(self.mobilenet_dir, quantize=True)
head_model = heads.SoftmaxClassifierHead(BATCH_SIZE, BOTTLENECK_SHAPE,
NUM_CLASSES)
optimizer = optimizers.SGD(LEARNING_RATE)
model = TransferModel(self.dataset_dir, base_model, head_model, optimizer)
self.assertModelAchievesAccuracy(model, 0.80)
def test_mobilenet_v2_base_and_softmax_classifier(self):
base_model = bases.MobileNetV2Base()
head_model = heads.SoftmaxClassifierHead(BATCH_SIZE, BOTTLENECK_SHAPE,
NUM_CLASSES)
optimizer = optimizers.SGD(LEARNING_RATE)
model = TransferModel(self.dataset_dir, base_model, head_model, optimizer)
self.assertModelAchievesAccuracy(model, 0.80)
def test_mobilenet_v2_base_and_softmax_classifier_l2(self):
base_model = bases.MobileNetV2Base()
head_model = heads.SoftmaxClassifierHead(
BATCH_SIZE, BOTTLENECK_SHAPE, NUM_CLASSES, l2_reg=0.1)
optimizer = optimizers.SGD(LEARNING_RATE)
model = TransferModel(self.dataset_dir, base_model, head_model, optimizer)
self.assertModelAchievesAccuracy(model, 0.80)
def test_mobilenet_v2_base_quantized_and_softmax_classifier(self):
base_model = bases.MobileNetV2Base(quantize=True)
head_model = heads.SoftmaxClassifierHead(BATCH_SIZE, BOTTLENECK_SHAPE,
NUM_CLASSES)
optimizer = optimizers.SGD(LEARNING_RATE)
model = TransferModel(self.dataset_dir, base_model, head_model, optimizer)
self.assertModelAchievesAccuracy(model, 0.80)
def test_mobilenet_v2_base_and_softmax_classifier_adam(self):
base_model = bases.MobileNetV2Base()
head_model = heads.SoftmaxClassifierHead(BATCH_SIZE, BOTTLENECK_SHAPE,
NUM_CLASSES)
optimizer = optimizers.Adam()
model = TransferModel(self.dataset_dir, base_model, head_model, optimizer)
self.assertModelAchievesAccuracy(model, 0.80)
def assertModelAchievesAccuracy(self, model, target_accuracy, num_epochs=30):
model.prepare_bottlenecks()
print('Bottlenecks prepared')
history = model.train_head(num_epochs)
print('Training completed, history = {}'.format(history))
accuracy = model.measure_inference_accuracy()
print('Final accuracy = {:.2f}'.format(accuracy))
self.assertGreater(accuracy, target_accuracy)
if __name__ == '__main__':
unittest.main()
| [
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.lite.Interpreter",
"numpy.argmax",
"tensorflow.keras.utils.get_file",
"numpy.float32",
"numpy.zeros",
"tensorflow.compat.v1.keras.experimental.export_saved_model",
"tensorflow.keras.applications.MobileNetV2"
] | lite/examples/model_personalization/converter/tfltransfer/model_correctness_test.py | [(329, 'numpy.zeros', 'np.zeros', (['((batch_size,) + batch.shape[1:])'], {'dtype': 'batch.dtype'}), True, 'import numpy as np\n'), (419, 'unittest.main', 'unittest.main', ([], {}), False, 'import unittest\n'), (52, 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'tf.keras.preprocessing.image.ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'validation_split': 'VALIDATION_SPLIT'}), True, 'import tensorflow as tf\n'), (65, 'tfltransfer.tflite_transfer_converter.TFLiteTransferConverter', 'tflite_transfer_converter.TFLiteTransferConverter', (['NUM_CLASSES', 'base_model', 'head_model', 'optimizer', 'BATCH_SIZE'], {}), False, 'from tfltransfer import tflite_transfer_converter\n'), (82, 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_content': 'self.initialize_model'}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_content': 'self.optimizer_model'}), True, 'import tensorflow as tf\n'), (116, 'numpy.zeros', 'np.zeros', (['((image_gen.samples,) + BOTTLENECK_SHAPE)'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (118, 'numpy.zeros', 'np.zeros', (['(image_gen.samples, NUM_CLASSES)'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (141, 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_content': 'self.bottleneck_model'}), True, 'import tensorflow as tf\n'), (187, 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_content': 'self.train_head_model'}), True, 'import tensorflow as tf\n'), (223, 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_content': 'self.optimizer_model'}), True, 'import tensorflow as tf\n'), (252, 'tensorflow.lite.Interpreter', 'tf.lite.Interpreter', ([], {'model_content': 'self.inference_model'}), True, 'import tensorflow as tf\n'), (343, 'tensorflow.keras.utils.get_file', 'tf.keras.utils.get_file', ([], {'origin': 'DATASET_URL', 'fname': '"""flower_photos.tgz"""', 'extract': '(True)'}), True, 'import tensorflow as tf\n'), (347, 'tempfile.mkdtemp', 'tempfile.mkdtemp', (['"""tflite-transfer-test"""'], {}), False, 'import tempfile\n'), (348, 'tensorflow.keras.applications.MobileNetV2', 'tf.keras.applications.MobileNetV2', ([], {'input_shape': '(IMAGE_SIZE, IMAGE_SIZE, 3)', 'include_top': '(False)', 'weights': '"""imagenet"""'}), True, 'import tensorflow as tf\n'), (352, 'tensorflow.compat.v1.keras.experimental.export_saved_model', 'tfv1.keras.experimental.export_saved_model', (['mobilenet_keras', 'mobilenet_dir'], {}), True, 'from tensorflow.compat import v1 as tfv1\n'), (361, 'tfltransfer.bases.SavedModelBase', 'bases.SavedModelBase', (['self.mobilenet_dir'], {}), False, 'from tfltransfer import bases\n'), (362, 'tfltransfer.heads.SoftmaxClassifierHead', 'heads.SoftmaxClassifierHead', (['BATCH_SIZE', 'BOTTLENECK_SHAPE', 'NUM_CLASSES'], {}), False, 'from tfltransfer import heads\n'), (364, 'tfltransfer.optimizers.SGD', 'optimizers.SGD', (['LEARNING_RATE'], {}), False, 'from tfltransfer import optimizers\n'), (369, 'tfltransfer.bases.SavedModelBase', 'bases.SavedModelBase', (['self.mobilenet_dir'], {'quantize': '(True)'}), False, 'from tfltransfer import bases\n'), (370, 'tfltransfer.heads.SoftmaxClassifierHead', 'heads.SoftmaxClassifierHead', (['BATCH_SIZE', 'BOTTLENECK_SHAPE', 'NUM_CLASSES'], {}), False, 'from tfltransfer import heads\n'), (372, 'tfltransfer.optimizers.SGD', 'optimizers.SGD', (['LEARNING_RATE'], {}), False, 
'from tfltransfer import optimizers\n'), (377, 'tfltransfer.bases.MobileNetV2Base', 'bases.MobileNetV2Base', ([], {}), False, 'from tfltransfer import bases\n'), (378, 'tfltransfer.heads.SoftmaxClassifierHead', 'heads.SoftmaxClassifierHead', (['BATCH_SIZE', 'BOTTLENECK_SHAPE', 'NUM_CLASSES'], {}), False, 'from tfltransfer import heads\n'), (380, 'tfltransfer.optimizers.SGD', 'optimizers.SGD', (['LEARNING_RATE'], {}), False, 'from tfltransfer import optimizers\n'), (385, 'tfltransfer.bases.MobileNetV2Base', 'bases.MobileNetV2Base', ([], {}), False, 'from tfltransfer import bases\n'), (386, 'tfltransfer.heads.SoftmaxClassifierHead', 'heads.SoftmaxClassifierHead', (['BATCH_SIZE', 'BOTTLENECK_SHAPE', 'NUM_CLASSES'], {'l2_reg': '(0.1)'}), False, 'from tfltransfer import heads\n'), (388, 'tfltransfer.optimizers.SGD', 'optimizers.SGD', (['LEARNING_RATE'], {}), False, 'from tfltransfer import optimizers\n'), (393, 'tfltransfer.bases.MobileNetV2Base', 'bases.MobileNetV2Base', ([], {'quantize': '(True)'}), False, 'from tfltransfer import bases\n'), (394, 'tfltransfer.heads.SoftmaxClassifierHead', 'heads.SoftmaxClassifierHead', (['BATCH_SIZE', 'BOTTLENECK_SHAPE', 'NUM_CLASSES'], {}), False, 'from tfltransfer import heads\n'), (396, 'tfltransfer.optimizers.SGD', 'optimizers.SGD', (['LEARNING_RATE'], {}), False, 'from tfltransfer import optimizers\n'), (401, 'tfltransfer.bases.MobileNetV2Base', 'bases.MobileNetV2Base', ([], {}), False, 'from tfltransfer import bases\n'), (402, 'tfltransfer.heads.SoftmaxClassifierHead', 'heads.SoftmaxClassifierHead', (['BATCH_SIZE', 'BOTTLENECK_SHAPE', 'NUM_CLASSES'], {}), False, 'from tfltransfer import heads\n'), (404, 'tfltransfer.optimizers.Adam', 'optimizers.Adam', ([], {}), False, 'from tfltransfer import optimizers\n'), (77, 'numpy.zeros', 'np.zeros', (['shape'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (86, 'numpy.float32', 'np.float32', (['(0.0)'], {}), True, 'import numpy as np\n'), (345, 'os.path.dirname', 'os.path.dirname', (['zip_file'], {}), False, 'import os\n'), (273, 'numpy.argmax', 'np.argmax', (['preds'], {'axis': '(1)'}), True, 'import numpy as np\n'), (273, 'numpy.argmax', 'np.argmax', (['truth'], {'axis': '(1)'}), True, 'import numpy as np\n')] |
rgerkin/psiz | d540738462b6436a08a472d5e349ca2b813e6d47 | # -*- coding: utf-8 -*-
# Copyright 2020 The PsiZ Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Example that infers a shared embedding for three groups.
Fake data is generated from a ground truth model for three different
groups. In this example, these groups represent groups of agents with
varying levels of skill: novices, intermediates, and experts. Each group
has a different set of attention weights. An embedding model is
inferred from the simulated data and compared to the ground truth
model.
Example output:
Attention weights:
Novice | [3.38 3.32 0.49 0.43]
Intermediate | [2.06 2.18 2.04 2.18]
Expert | [0.55 0.50 3.40 3.32]
Model Comparison (R^2)
================================
True | Inferred
| Novice Interm Expert
--------+-----------------------
Novice | 0.95 0.68 0.16
Interm | 0.64 0.96 0.54
Expert | 0.16 0.61 0.96
"""
import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "2" # noqa
import numpy as np
from scipy.stats import pearsonr
import tensorflow as tf
import psiz
# Uncomment the following line to force eager execution.
# tf.config.run_functions_eagerly(True)
# Uncomment and edit the following to control GPU visibility.
# os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
# os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def main():
"""Run the simulation that infers an embedding for three groups."""
# Settings.
n_stimuli = 30
n_dim = 4
n_group = 3
n_restart = 1
epochs = 1000
n_trial = 2000
batch_size = 128
model_true = ground_truth(n_stimuli, n_dim, n_group)
# Generate a random docket of trials to show each group.
generator = psiz.trials.RandomRank(
n_stimuli, n_reference=8, n_select=2
)
docket = generator.generate(n_trial)
# Create virtual agents for each group.
agent_novice = psiz.agents.RankAgent(model_true, groups=[0])
agent_interm = psiz.agents.RankAgent(model_true, groups=[1])
agent_expert = psiz.agents.RankAgent(model_true, groups=[2])
# Simulate similarity judgments for each group.
obs_novice = agent_novice.simulate(docket)
obs_interm = agent_interm.simulate(docket)
obs_expert = agent_expert.simulate(docket)
obs = psiz.trials.stack((obs_novice, obs_interm, obs_expert))
# Partition observations into 80% train, 10% validation and 10% test set.
obs_train, obs_val, obs_test = psiz.utils.standard_split(obs)
# Convert to TF dataset.
ds_obs_train = obs_train.as_dataset().shuffle(
buffer_size=obs_train.n_trial, reshuffle_each_iteration=True
).batch(batch_size, drop_remainder=False)
ds_obs_val = obs_val.as_dataset().batch(
batch_size, drop_remainder=False
)
ds_obs_test = obs_test.as_dataset().batch(
batch_size, drop_remainder=False
)
# Use early stopping.
early_stop = psiz.keras.callbacks.EarlyStoppingRe(
'val_cce', patience=15, mode='min', restore_best_weights=True
)
callbacks = [early_stop]
compile_kwargs = {
'loss': tf.keras.losses.CategoricalCrossentropy(),
'optimizer': tf.keras.optimizers.Adam(lr=.001),
'weighted_metrics': [
tf.keras.metrics.CategoricalCrossentropy(name='cce')
]
}
model_inferred = build_model(n_stimuli, n_dim, n_group)
# Infer embedding with restarts.
restarter = psiz.keras.Restarter(
model_inferred, compile_kwargs=compile_kwargs, monitor='val_loss',
n_restart=n_restart
)
restart_record = restarter.fit(
x=ds_obs_train, validation_data=ds_obs_val, epochs=epochs,
callbacks=callbacks, verbose=0
)
model_inferred = restarter.model
# Compare the inferred model with ground truth by comparing the
# similarity matrices implied by each model.
simmat_truth = (
model_similarity(model_true, groups=[0]),
model_similarity(model_true, groups=[1]),
model_similarity(model_true, groups=[2])
)
simmat_inferred = (
model_similarity(model_inferred, groups=[0]),
model_similarity(model_inferred, groups=[1]),
model_similarity(model_inferred, groups=[2])
)
r_squared = np.empty((n_group, n_group))
for i_truth in range(n_group):
for j_infer in range(n_group):
rho, _ = pearsonr(simmat_truth[i_truth], simmat_inferred[j_infer])
r_squared[i_truth, j_infer] = rho**2
# Display attention weights.
# Permute inferred dimensions to best match ground truth.
attention_weight = tf.stack(
[
model_inferred.kernel.subnets[0].distance.w,
model_inferred.kernel.subnets[1].distance.w,
model_inferred.kernel.subnets[2].distance.w
],
axis=0
).numpy()
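    # Order dimensions by descending novice attention so the inferred
    # dimensions line up with the ground truth order for display.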
idx_sorted = np.argsort(-attention_weight[0, :])
attention_weight = attention_weight[:, idx_sorted]
group_labels = ["Novice", "Intermediate", "Expert"]
print("\n Attention weights:")
for i_group in range(attention_weight.shape[0]):
print(" {0:>12} | {1}".format(
group_labels[i_group],
np.array2string(
attention_weight[i_group, :],
formatter={'float_kind': lambda x: "%.2f" % x})
)
)
# Display comparison results. A good inferred model will have a high
# R^2 value on the diagonal elements (max is 1) and relatively low R^2
# values on the off-diagonal elements.
print('\n Model Comparison (R^2)')
print(' ================================')
print(' True | Inferred')
print(' | Novice Interm Expert')
print(' --------+-----------------------')
print(' Novice | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(
r_squared[0, 0], r_squared[0, 1], r_squared[0, 2]))
print(' Interm | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(
r_squared[1, 0], r_squared[1, 1], r_squared[1, 2]))
print(' Expert | {0: >6.2f} {1: >6.2f} {2: >6.2f}'.format(
r_squared[2, 0], r_squared[2, 1], r_squared[2, 2]))
print('\n')
def ground_truth(n_stimuli, n_dim, n_group):
"""Return a ground truth embedding."""
stimuli = tf.keras.layers.Embedding(
n_stimuli+1, n_dim, mask_zero=True,
embeddings_initializer=tf.keras.initializers.RandomNormal(
stddev=.17
)
)
shared_similarity = psiz.keras.layers.ExponentialSimilarity(
trainable=False,
beta_initializer=tf.keras.initializers.Constant(10.),
tau_initializer=tf.keras.initializers.Constant(1.),
gamma_initializer=tf.keras.initializers.Constant(0.)
)
# Define group-specific kernels.
kernel_0 = psiz.keras.layers.DistanceBased(
distance=psiz.keras.layers.Minkowski(
rho_trainable=False,
rho_initializer=tf.keras.initializers.Constant(2.),
w_initializer=tf.keras.initializers.Constant(
[1.8, 1.8, .2, .2]
),
w_constraint=psiz.keras.constraints.NonNegNorm(
scale=n_dim, p=1.
),
),
similarity=shared_similarity
)
kernel_1 = psiz.keras.layers.DistanceBased(
distance=psiz.keras.layers.Minkowski(
rho_trainable=False,
rho_initializer=tf.keras.initializers.Constant(2.),
w_initializer=tf.keras.initializers.Constant(
[1., 1., 1., 1.]
),
w_constraint=psiz.keras.constraints.NonNegNorm(
scale=n_dim, p=1.
),
),
similarity=shared_similarity
)
kernel_2 = psiz.keras.layers.DistanceBased(
distance=psiz.keras.layers.Minkowski(
rho_trainable=False,
rho_initializer=tf.keras.initializers.Constant(2.),
w_initializer=tf.keras.initializers.Constant(
[.2, .2, 1.8, 1.8]
),
w_constraint=psiz.keras.constraints.NonNegNorm(
scale=n_dim, p=1.
),
),
similarity=shared_similarity
)
kernel_group = psiz.keras.layers.GateMulti(
subnets=[kernel_0, kernel_1, kernel_2], group_col=0
)
model = psiz.keras.models.Rank(
stimuli=stimuli, kernel=kernel_group, use_group_kernel=True
)
return model
def build_model(n_stimuli, n_dim, n_group):
"""Build model.
Arguments:
n_stimuli: Integer indicating the number of stimuli in the
embedding.
n_dim: Integer indicating the dimensionality of the embedding.
Returns:
model: A TensorFlow Keras model.
"""
stimuli = tf.keras.layers.Embedding(
n_stimuli+1, n_dim, mask_zero=True,
)
shared_similarity = psiz.keras.layers.ExponentialSimilarity(
trainable=False,
beta_initializer=tf.keras.initializers.Constant(10.),
tau_initializer=tf.keras.initializers.Constant(1.),
gamma_initializer=tf.keras.initializers.Constant(0.)
)
kernel_0 = build_kernel(shared_similarity, n_dim)
kernel_1 = build_kernel(shared_similarity, n_dim)
kernel_2 = build_kernel(shared_similarity, n_dim)
kernel_group = psiz.keras.layers.GateMulti(
subnets=[kernel_0, kernel_1, kernel_2], group_col=0
)
model = psiz.keras.models.Rank(
stimuli=stimuli, kernel=kernel_group, use_group_kernel=True
)
return model
def build_kernel(similarity, n_dim):
"""Build kernel for single group."""
mink = psiz.keras.layers.Minkowski(
rho_trainable=False,
rho_initializer=tf.keras.initializers.Constant(2.),
w_constraint=psiz.keras.constraints.NonNegNorm(
scale=n_dim, p=1.
),
)
kernel = psiz.keras.layers.DistanceBased(
distance=mink,
similarity=similarity
)
return kernel
def model_similarity(model, groups=[]):
ds_pairs, ds_info = psiz.utils.pairwise_index_dataset(
model.n_stimuli, mask_zero=True, groups=groups
)
simmat = psiz.utils.pairwise_similarity(
model.stimuli, model.kernel, ds_pairs, use_group_kernel=True
).numpy()
return simmat
if __name__ == "__main__":
main()
| [
"tensorflow.keras.initializers.Constant",
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.keras.layers.Embedding",
"tensorflow.stack",
"scipy.stats.pearsonr",
"tensorflow.keras.metrics.CategoricalCrossentropy",
"tensorflow.keras.optimizers.Adam",
"numpy.argsort",
"numpy.array2string",
"tensorflow.keras.initializers.RandomNormal",
"numpy.empty"
] | examples/rank/mle_3g.py | [(73, 'psiz.trials.RandomRank', 'psiz.trials.RandomRank', (['n_stimuli'], {'n_reference': '(8)', 'n_select': '(2)'}), False, 'import psiz\n'), (79, 'psiz.agents.RankAgent', 'psiz.agents.RankAgent', (['model_true'], {'groups': '[0]'}), False, 'import psiz\n'), (80, 'psiz.agents.RankAgent', 'psiz.agents.RankAgent', (['model_true'], {'groups': '[1]'}), False, 'import psiz\n'), (81, 'psiz.agents.RankAgent', 'psiz.agents.RankAgent', (['model_true'], {'groups': '[2]'}), False, 'import psiz\n'), (87, 'psiz.trials.stack', 'psiz.trials.stack', (['(obs_novice, obs_interm, obs_expert)'], {}), False, 'import psiz\n'), (90, 'psiz.utils.standard_split', 'psiz.utils.standard_split', (['obs'], {}), False, 'import psiz\n'), (103, 'psiz.keras.callbacks.EarlyStoppingRe', 'psiz.keras.callbacks.EarlyStoppingRe', (['"""val_cce"""'], {'patience': '(15)', 'mode': '"""min"""', 'restore_best_weights': '(True)'}), False, 'import psiz\n'), (119, 'psiz.keras.Restarter', 'psiz.keras.Restarter', (['model_inferred'], {'compile_kwargs': 'compile_kwargs', 'monitor': '"""val_loss"""', 'n_restart': 'n_restart'}), False, 'import psiz\n'), (143, 'numpy.empty', 'np.empty', (['(n_group, n_group)'], {}), True, 'import numpy as np\n'), (159, 'numpy.argsort', 'np.argsort', (['(-attention_weight[(0), :])'], {}), True, 'import numpy as np\n'), (248, 'psiz.keras.layers.GateMulti', 'psiz.keras.layers.GateMulti', ([], {'subnets': '[kernel_0, kernel_1, kernel_2]', 'group_col': '(0)'}), False, 'import psiz\n'), (252, 'psiz.keras.models.Rank', 'psiz.keras.models.Rank', ([], {'stimuli': 'stimuli', 'kernel': 'kernel_group', 'use_group_kernel': '(True)'}), False, 'import psiz\n'), (271, 'tensorflow.keras.layers.Embedding', 'tf.keras.layers.Embedding', (['(n_stimuli + 1)', 'n_dim'], {'mask_zero': '(True)'}), True, 'import tensorflow as tf\n'), (285, 'psiz.keras.layers.GateMulti', 'psiz.keras.layers.GateMulti', ([], {'subnets': '[kernel_0, kernel_1, kernel_2]', 'group_col': '(0)'}), False, 'import psiz\n'), (289, 'psiz.keras.models.Rank', 'psiz.keras.models.Rank', ([], {'stimuli': 'stimuli', 'kernel': 'kernel_group', 'use_group_kernel': '(True)'}), False, 'import psiz\n'), (306, 'psiz.keras.layers.DistanceBased', 'psiz.keras.layers.DistanceBased', ([], {'distance': 'mink', 'similarity': 'similarity'}), False, 'import psiz\n'), (314, 'psiz.utils.pairwise_index_dataset', 'psiz.utils.pairwise_index_dataset', (['model.n_stimuli'], {'mask_zero': '(True)', 'groups': 'groups'}), False, 'import psiz\n'), (109, 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {}), True, 'import tensorflow as tf\n'), (110, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': '(0.001)'}), True, 'import tensorflow as tf\n'), (112, 'tensorflow.keras.metrics.CategoricalCrossentropy', 'tf.keras.metrics.CategoricalCrossentropy', ([], {'name': '"""cce"""'}), True, 'import tensorflow as tf\n'), (146, 'scipy.stats.pearsonr', 'pearsonr', (['simmat_truth[i_truth]', 'simmat_inferred[j_infer]'], {}), False, 'from scipy.stats import pearsonr\n'), (151, 'tensorflow.stack', 'tf.stack', (['[model_inferred.kernel.subnets[0].distance.w, model_inferred.kernel.subnets\n [1].distance.w, model_inferred.kernel.subnets[2].distance.w]'], {'axis': '(0)'}), True, 'import tensorflow as tf\n'), (193, 'tensorflow.keras.initializers.RandomNormal', 'tf.keras.initializers.RandomNormal', ([], {'stddev': '(0.17)'}), True, 'import tensorflow as tf\n'), (200, 'tensorflow.keras.initializers.Constant', 
'tf.keras.initializers.Constant', (['(10.0)'], {}), True, 'import tensorflow as tf\n'), (201, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (202, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (277, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(10.0)'], {}), True, 'import tensorflow as tf\n'), (278, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(1.0)'], {}), True, 'import tensorflow as tf\n'), (279, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(0.0)'], {}), True, 'import tensorflow as tf\n'), (300, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(2.0)'], {}), True, 'import tensorflow as tf\n'), (301, 'psiz.keras.constraints.NonNegNorm', 'psiz.keras.constraints.NonNegNorm', ([], {'scale': 'n_dim', 'p': '(1.0)'}), False, 'import psiz\n'), (317, 'psiz.utils.pairwise_similarity', 'psiz.utils.pairwise_similarity', (['model.stimuli', 'model.kernel', 'ds_pairs'], {'use_group_kernel': '(True)'}), False, 'import psiz\n'), (166, 'numpy.array2string', 'np.array2string', (['attention_weight[(i_group), :]'], {'formatter': "{'float_kind': lambda x: '%.2f' % x}"}), True, 'import numpy as np\n'), (209, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(2.0)'], {}), True, 'import tensorflow as tf\n'), (210, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['[1.8, 1.8, 0.2, 0.2]'], {}), True, 'import tensorflow as tf\n'), (213, 'psiz.keras.constraints.NonNegNorm', 'psiz.keras.constraints.NonNegNorm', ([], {'scale': 'n_dim', 'p': '(1.0)'}), False, 'import psiz\n'), (223, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(2.0)'], {}), True, 'import tensorflow as tf\n'), (224, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['[1.0, 1.0, 1.0, 1.0]'], {}), True, 'import tensorflow as tf\n'), (227, 'psiz.keras.constraints.NonNegNorm', 'psiz.keras.constraints.NonNegNorm', ([], {'scale': 'n_dim', 'p': '(1.0)'}), False, 'import psiz\n'), (237, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['(2.0)'], {}), True, 'import tensorflow as tf\n'), (238, 'tensorflow.keras.initializers.Constant', 'tf.keras.initializers.Constant', (['[0.2, 0.2, 1.8, 1.8]'], {}), True, 'import tensorflow as tf\n'), (241, 'psiz.keras.constraints.NonNegNorm', 'psiz.keras.constraints.NonNegNorm', ([], {'scale': 'n_dim', 'p': '(1.0)'}), False, 'import psiz\n')] |
Hammer7/Flowers-TF-Lite | e98f1ce1c354ce4e09a2c045364fa518702619c5 | #@title Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#Original code can be found at
#https://colab.research.google.com/github/tensorflow/examples/blob/master/community/en/flowers_tf_lite.ipynb#scrollTo=aCLb_yV5JfF3
import tensorflow as tf
import os
import numpy as np
import matplotlib.pyplot as plt
IMAGE_SIZE = 224
BATCH_SIZE = 64
def download_flower_dataset():
_URL = "https://storage.googleapis.com/download.tensorflow.org/example_images/flower_photos.tgz"
zip_file = tf.keras.utils.get_file(origin=_URL,
fname="flower_photos.tgz",
extract=True)
return os.path.join(os.path.dirname(zip_file), 'flower_photos')
def create_image_batch_generator(base_dir):
datagen = tf.keras.preprocessing.image.ImageDataGenerator(
rescale=1./255,
validation_split=0.2)
train_generator = datagen.flow_from_directory(
base_dir,
target_size=(IMAGE_SIZE, IMAGE_SIZE),
batch_size=BATCH_SIZE,
subset='training')
val_generator = datagen.flow_from_directory(
base_dir,
target_size=(IMAGE_SIZE, IMAGE_SIZE),
batch_size=BATCH_SIZE,
subset='validation')
return train_generator, val_generator
def save_labels(train_generator):
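    # Pull a single batch from the generator just to report tensor shapes.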
for image_batch, label_batch in train_generator:
break
print(image_batch.shape, label_batch.shape)
print (train_generator.class_indices)
labels = '\n'.join(sorted(train_generator.class_indices.keys()))
with open('labels.txt', 'w') as f:
f.write(labels)
def download_mobilenet_v2_model():
# Create the base model from the pre-trained model MobileNet V2
IMG_SHAPE = (IMAGE_SIZE, IMAGE_SIZE, 3)
base_model = tf.keras.applications.MobileNetV2(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
model = tf.keras.Sequential([
base_model,
tf.keras.layers.Conv2D(32, 3, activation='relu'),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(5, activation='softmax')
])
# Let's take a look to see how many layers are in the base model
print("Number of layers in the base model: ", len(base_model.layers))
return base_model, model
def run_transfer_learning(base_model, model, train_generator, val_generator):
base_model.trainable = False
model.compile(optimizer=tf.keras.optimizers.Adam(),
loss='categorical_crossentropy',
metrics=['accuracy'])
model.summary()
print('Number of trainable variables = {}'.format(len(model.trainable_variables)))
epochs = 10
history = model.fit(train_generator,
epochs=epochs,
validation_data=val_generator)
return history
def run_fine_tuning(base_model, model, train_generator, val_generator):
base_model.trainable = True
# Fine tune from this layer onwards
fine_tune_at = 100
# Freeze all the layers before the `fine_tune_at` layer
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False
model.compile(loss='categorical_crossentropy',
optimizer = tf.keras.optimizers.Adam(1e-5),
metrics=['accuracy'])
model.summary()
print('Number of trainable variables = {}'.format(len(model.trainable_variables)))
history = model.fit(train_generator,
epochs=5,
validation_data=val_generator)
return history
def save_model_as_tflite(model):
saved_model_dir = 'fine_tuning'
tf.saved_model.save(model, saved_model_dir)
converter = tf.lite.TFLiteConverter.from_saved_model(saved_model_dir)
tflite_model = converter.convert()
with open('model.tflite', 'wb') as f:
f.write(tflite_model)
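# Illustrative check (not part of the original script): the written flatbuffer
# can be loaded back with the TFLite interpreter to verify that it parses:
#   interpreter = tf.lite.Interpreter(model_path='model.tflite')
#   interpreter.allocate_tensors()
#   print(interpreter.get_input_details())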
def plot_figure(history, fig_name):
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.ylabel('Accuracy')
plt.ylim([min(plt.ylim()),1])
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.ylabel('Cross Entropy')
plt.ylim([0,1.0])
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
    plt.savefig(fig_name)  # save before show(), which can clear the figure
    plt.show()
if __name__ == '__main__':
print(tf.__version__)
base_dir = download_flower_dataset()
train_generator, val_generator = create_image_batch_generator(base_dir)
save_labels(train_generator)
    base_model, model = download_mobilenet_v2_model()  # download MobileNetV2 without its top layer and add a new head
history = run_transfer_learning(base_model, model, train_generator, val_generator)
plot_figure(history, 'transfer_learning.png')
history_fine = run_fine_tuning(base_model, model, train_generator, val_generator)
save_model_as_tflite(model)
plot_figure(history_fine, 'fine_tuning.png')
| [
"matplotlib.pyplot.legend",
"matplotlib.pyplot.plot",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.layers.Conv2D",
"tensorflow.saved_model.save",
"matplotlib.pyplot.subplot",
"tensorflow.lite.TFLiteConverter.from_saved_model",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"matplotlib.pyplot.ylim",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.applications.MobileNetV2",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.utils.get_file",
"tensorflow.keras.layers.Dropout"
] | flowers_tf_lite.py | [(26, 'tensorflow.keras.utils.get_file', 'tf.keras.utils.get_file', ([], {'origin': '_URL', 'fname': '"""flower_photos.tgz"""', 'extract': '(True)'}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'tf.keras.preprocessing.image.ImageDataGenerator', ([], {'rescale': '(1.0 / 255)', 'validation_split': '(0.2)'}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.keras.applications.MobileNetV2', 'tf.keras.applications.MobileNetV2', ([], {'input_shape': 'IMG_SHAPE', 'include_top': '(False)', 'weights': '"""imagenet"""'}), True, 'import tensorflow as tf\n'), (124, 'tensorflow.saved_model.save', 'tf.saved_model.save', (['model', 'saved_model_dir'], {}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.lite.TFLiteConverter.from_saved_model', 'tf.lite.TFLiteConverter.from_saved_model', (['saved_model_dir'], {}), True, 'import tensorflow as tf\n'), (137, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), True, 'import matplotlib.pyplot as plt\n'), (138, 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), True, 'import matplotlib.pyplot as plt\n'), (139, 'matplotlib.pyplot.plot', 'plt.plot', (['acc'], {'label': '"""Training Accuracy"""'}), True, 'import matplotlib.pyplot as plt\n'), (140, 'matplotlib.pyplot.plot', 'plt.plot', (['val_acc'], {'label': '"""Validation Accuracy"""'}), True, 'import matplotlib.pyplot as plt\n'), (141, 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), True, 'import matplotlib.pyplot as plt\n'), (142, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Accuracy"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (144, 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Accuracy"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (146, 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), True, 'import matplotlib.pyplot as plt\n'), (147, 'matplotlib.pyplot.plot', 'plt.plot', (['loss'], {'label': '"""Training Loss"""'}), True, 'import matplotlib.pyplot as plt\n'), (148, 'matplotlib.pyplot.plot', 'plt.plot', (['val_loss'], {'label': '"""Validation Loss"""'}), True, 'import matplotlib.pyplot as plt\n'), (149, 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), True, 'import matplotlib.pyplot as plt\n'), (150, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Cross Entropy"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (151, 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1.0]'], {}), True, 'import matplotlib.pyplot as plt\n'), (152, 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Loss"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (153, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (154, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (155, 'matplotlib.pyplot.savefig', 'plt.savefig', (['fig_name'], {}), True, 'import matplotlib.pyplot as plt\n'), (29, 'os.path.dirname', 'os.path.dirname', (['zip_file'], {}), False, 'import os\n'), (74, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3)'], {'activation': '"""relu"""'}), True, 'import tensorflow as tf\n'), (75, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['(0.2)'], {}), True, 'import tensorflow as tf\n'), (76, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'tf.keras.layers.GlobalAveragePooling2D', ([], {}), True, 'import tensorflow 
as tf\n'), (77, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(5)'], {'activation': '"""softmax"""'}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), True, 'import tensorflow as tf\n'), (110, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['(1e-05)'], {}), True, 'import tensorflow as tf\n'), (143, 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), True, 'import matplotlib.pyplot as plt\n')] |
shaun95/google-research | d41bbaca1eb9bfd980ec2b3fd201c3ddb4d1f2e5 | # coding=utf-8
# Copyright 2022 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Lint as: python3
# pylint: disable=g-complex-comprehension
# pylint: disable=missing-docstring
import tensorflow as tf
from tensorflow.keras import layers
from muzero import network
LARGE_NUM = 1e9
class MLPandLSTM(network.AbstractEncoderandLSTM):
"""Conv+LSTM network for use with MuZero."""
def __init__(self,
trivial_encoding,
observation_space,
*args,
encoder_size=3,
pretrain_temperature=1.,
**kwargs):
super().__init__(*args, **kwargs)
self.trivial_encoding = trivial_encoding
    self.pretrain_temperature = pretrain_temperature
if encoder_size == 0:
encoding_layers = [
layers.Conv2D(
filters=32,
kernel_size=8,
strides=(4, 4),
padding='valid',
activation='relu',
batch_input_shape=(None, *observation_space)),
layers.Conv2D(
filters=64,
kernel_size=4,
strides=(2, 2),
padding='valid',
activation=None,
use_bias=False,
),
tf.keras.layers.LayerNormalization(),
tf.keras.layers.ReLU(),
layers.Conv2D(
filters=128,
kernel_size=4,
strides=(2, 2),
padding='valid',
activation='relu',
),
layers.Conv2D(
filters=256,
kernel_size=3,
strides=(1, 1),
padding='valid',
activation=None,
use_bias=False,
),
tf.keras.layers.LayerNormalization(),
tf.keras.layers.ReLU(),
]
else:
encoding_layers = [
layers.Conv2D(
filters=64,
kernel_size=3,
strides=(2, 2),
padding='same',
activation='relu',
batch_input_shape=(None, *observation_space)), # add activation?
]
if encoder_size > 0:
encoding_layers.append(ResidualBlock(64),)
if encoder_size > 1:
encoding_layers.append(ResidualBlock(64),)
encoding_layers.append(
layers.Conv2D(
filters=128,
kernel_size=3,
strides=(2, 2),
activation='relu',
padding='same'), # add activation?
)
if encoder_size > 0:
encoding_layers.append(ResidualBlock(128),)
if encoder_size > 1:
encoding_layers.append(ResidualBlock(128),)
if encoder_size > 2:
encoding_layers.append(ResidualBlock(128),)
encoding_layers.append(
layers.AveragePooling2D(
pool_size=(3, 3), strides=(2, 2), padding='same'),)
if encoder_size > 0:
encoding_layers.append(ResidualBlock(128),)
if encoder_size > 1:
encoding_layers.append(ResidualBlock(128),)
if encoder_size > 2:
encoding_layers.append(ResidualBlock(128),)
encoding_layers.append(
layers.AveragePooling2D(
pool_size=(3, 3), strides=(2, 2), padding='same'))
self._observation_encoder = tf.keras.Sequential(
encoding_layers, name='observation_encoder')
pretrain_hidden_layers = self._head_hidden_layers()
pretrain_output_size = self.head_hidden_sizes[
-1] if self.head_hidden_sizes else self.hidden_state_size
self._pretrain_head = tf.keras.Sequential(
pretrain_hidden_layers + [
layers.Dense(pretrain_output_size, name='pretrain_output'),
],
name='pretrain_head')
self._pretrain_predictor = tf.keras.Sequential([
tf.keras.layers.Dense(pretrain_output_size // 4, use_bias=False),
tf.keras.layers.LayerNormalization(),
tf.keras.layers.ReLU(),
tf.keras.layers.Dense(pretrain_output_size),
],
name='pretrain_predictor')
def _encode_observation(self, observation, training=True):
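    # Rescale pixel observations from [0, 1] to [-1, 1].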
observation = observation * 2 - 1.
if self.trivial_encoding:
# use the trivial observation encoding from
# https://gist.github.com/karpathy/a4166c7fe253700972fcbc77e4ea32c5.
# Simply take the difference between the last two observations.
return observation[:, :, :, -1] - observation[:, :, :, -2]
return self._observation_encoder(observation, training=training)
  # The loss follows SimCLR (https://arxiv.org/abs/2002.05709).
def pretraining_loss(self, sample, training=True):
obs1, obs2 = sample
out1 = self._pretrain_head(
self.initial_inference(obs1, training=training).hidden_state)
out2 = self._pretrain_head(
self.initial_inference(obs2, training=training).hidden_state)
pred1 = self._pretrain_predictor(out1)
pred2 = self._pretrain_predictor(out2)
loss = self.add_contrastive_loss(
pred1, out2) / 2. + self.add_contrastive_loss(pred2, out1) / 2.
return loss, None
def add_contrastive_loss(self,
hidden1,
hidden2,
hidden_norm=True,
weights=1.0):
# Get (normalized) hidden1 and hidden2.
if hidden_norm:
hidden1 = tf.math.l2_normalize(hidden1, -1)
hidden2 = tf.math.l2_normalize(hidden2, -1)
batch_size = tf.shape(hidden1)[0]
labels = tf.one_hot(tf.range(batch_size), batch_size * 2)
masks = tf.one_hot(tf.range(batch_size), batch_size)
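    # Subtracting LARGE_NUM on the diagonal removes each example's similarity
    # with itself from the softmax.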
logits_aa = tf.matmul(
hidden1, hidden1, transpose_b=True) / self.pretrain_temperature
logits_aa = logits_aa - masks * LARGE_NUM
logits_bb = tf.matmul(
hidden2, hidden2, transpose_b=True) / self.pretrain_temperature
logits_bb = logits_bb - masks * LARGE_NUM
logits_ab = tf.matmul(
hidden1, hidden2, transpose_b=True) / self.pretrain_temperature
logits_ba = tf.matmul(
hidden2, hidden1, transpose_b=True) / self.pretrain_temperature
logits_a = tf.concat([logits_ab, logits_aa], 1)
logits_b = tf.concat([logits_ba, logits_bb], 1)
loss_a = tf.nn.softmax_cross_entropy_with_logits(
labels=labels, logits=logits_a)
loss_b = tf.nn.softmax_cross_entropy_with_logits(
labels=labels, logits=logits_b)
loss = loss_a + loss_b
return loss
def get_pretraining_trainable_variables(self):
return (self._observation_encoder.trainable_variables +
self._to_hidden.trainable_variables +
self._pretrain_head.trainable_variables +
self._pretrain_predictor.trainable_variables)
class ResidualBlock(layers.Layer):
"""Residualblock.
Implementation adapted from:
https://towardsdatascience.com/from-scratch-implementation-of-alphazero-for-connect4-f73d4554002a
.
"""
def __init__(self, planes):
super(ResidualBlock, self).__init__(name='')
self.planes = planes
self.conv2a = layers.Conv2D(
filters=self.planes,
kernel_size=3,
strides=(1, 1),
padding='same',
use_bias=False)
self.bn2a = layers.LayerNormalization()
self.conv2b = layers.Conv2D(
filters=self.planes,
kernel_size=3,
strides=(1, 1),
padding='same',
use_bias=False)
self.bn2b = layers.LayerNormalization()
self.relu = layers.ReLU()
def __call__(self, input_tensor, training=True, **kwargs):
x = self.conv2a(input_tensor, training=training)
x = self.bn2a(x, training=training)
x = self.relu(x)
x = self.conv2b(x, training=training)
x = self.bn2b(x, training=training)
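    # Skip connection: add the block input back onto the transformed features.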
x += input_tensor
return self.relu(x)
| [
"tensorflow.keras.layers.LayerNormalization",
"tensorflow.nn.softmax_cross_entropy_with_logits",
"tensorflow.matmul",
"tensorflow.concat",
"tensorflow.keras.layers.ReLU",
"tensorflow.range",
"tensorflow.shape",
"tensorflow.keras.layers.AveragePooling2D",
"tensorflow.keras.layers.Dense",
"tensorflow.math.l2_normalize",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Sequential"
] | muzero/atari/network.py | [(120, 'tensorflow.keras.Sequential', 'tf.keras.Sequential', (['encoding_layers'], {'name': '"""observation_encoder"""'}), True, 'import tensorflow as tf\n'), (186, 'tensorflow.concat', 'tf.concat', (['[logits_ab, logits_aa]', '(1)'], {}), True, 'import tensorflow as tf\n'), (187, 'tensorflow.concat', 'tf.concat', (['[logits_ba, logits_bb]', '(1)'], {}), True, 'import tensorflow as tf\n'), (189, 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits_a'}), True, 'import tensorflow as tf\n'), (191, 'tensorflow.nn.softmax_cross_entropy_with_logits', 'tf.nn.softmax_cross_entropy_with_logits', ([], {'labels': 'labels', 'logits': 'logits_b'}), True, 'import tensorflow as tf\n'), (217, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': 'self.planes', 'kernel_size': '(3)', 'strides': '(1, 1)', 'padding': '"""same"""', 'use_bias': '(False)'}), False, 'from tensorflow.keras import layers\n'), (223, 'tensorflow.keras.layers.LayerNormalization', 'layers.LayerNormalization', ([], {}), False, 'from tensorflow.keras import layers\n'), (225, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': 'self.planes', 'kernel_size': '(3)', 'strides': '(1, 1)', 'padding': '"""same"""', 'use_bias': '(False)'}), False, 'from tensorflow.keras import layers\n'), (231, 'tensorflow.keras.layers.LayerNormalization', 'layers.LayerNormalization', ([], {}), False, 'from tensorflow.keras import layers\n'), (232, 'tensorflow.keras.layers.ReLU', 'layers.ReLU', ([], {}), False, 'from tensorflow.keras import layers\n'), (169, 'tensorflow.math.l2_normalize', 'tf.math.l2_normalize', (['hidden1', '(-1)'], {}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.math.l2_normalize', 'tf.math.l2_normalize', (['hidden2', '(-1)'], {}), True, 'import tensorflow as tf\n'), (171, 'tensorflow.shape', 'tf.shape', (['hidden1'], {}), True, 'import tensorflow as tf\n'), (173, 'tensorflow.range', 'tf.range', (['batch_size'], {}), True, 'import tensorflow as tf\n'), (174, 'tensorflow.range', 'tf.range', (['batch_size'], {}), True, 'import tensorflow as tf\n'), (176, 'tensorflow.matmul', 'tf.matmul', (['hidden1', 'hidden1'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (179, 'tensorflow.matmul', 'tf.matmul', (['hidden2', 'hidden2'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (182, 'tensorflow.matmul', 'tf.matmul', (['hidden1', 'hidden2'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.matmul', 'tf.matmul', (['hidden2', 'hidden1'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(32)', 'kernel_size': '(8)', 'strides': '(4, 4)', 'padding': '"""valid"""', 'activation': '"""relu"""', 'batch_input_shape': '(None, *observation_space)'}), False, 'from tensorflow.keras import layers\n'), (51, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(4)', 'strides': '(2, 2)', 'padding': '"""valid"""', 'activation': 'None', 'use_bias': '(False)'}), False, 'from tensorflow.keras import layers\n'), (59, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.keras.layers.ReLU', 'tf.keras.layers.ReLU', ([], {}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(128)', 
'kernel_size': '(4)', 'strides': '(2, 2)', 'padding': '"""valid"""', 'activation': '"""relu"""'}), False, 'from tensorflow.keras import layers\n'), (68, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(256)', 'kernel_size': '(3)', 'strides': '(1, 1)', 'padding': '"""valid"""', 'activation': 'None', 'use_bias': '(False)'}), False, 'from tensorflow.keras import layers\n'), (76, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {}), True, 'import tensorflow as tf\n'), (77, 'tensorflow.keras.layers.ReLU', 'tf.keras.layers.ReLU', ([], {}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'strides': '(2, 2)', 'padding': '"""same"""', 'activation': '"""relu"""', 'batch_input_shape': '(None, *observation_space)'}), False, 'from tensorflow.keras import layers\n'), (94, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(128)', 'kernel_size': '(3)', 'strides': '(2, 2)', 'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (108, 'tensorflow.keras.layers.AveragePooling2D', 'layers.AveragePooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (117, 'tensorflow.keras.layers.AveragePooling2D', 'layers.AveragePooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2, 2)', 'padding': '"""same"""'}), False, 'from tensorflow.keras import layers\n'), (132, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(pretrain_output_size // 4)'], {'use_bias': '(False)'}), True, 'import tensorflow as tf\n'), (133, 'tensorflow.keras.layers.LayerNormalization', 'tf.keras.layers.LayerNormalization', ([], {}), True, 'import tensorflow as tf\n'), (134, 'tensorflow.keras.layers.ReLU', 'tf.keras.layers.ReLU', ([], {}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['pretrain_output_size'], {}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['pretrain_output_size'], {'name': '"""pretrain_output"""'}), False, 'from tensorflow.keras import layers\n')] |
thisisjako/UdemyTF | ee4102391ed6bd50f764955f732f5740425a9209 | from tensorflow.keras.applications import MobileNetV2
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import Input
from tensorflow.keras.layers.experimental.preprocessing import Rescaling
from tensorflow.keras.layers.experimental.preprocessing import Resizing
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tf_utils.callbacks import schedule_fn2
from tf_utils.dogsCatsDataAdvanced import DOGSCATS
IMAGENET_SIZE = 224
IMAGENET_DEPTH = 3
IMAGENET_SHAPE = (IMAGENET_SIZE, IMAGENET_SIZE, IMAGENET_DEPTH)
def build_model(img_shape, num_classes) -> Model:
base_model = MobileNetV2(
include_top=False,
weights="imagenet",
input_shape=IMAGENET_SHAPE
)
num_layers = len(base_model.layers)
print(f"Number of layers in the base model: {num_layers}")
fine_tune_at = num_layers - 10
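    # Freeze everything below `fine_tune_at`; only the last 10 layers train.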
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False
input_img = Input(shape=img_shape)
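    # Map inputs from [0, 1] to [-1, 1], the range MobileNetV2 was trained on.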
x = Rescaling(scale=2.0, offset=-1.0)(input_img)
x = Resizing(height=IMAGENET_SIZE, width=IMAGENET_SIZE)(x)
x = base_model(x)
x = GlobalAveragePooling2D()(x)
x = Dense(units=num_classes)(x)
y_pred = Activation("softmax")(x)
model = Model(
inputs=[input_img],
outputs=[y_pred]
)
model.summary()
return model
if __name__ == "__main__":
"""
Best model from chapter 9_2: 0.9034 accuracy
Best model from chapter 9_7: 0.9614 accuracy
"""
data = DOGSCATS()
train_dataset = data.get_train_set()
val_dataset = data.get_val_set()
test_dataset = data.get_test_set()
img_shape = data.img_shape
num_classes = data.num_classes
# Global params
epochs = 100
model = build_model(
img_shape,
num_classes
)
opt = Adam(learning_rate=5e-4)
model.compile(
loss="categorical_crossentropy",
optimizer=opt,
metrics=["accuracy"]
)
lrs_callback = LearningRateScheduler(
schedule=schedule_fn2,
verbose=1
)
es_callback = EarlyStopping(
monitor="val_loss",
patience=30,
verbose=1,
restore_best_weights=True
)
model.fit(
train_dataset,
verbose=1,
epochs=epochs,
callbacks=[lrs_callback, es_callback],
validation_data=val_dataset,
)
scores = model.evaluate(
val_dataset,
verbose=0
)
print(f"Scores: {scores}")
| [
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.callbacks.LearningRateScheduler",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.experimental.preprocessing.Rescaling",
"tensorflow.keras.layers.experimental.preprocessing.Resizing",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.applications.MobileNetV2",
"tensorflow.keras.layers.Input"
] | Chapter9_AdvancedDL/Chapter9_7_AdvancedTechniques2/dogsCatsTransferLearning.py | [(23, 'tensorflow.keras.applications.MobileNetV2', 'MobileNetV2', ([], {'include_top': '(False)', 'weights': '"""imagenet"""', 'input_shape': 'IMAGENET_SHAPE'}), False, 'from tensorflow.keras.applications import MobileNetV2\n'), (35, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'img_shape'}), False, 'from tensorflow.keras.layers import Input\n'), (43, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[input_img]', 'outputs': '[y_pred]'}), False, 'from tensorflow.keras.models import Model\n'), (58, 'tf_utils.dogsCatsDataAdvanced.DOGSCATS', 'DOGSCATS', ([], {}), False, 'from tf_utils.dogsCatsDataAdvanced import DOGSCATS\n'), (75, 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'learning_rate': '(0.0005)'}), False, 'from tensorflow.keras.optimizers import Adam\n'), (83, 'tensorflow.keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', ([], {'schedule': 'schedule_fn2', 'verbose': '(1)'}), False, 'from tensorflow.keras.callbacks import LearningRateScheduler\n'), (88, 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'patience': '(30)', 'verbose': '(1)', 'restore_best_weights': '(True)'}), False, 'from tensorflow.keras.callbacks import EarlyStopping\n'), (36, 'tensorflow.keras.layers.experimental.preprocessing.Rescaling', 'Rescaling', ([], {'scale': '(2.0)', 'offset': '(-1.0)'}), False, 'from tensorflow.keras.layers.experimental.preprocessing import Rescaling\n'), (37, 'tensorflow.keras.layers.experimental.preprocessing.Resizing', 'Resizing', ([], {'height': 'IMAGENET_SIZE', 'width': 'IMAGENET_SIZE'}), False, 'from tensorflow.keras.layers.experimental.preprocessing import Resizing\n'), (39, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D\n'), (40, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': 'num_classes'}), False, 'from tensorflow.keras.layers import Dense\n'), (41, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import Activation\n')] |
typhoonzero/models-1 | a3559618a013820385f43307261ad34351da2fbf | import tensorflow as tf
class StackedBiLSTMClassifier(tf.keras.Model):
def __init__(self, feature_columns, stack_units=[32], hidden_size=64, n_classes=2):
"""StackedBiLSTMClassifier
:param feature_columns: All columns must be embedding of sequence column with same sequence_length.
:type feature_columns: list[tf.embedding_column].
:param stack_units: Units for LSTM layer.
:type stack_units: vector of ints.
:param n_classes: Target number of classes.
:type n_classes: int.
"""
super(StackedBiLSTMClassifier, self).__init__()
self.feature_layer = tf.keras.experimental.SequenceFeatures(feature_columns)
self.stack_bilstm = []
self.stack_size = len(stack_units)
self.stack_units = stack_units
self.n_classes = n_classes
if self.stack_size > 1:
for i in range(self.stack_size - 1):
self.stack_bilstm.append(
tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(self.stack_units[i], return_sequences=True))
)
self.lstm = tf.keras.layers.Bidirectional(tf.keras.layers.LSTM(self.stack_units[-1]))
self.hidden = tf.keras.layers.Dense(hidden_size, activation='relu')
if self.n_classes == 2:
# special setup for binary classification
pred_act = 'sigmoid'
self._loss = 'binary_crossentropy'
else:
pred_act = 'softmax'
self._loss = 'categorical_crossentropy'
self.pred = tf.keras.layers.Dense(n_classes, activation=pred_act)
def call(self, inputs):
x, seq_len = self.feature_layer(inputs)
seq_mask = tf.sequence_mask(seq_len)
if self.stack_size > 1:
for i in range(self.stack_size - 1):
x = self.stack_bilstm[i](x, mask=seq_mask)
x = self.lstm(x, mask=seq_mask)
x = self.hidden(x)
return self.pred(x)
def optimizer(self):
"""Default optimizer name. Used in model.compile."""
return 'adam'
def loss(self):
"""Default loss function. Used in model.compile."""
return self._loss
def prepare_prediction_column(self, prediction):
"""Return the class label of highest probability."""
return prediction.argmax(axis=-1)
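# Illustrative usage sketch, not part of the original module. `columns` and
# `train_ds` are placeholders for a list of sequence-embedding feature
# columns and a tf.data.Dataset of (features, labels) pairs.
#
# model = StackedBiLSTMClassifier(columns, stack_units=[64, 32], n_classes=2)
# model.compile(optimizer=model.optimizer(), loss=model.loss(),
#               metrics=['accuracy'])
# model.fit(train_ds, epochs=3)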
| [
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.LSTM",
"tensorflow.sequence_mask",
"tensorflow.keras.experimental.SequenceFeatures"
] | sqlflow_models/lstmclassifier.py | [(15, 'tensorflow.keras.experimental.SequenceFeatures', 'tf.keras.experimental.SequenceFeatures', (['feature_columns'], {}), True, 'import tensorflow as tf\n'), (26, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['hidden_size'], {'activation': '"""relu"""'}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['n_classes'], {'activation': 'pred_act'}), True, 'import tensorflow as tf\n'), (38, 'tensorflow.sequence_mask', 'tf.sequence_mask', (['seq_len'], {}), True, 'import tensorflow as tf\n'), (25, 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['self.stack_units[-1]'], {}), True, 'import tensorflow as tf\n'), (23, 'tensorflow.keras.layers.LSTM', 'tf.keras.layers.LSTM', (['self.stack_units[i]'], {'return_sequences': '(True)'}), True, 'import tensorflow as tf\n')] |
tmkkk/fcn | e2d60fd5d54fd69f2b1d8280fe870f9af8cfda50 | import numpy as np
import tensorflow as tf
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add
from tensorflow.keras.initializers import Zeros
def build_fcn32s(nb_classes, target_size=(None, None)):
inputs = Input(shape=(*target_size, 3))
vgg = VGG16(weights='imagenet', include_top=False, input_tensor=inputs, input_shape=(*target_size, 3))
x = Conv2D(4096, (7, 7), activation='relu', padding='same')(vgg.output)
x = Dropout(0.5)(x)
x = Conv2D(4096, (1, 1), activation='relu', padding='same')(x)
x = Dropout(0.5)(x)
x = Conv2D(nb_classes, (1, 1), padding='same', kernel_initializer='he_normal')(x)
x = Conv2DTranspose(nb_classes, (64, 64), strides=(32, 32), use_bias=False, padding='same', activation='softmax', name='fcn32s-transpose')(x)
model = Model(inputs=inputs, outputs=x)
return model
def build_fcn16s(nb_classes, target_size=(None, None)):
inputs = Input(shape=(*target_size, 3))
vgg = VGG16(weights='imagenet', include_top=False, input_tensor=inputs, input_shape=(*target_size, 3))
x = Conv2D(4096, (7, 7), activation='relu', padding='same')(vgg.output)
x = Dropout(0.5)(x)
x = Conv2D(4096, (1, 1), activation='relu', padding='same')(x)
x = Dropout(0.5)(x)
x = Conv2D(nb_classes, (1, 1), padding='same', kernel_initializer='he_normal')(x)
x = Conv2DTranspose(nb_classes, (4, 4), strides=(2, 2), use_bias=False, padding='same', activation='relu', name='fcn16s-transpose-first')(x)
skip_con = Conv2D(nb_classes, (1, 1), strides=(1, 1), padding='same', bias_initializer=Zeros(), kernel_initializer=Zeros(), name='fcn16s-skip-con')(vgg.get_layer(name="block4_pool").output)
x = Add()([x, skip_con])
x = Conv2DTranspose(nb_classes, (32, 32), strides=(16, 16), use_bias=False, padding='same', activation='softmax', name='fcn16s-transpose-second')(x)
model = Model(inputs=inputs, outputs=x)
return model
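# Minimal compile sketch, added for illustration (not in the original file).
# Both builders return an uncompiled Keras model; the class count and the
# optimizer/loss choices below are assumptions, not project defaults.
def demo_compile_fcn32s(nb_classes=21):
    model = build_fcn32s(nb_classes, target_size=(224, 224))
    model.compile(optimizer='adam',
                  loss='categorical_crossentropy',
                  metrics=['accuracy'])
    return model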
| [
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.applications.vgg16.VGG16",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Add",
"tensorflow.keras.initializers.Zeros",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Input"
] | src/models.py | [(10, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(*target_size, 3)'}), False, 'from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\n'), (11, 'tensorflow.keras.applications.vgg16.VGG16', 'VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_tensor': 'inputs', 'input_shape': '(*target_size, 3)'}), False, 'from tensorflow.keras.applications.vgg16 import VGG16\n'), (19, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'x'}), False, 'from tensorflow.keras.models import Model\n'), (24, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(*target_size, 3)'}), False, 'from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\n'), (25, 'tensorflow.keras.applications.vgg16.VGG16', 'VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(False)', 'input_tensor': 'inputs', 'input_shape': '(*target_size, 3)'}), False, 'from tensorflow.keras.applications.vgg16 import VGG16\n'), (38, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': 'x'}), False, 'from tensorflow.keras.models import Model\n'), (12, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(4096)', '(7, 7)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\n'), (13, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), False, 'from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\n'), (14, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(4096)', '(1, 1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\n'), (15, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), False, 'from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\n'), (16, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['nb_classes', '(1, 1)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), False, 'from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\n'), (17, 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['nb_classes', '(64, 64)'], {'strides': '(32, 32)', 'use_bias': '(False)', 'padding': '"""same"""', 'activation': '"""softmax"""', 'name': '"""fcn32s-transpose"""'}), False, 'from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\n'), (26, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(4096)', '(7, 7)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\n'), (27, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), False, 'from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\n'), (28, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(4096)', '(1, 1)'], {'activation': '"""relu"""', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\n'), (29, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), False, 'from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\n'), (30, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['nb_classes', '(1, 1)'], {'padding': '"""same"""', 'kernel_initializer': '"""he_normal"""'}), False, 'from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\n'), (32, 
'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['nb_classes', '(4, 4)'], {'strides': '(2, 2)', 'use_bias': '(False)', 'padding': '"""same"""', 'activation': '"""relu"""', 'name': '"""fcn16s-transpose-first"""'}), False, 'from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\n'), (35, 'tensorflow.keras.layers.Add', 'Add', ([], {}), False, 'from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\n'), (36, 'tensorflow.keras.layers.Conv2DTranspose', 'Conv2DTranspose', (['nb_classes', '(32, 32)'], {'strides': '(16, 16)', 'use_bias': '(False)', 'padding': '"""same"""', 'activation': '"""softmax"""', 'name': '"""fcn16s-transpose-second"""'}), False, 'from tensorflow.keras.layers import Input, Conv2D, Dropout, Conv2DTranspose, Add\n'), (34, 'tensorflow.keras.initializers.Zeros', 'Zeros', ([], {}), False, 'from tensorflow.keras.initializers import Zeros\n'), (34, 'tensorflow.keras.initializers.Zeros', 'Zeros', ([], {}), False, 'from tensorflow.keras.initializers import Zeros\n')] |
aaavinash85/100-Days-of-ML- | d055d718f7972e3a4469279b9112867a42cf652f | # TensorFlow and tf.keras
import tensorflow as tf
from tensorflow import keras
# Helper libraries
import numpy as np
import matplotlib.pyplot as plt
print(tf.__version__)
fashion_mnist = keras.datasets.fashion_mnist
(train_images, train_labels), (test_images, test_labels) = fashion_mnist.load_data()
class_names = ['T-shirt/top', 'Trouser', 'Pullover', 'Dress', 'Coat',
'Sandal', 'Shirt', 'Sneaker', 'Bag', 'Ankle boot']
print(train_images.shape)
print(len(train_labels))
print(train_labels)
print(test_images.shape)
plt.figure()
plt.imshow(train_images[0])
plt.colorbar()
plt.grid(False)
plt.show()
train_images = train_images / 255.0
test_images = test_images / 255.0
plt.figure(figsize=(10,10))
for i in range(25):
plt.subplot(5,5,i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(train_images[i], cmap=plt.cm.binary)
plt.xlabel(class_names[train_labels[i]])
plt.show()
model = keras.Sequential([
keras.layers.Flatten(input_shape=(28, 28)),
keras.layers.Dense(128, activation='relu'),
keras.layers.Dense(10)
])
model.compile(optimizer='adam',
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True),
metrics=['accuracy'])
model.fit(train_images, train_labels, epochs=10)
test_loss, test_acc = model.evaluate(test_images, test_labels, verbose=2)
print('\nTest accuracy:', test_acc)
probability_model = tf.keras.Sequential([model,
tf.keras.layers.Softmax()])
predictions = probability_model.predict(test_images)
print(predictions[0])
print(np.argmax(predictions[0]))
print(test_labels[0])
def plot_image(i, predictions_array, true_label, img):
predictions_array, true_label, img = predictions_array, true_label[i], img[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img, cmap=plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100*np.max(predictions_array),
class_names[true_label]),
color=color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array, true_label[i]
plt.grid(False)
plt.xticks(range(10))
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color="#777777")
plt.ylim([0, 1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
i = 0
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
i = 12
plt.figure(figsize=(6,3))
plt.subplot(1,2,1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(1,2,2)
plot_value_array(i, predictions[i], test_labels)
plt.show()
# Plot the first X test images, their predicted labels, and the true labels.
# Color correct predictions in blue and incorrect predictions in red.
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions[i], test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions[i], test_labels)
plt.tight_layout()
plt.show()
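# Illustrative extension, not part of the original script: classifying a
# single image. tf.keras models expect a batch, so the image is wrapped in a
# batch of one before calling predict.
img = test_images[1]
img = np.expand_dims(img, 0)  # shape becomes (1, 28, 28)
single_pred = probability_model.predict(img)
print("Predicted class:", class_names[np.argmax(single_pred[0])])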
| [
"matplotlib.pyplot.imshow",
"matplotlib.pyplot.tight_layout",
"matplotlib.pyplot.yticks",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"matplotlib.pyplot.ylim",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.colorbar",
"numpy.max",
"numpy.argmax",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.xticks",
"matplotlib.pyplot.show",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Softmax",
"matplotlib.pyplot.figure"
] | Tensorflow/fashionMni1.py | [(23, 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (24, 'matplotlib.pyplot.imshow', 'plt.imshow', (['train_images[0]'], {}), True, 'import matplotlib.pyplot as plt\n'), (25, 'matplotlib.pyplot.colorbar', 'plt.colorbar', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (26, 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), True, 'import matplotlib.pyplot as plt\n'), (27, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (32, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 10)'}), True, 'import matplotlib.pyplot as plt\n'), (40, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (65, 'numpy.argmax', 'np.argmax', (['predictions[0]'], {}), True, 'import numpy as np\n'), (101, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 3)'}), True, 'import matplotlib.pyplot as plt\n'), (102, 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), True, 'import matplotlib.pyplot as plt\n'), (104, 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), True, 'import matplotlib.pyplot as plt\n'), (106, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (109, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(6, 3)'}), True, 'import matplotlib.pyplot as plt\n'), (110, 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), True, 'import matplotlib.pyplot as plt\n'), (112, 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), True, 'import matplotlib.pyplot as plt\n'), (114, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (121, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(2 * 2 * num_cols, 2 * num_rows)'}), True, 'import matplotlib.pyplot as plt\n'), (127, 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (128, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (34, 'matplotlib.pyplot.subplot', 'plt.subplot', (['(5)', '(5)', '(i + 1)'], {}), True, 'import matplotlib.pyplot as plt\n'), (35, 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), True, 'import matplotlib.pyplot as plt\n'), (36, 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), True, 'import matplotlib.pyplot as plt\n'), (37, 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), True, 'import matplotlib.pyplot as plt\n'), (38, 'matplotlib.pyplot.imshow', 'plt.imshow', (['train_images[i]'], {'cmap': 'plt.cm.binary'}), True, 'import matplotlib.pyplot as plt\n'), (39, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['class_names[train_labels[i]]'], {}), True, 'import matplotlib.pyplot as plt\n'), (71, 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), True, 'import matplotlib.pyplot as plt\n'), (72, 'matplotlib.pyplot.xticks', 'plt.xticks', (['[]'], {}), True, 'import matplotlib.pyplot as plt\n'), (73, 'matplotlib.pyplot.yticks', 'plt.yticks', (['[]'], {}), True, 'import matplotlib.pyplot as plt\n'), (75, 'matplotlib.pyplot.imshow', 'plt.imshow', (['img'], {'cmap': 'plt.cm.binary'}), True, 'import matplotlib.pyplot as plt\n'), (77, 'numpy.argmax', 'np.argmax', (['predictions_array'], {}), True, 'import numpy as np\n'), (90, 'matplotlib.pyplot.grid', 'plt.grid', (['(False)'], {}), True, 'import matplotlib.pyplot as plt\n'), (92, 'matplotlib.pyplot.yticks', 
'plt.yticks', (['[]'], {}), True, 'import matplotlib.pyplot as plt\n'), (94, 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1]'], {}), True, 'import matplotlib.pyplot as plt\n'), (95, 'numpy.argmax', 'np.argmax', (['predictions_array'], {}), True, 'import numpy as np\n'), (123, 'matplotlib.pyplot.subplot', 'plt.subplot', (['num_rows', '(2 * num_cols)', '(2 * i + 1)'], {}), True, 'import matplotlib.pyplot as plt\n'), (125, 'matplotlib.pyplot.subplot', 'plt.subplot', (['num_rows', '(2 * num_cols)', '(2 * i + 2)'], {}), True, 'import matplotlib.pyplot as plt\n'), (43, 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {'input_shape': '(28, 28)'}), False, 'from tensorflow import keras\n'), (44, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), False, 'from tensorflow import keras\n'), (45, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {}), False, 'from tensorflow import keras\n'), (49, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.keras.layers.Softmax', 'tf.keras.layers.Softmax', ([], {}), True, 'import tensorflow as tf\n'), (84, 'numpy.max', 'np.max', (['predictions_array'], {}), True, 'import numpy as np\n')] |
xihuaiwen/chinese_bert | 631afbc76c40b0ac033be2186e717885246f446c | """Configurable model specification for CosmoFlow"""
import tensorflow as tf
import tensorflow.keras.layers as layers
from .layers import scale_1p2
def build_model(input_shape, target_size,
conv_size=16, kernel_size=2, n_conv_layers=5,
fc1_size=128, fc2_size=64,
hidden_activation='LeakyReLU',
pooling_type='MaxPool3D',
dropout=0):
"""Construct the CosmoFlow 3D CNN model"""
conv_args = dict(kernel_size=kernel_size, padding='same')
hidden_activation = getattr(layers, hidden_activation)
pooling_type = getattr(layers, pooling_type)
model = tf.keras.models.Sequential()
# First convolutional layer
model.add(layers.Conv3D(conv_size, input_shape=input_shape, **conv_args))
model.add(hidden_activation())
model.add(pooling_type(pool_size=2))
# Additional conv layers
for i in range(1, n_conv_layers):
# Double conv channels at every layer
model.add(layers.Conv3D(conv_size*2**i, **conv_args))
model.add(hidden_activation())
model.add(pooling_type(pool_size=2))
model.add(layers.Flatten())
# Fully-connected layers
model.add(layers.Dense(fc1_size))
model.add(hidden_activation())
model.add(layers.Dropout(dropout))
model.add(layers.Dense(fc2_size))
model.add(hidden_activation())
model.add(layers.Dropout(dropout))
# Output layers
model.add(layers.Dense(target_size, activation='tanh'))
model.add(layers.Lambda(scale_1p2))
return model
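# Usage sketch, added for illustration only: the volume shape and target size
# below are placeholders, not values mandated by this module.
def demo_build():
    model = build_model(input_shape=(128, 128, 128, 4), target_size=3)
    model.compile(optimizer='adam', loss='mse')
    return model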
| [
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv3D",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten"
] | code_examples/tensorflow/cosmoflow/models/cosmoflow.py | [(21, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (24, 'tensorflow.keras.layers.Conv3D', 'layers.Conv3D', (['conv_size'], {'input_shape': 'input_shape'}), True, 'import tensorflow.keras.layers as layers\n'), (34, 'tensorflow.keras.layers.Flatten', 'layers.Flatten', ([], {}), True, 'import tensorflow.keras.layers as layers\n'), (37, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['fc1_size'], {}), True, 'import tensorflow.keras.layers as layers\n'), (39, 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['dropout'], {}), True, 'import tensorflow.keras.layers as layers\n'), (40, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['fc2_size'], {}), True, 'import tensorflow.keras.layers as layers\n'), (42, 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['dropout'], {}), True, 'import tensorflow.keras.layers as layers\n'), (45, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['target_size'], {'activation': '"""tanh"""'}), True, 'import tensorflow.keras.layers as layers\n'), (46, 'tensorflow.keras.layers.Lambda', 'layers.Lambda', (['scale_1p2'], {}), True, 'import tensorflow.keras.layers as layers\n'), (31, 'tensorflow.keras.layers.Conv3D', 'layers.Conv3D', (['(conv_size * 2 ** i)'], {}), True, 'import tensorflow.keras.layers as layers\n')] |
xiaoweiChen/Tensorflow-2.x-Alexnet | d9161ba6764143d3d8e84bee2268b0ac8ad95355 |
import os, pathlib, PIL
from tqdm import tqdm
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
from tensorflow.keras.models import Sequential
from tensorflow.keras import Model
class AlexNet(Model):
def __init__(self, data_shape=(224, 224, 3), num_classes=1000):
super(AlexNet, self).__init__()
self.data_augmentation = keras.Sequential(
[
layers.experimental.preprocessing.RandomFlip(
"horizontal",
input_shape=data_shape),
layers.experimental.preprocessing.RandomRotation(0.1),
layers.experimental.preprocessing.RandomZoom(0.1),
]
)
self.rescaling = layers.experimental.preprocessing.Rescaling(1./255)
# layer 1
self.conv1 = layers.Conv2D(
filters=96,
kernel_size=(11, 11),
strides=4,
padding="valid",
activation='relu',
input_shape= data_shape,
kernel_initializer='GlorotNormal')
self.pool1 = layers.MaxPooling2D(
pool_size=(3, 3),
strides=2,
padding="valid")
self.norm1 = tf.keras.layers.BatchNormalization()
# layer 2
self.conv2 = layers.Conv2D(
filters=256,
kernel_size=(5, 5),
strides=1,
padding="same",
activation='relu',
kernel_initializer='GlorotNormal')
self.pool2 = layers.MaxPooling2D(
pool_size=(3, 3),
strides=2,
padding="valid")
self.norm2 = tf.keras.layers.BatchNormalization()
# layer 3
self.conv3 = layers.Conv2D(
filters=384,
kernel_size=(3, 3),
strides=1,
padding="same",
activation='relu',
kernel_initializer='GlorotNormal')
# layer 4
self.conv4 = layers.Conv2D(
filters=384,
kernel_size=(3, 3),
strides=1,
padding="same",
activation='relu',
kernel_initializer='GlorotNormal')
# layer 5
self.conv5 = layers.Conv2D(
filters=256,
kernel_size=(3, 3),
strides=1,
padding="same",
activation='relu',
kernel_initializer='GlorotNormal')
self.pool5 = layers.MaxPooling2D(
pool_size=(3, 3),
strides=2,
padding="valid")
self.norm5 = tf.keras.layers.BatchNormalization()
# layer 6
self.flatten6 = tf.keras.layers.Flatten()
self.d6 = tf.keras.layers.Dense(
units=4096,
activation='relu')
self.drop6 = tf.keras.layers.Dropout(rate=0.5)
# layer 7
self.d7 = tf.keras.layers.Dense(
units=4096,
activation='relu')
self.drop7 = tf.keras.layers.Dropout(rate=0.5)
# layer 8
self.d8 = tf.keras.layers.Dense(
units=num_classes,
activation='softmax')
self.build((None,) + data_shape)
def call(self, x):
x = self.data_augmentation(x)
x = self.rescaling(x)
x = self.conv1(x)
x = self.pool1(x)
x = self.norm1(x)
x = self.conv2(x)
x = self.pool2(x)
x = self.norm2(x)
x = self.conv3(x)
x = self.conv4(x)
x = self.conv5(x)
x = self.pool5(x)
x = self.norm5(x)
x = self.flatten6(x)
x = self.d6(x)
x = self.drop6(x)
x = self.d7(x)
x = self.drop7(x)
x = self.d8(x)
return x
class AlexNetWork():
def __init__(self, args):
# dataset
data_dir = pathlib.Path(args.dataset_path)
self.image_height = args.image_height
self.image_width = args.image_width
data_shape = (args.image_height, args.image_width, 3)
batch_size = args.batchsize
pretrain_model_path_or_dir = args.pre_train_model_path_dir
# create model
self.model = AlexNet(
data_shape = data_shape,
num_classes=args.classes)
if os.path.exists(pretrain_model_path_or_dir):
if args.use_whole_network_model:
dir = pretrain_model_path_or_dir
self.model = keras.models.load_model(dir)
print("Whole network load from {} dir".format(dir))
else:
path = pretrain_model_path_or_dir
self.model.load_weights(path)
print("Network model load from {}".format(path))
# Optimization
self.learning_rate = args.lr
self.epochs = args.epochs
if args.opt_type == 'Adam':
self.optimizer = tf.keras.optimizers.Adam(
learning_rate=args.lr)
elif args.opt_type == 'SGD':
self.optimizer = tf.keras.optimizers.SGD(
learning_rate=args.lr,
momentum=args.momentum)
elif args.opt_type == 'Adadelta':
self.optimizer = tf.keras.optimizers.Adadelta(
learning_rate=args.lr)
elif args.opt_type == 'Adamax':
self.optimizer = tf.keras.optimizers.Adamax(
learning_rate=args.lr)
elif args.opt_type == 'Ftrl':
self.optimizer = tf.keras.optimizers.Ftrl(
learning_rate=args.lr)
elif args.opt_type == 'Nadam':
self.optimizer = tf.keras.optimizers.Nadam(
learning_rate=args.lr)
else:
self.optimizer = tf.keras.optimizers.RMSprop(
learning_rate=args.lr)
        # The final Dense layer already applies a softmax, so the loss must
        # treat its inputs as probabilities rather than logits.
        self.loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False)
# get the data set
image_count = 0
image_count += len(list(data_dir.glob('*/*.jpg')))
image_count += len(list(data_dir.glob('*/*.JPEG')))
print("image number:", image_count)
# train dataset
self.train_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="training",
seed=123,
image_size=(args.image_height, args.image_width),
batch_size=batch_size)
self.class_names = self.train_ds.class_names
self.train_ds = self.train_ds.cache().shuffle(1000).prefetch(buffer_size=tf.data.AUTOTUNE)
# valid/test dataset
self.test_ds = tf.keras.preprocessing.image_dataset_from_directory(
data_dir,
validation_split=0.2,
subset="validation",
seed=123,
image_size=(args.image_height, args.image_width),
batch_size=batch_size)
self.test_ds = self.test_ds.cache().prefetch(buffer_size=tf.data.AUTOTUNE)
self.train_loss = tf.keras.metrics.Mean(name='train_loss')
self.train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
self.test_loss = tf.keras.metrics.Mean(name='valid_loss')
self.test_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='vaild_accuracy')
@tf.function
def train_step(self, images, labels):
with tf.GradientTape() as tape:
            predictions = self.model(images, training=True)  # enable dropout/BN training behavior
loss = self.loss_object(labels, predictions)
gradients = tape.gradient(loss, self.model.trainable_variables)
self.optimizer.apply_gradients(zip(gradients, self.model.trainable_variables))
self.train_loss(loss)
self.train_accuracy(labels, predictions)
# [end train_step]
@tf.function
def test_step(self, images, labels):
        predictions = self.model(images, training=False)
t_loss = self.loss_object(labels, predictions)
self.test_loss(t_loss)
self.test_accuracy(labels, predictions)
# [end test_step]
def train(self):
# Model summary
self.model.summary()
for epoch in range(self.epochs):
self.train_loss.reset_states()
self.train_accuracy.reset_states()
self.test_loss.reset_states()
self.test_accuracy.reset_states()
try:
with tqdm(self.train_ds, ncols=80) as t:
for images, labels in t:
self.train_step(images, labels)
template = '[Train\t Epoch {}] Loss: {:.4f}, Accuracy: {:.4f}'
template = template.format(epoch+1, self.train_loss.result(), self.train_accuracy.result()*100)
t.set_description(desc=template)
except KeyboardInterrupt:
t.close()
raise
try:
with tqdm(self.test_ds, ncols=80) as t:
for test_images, test_labels in t:
self.test_step(test_images, test_labels)
template = '[Test\t Epoch {}] Loss: {:.4f}, Accuracy: {:.4f}'
template = template.format(epoch+1, self.test_loss.result(), self.test_accuracy.result()*100)
t.set_description(desc=template)
except KeyboardInterrupt:
t.close()
raise
# [end train]
def saveModel(self, path_or_dir, mode='save_weight'):
if mode == 'save_weight':
path = path_or_dir
self.model.save_weights(path)
print("Network model save to {}".format(path))
elif mode == 'whole_network':
dir = path_or_dir
self.model.save(dir)
print("Whole network save to {} dir".format(dir))
# [end saveModel]
def test(self, args):
if not os.path.exists(args.test_image):
return
image_path = args.test_image
img = keras.preprocessing.image.load_img(
image_path, target_size=(
args.image_height,
args.image_width)
)
img_array = keras.preprocessing.image.img_to_array(img)
img_array = tf.expand_dims(img_array, 0) # Create a batch
predictions = self.model.predict(img_array)
score = tf.nn.softmax(predictions[0])
import numpy as np
print("{} most likely belongs to {} with a {:.2f} percent confidence.".format(image_path, self.class_names[np.argmax(score)], 100 * np.max(score)))
# [end test]
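# Illustrative driver sketch (not part of the original file). The `args`
# namespace is normally produced by an argparse parser elsewhere in this
# project; the fields below only mirror what this class reads, and every
# value is a placeholder.
#
# from types import SimpleNamespace
# args = SimpleNamespace(
#     dataset_path='data/', image_height=224, image_width=224, batchsize=32,
#     classes=1000, pre_train_model_path_dir='', use_whole_network_model=False,
#     lr=1e-3, epochs=10, opt_type='Adam', momentum=0.9, test_image='')
# network = AlexNetWork(args)
# network.train()
# network.saveModel('alexnet_weights.h5', mode='save_weight')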
| [
"tensorflow.keras.models.load_model",
"tensorflow.keras.optimizers.Ftrl",
"tensorflow.keras.preprocessing.image.load_img",
"tensorflow.keras.layers.experimental.preprocessing.RandomFlip",
"numpy.max",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.optimizers.Nadam",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.preprocessing.image_dataset_from_directory",
"tensorflow.keras.optimizers.Adadelta",
"tensorflow.keras.optimizers.RMSprop",
"numpy.argmax",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.preprocessing.image.img_to_array",
"tensorflow.keras.metrics.Mean",
"tensorflow.keras.layers.experimental.preprocessing.Rescaling",
"tensorflow.keras.optimizers.Adamax",
"tensorflow.keras.layers.Dense",
"tensorflow.GradientTape",
"tensorflow.nn.softmax",
"tensorflow.keras.layers.experimental.preprocessing.RandomRotation",
"tensorflow.keras.layers.experimental.preprocessing.RandomZoom",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.expand_dims",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.keras.layers.MaxPooling2D"
] | model.py | [(24, 'tensorflow.keras.layers.experimental.preprocessing.Rescaling', 'layers.experimental.preprocessing.Rescaling', (['(1.0 / 255)'], {}), False, 'from tensorflow.keras import layers\n'), (27, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(96)', 'kernel_size': '(11, 11)', 'strides': '(4)', 'padding': '"""valid"""', 'activation': '"""relu"""', 'input_shape': 'data_shape', 'kernel_initializer': '"""GlorotNormal"""'}), False, 'from tensorflow.keras import layers\n'), (35, 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2)', 'padding': '"""valid"""'}), False, 'from tensorflow.keras import layers\n'), (39, 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(256)', 'kernel_size': '(5, 5)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""', 'kernel_initializer': '"""GlorotNormal"""'}), False, 'from tensorflow.keras import layers\n'), (49, 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2)', 'padding': '"""valid"""'}), False, 'from tensorflow.keras import layers\n'), (53, 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(384)', 'kernel_size': '(3, 3)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""', 'kernel_initializer': '"""GlorotNormal"""'}), False, 'from tensorflow.keras import layers\n'), (65, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(384)', 'kernel_size': '(3, 3)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""', 'kernel_initializer': '"""GlorotNormal"""'}), False, 'from tensorflow.keras import layers\n'), (74, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'filters': '(256)', 'kernel_size': '(3, 3)', 'strides': '(1)', 'padding': '"""same"""', 'activation': '"""relu"""', 'kernel_initializer': '"""GlorotNormal"""'}), False, 'from tensorflow.keras import layers\n'), (81, 'tensorflow.keras.layers.MaxPooling2D', 'layers.MaxPooling2D', ([], {'pool_size': '(3, 3)', 'strides': '(2)', 'padding': '"""valid"""'}), False, 'from tensorflow.keras import layers\n'), (85, 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(4096)', 'activation': '"""relu"""'}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': '(0.5)'}), True, 'import tensorflow as tf\n'), (95, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': '(4096)', 'activation': '"""relu"""'}), True, 'import tensorflow as tf\n'), (98, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', ([], {'rate': '(0.5)'}), True, 'import tensorflow as tf\n'), (101, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', ([], {'units': 'num_classes', 'activation': '"""softmax"""'}), True, 'import tensorflow as tf\n'), (133, 'pathlib.Path', 'pathlib.Path', (['args.dataset_path'], {}), False, 'import os, pathlib, PIL\n'), (146, 'os.path.exists', 
'os.path.exists', (['pretrain_model_path_or_dir'], {}), False, 'import os, pathlib, PIL\n'), (183, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), True, 'import tensorflow as tf\n'), (192, 'tensorflow.keras.preprocessing.image_dataset_from_directory', 'tf.keras.preprocessing.image_dataset_from_directory', (['data_dir'], {'validation_split': '(0.2)', 'subset': '"""training"""', 'seed': '(123)', 'image_size': '(args.image_height, args.image_width)', 'batch_size': 'batch_size'}), True, 'import tensorflow as tf\n'), (203, 'tensorflow.keras.preprocessing.image_dataset_from_directory', 'tf.keras.preprocessing.image_dataset_from_directory', (['data_dir'], {'validation_split': '(0.2)', 'subset': '"""validation"""', 'seed': '(123)', 'image_size': '(args.image_height, args.image_width)', 'batch_size': 'batch_size'}), True, 'import tensorflow as tf\n'), (212, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""train_loss"""'}), True, 'import tensorflow as tf\n'), (213, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {'name': '"""train_accuracy"""'}), True, 'import tensorflow as tf\n'), (215, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""valid_loss"""'}), True, 'import tensorflow as tf\n'), (216, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {'name': '"""vaild_accuracy"""'}), True, 'import tensorflow as tf\n'), (290, 'tensorflow.keras.preprocessing.image.load_img', 'keras.preprocessing.image.load_img', (['image_path'], {'target_size': '(args.image_height, args.image_width)'}), False, 'from tensorflow import keras\n'), (295, 'tensorflow.keras.preprocessing.image.img_to_array', 'keras.preprocessing.image.img_to_array', (['img'], {}), False, 'from tensorflow import keras\n'), (296, 'tensorflow.expand_dims', 'tf.expand_dims', (['img_array', '(0)'], {}), True, 'import tensorflow as tf\n'), (298, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['predictions[0]'], {}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'args.lr'}), True, 'import tensorflow as tf\n'), (220, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (285, 'os.path.exists', 'os.path.exists', (['args.test_image'], {}), False, 'import os, pathlib, PIL\n'), (16, 'tensorflow.keras.layers.experimental.preprocessing.RandomFlip', 'layers.experimental.preprocessing.RandomFlip', (['"""horizontal"""'], {'input_shape': 'data_shape'}), False, 'from tensorflow.keras import layers\n'), (19, 'tensorflow.keras.layers.experimental.preprocessing.RandomRotation', 'layers.experimental.preprocessing.RandomRotation', (['(0.1)'], {}), False, 'from tensorflow.keras import layers\n'), (20, 'tensorflow.keras.layers.experimental.preprocessing.RandomZoom', 'layers.experimental.preprocessing.RandomZoom', (['(0.1)'], {}), False, 'from tensorflow.keras import layers\n'), (149, 'tensorflow.keras.models.load_model', 'keras.models.load_model', (['dir'], {}), False, 'from tensorflow import keras\n'), (164, 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'args.lr', 'momentum': 'args.momentum'}), True, 'import tensorflow as tf\n'), (168, 'tensorflow.keras.optimizers.Adadelta', 'tf.keras.optimizers.Adadelta', ([], {'learning_rate': 'args.lr'}), True, 'import tensorflow 
as tf\n'), (251, 'tqdm.tqdm', 'tqdm', (['self.train_ds'], {'ncols': '(80)'}), False, 'from tqdm import tqdm\n'), (262, 'tqdm.tqdm', 'tqdm', (['self.test_ds'], {'ncols': '(80)'}), False, 'from tqdm import tqdm\n'), (301, 'numpy.max', 'np.max', (['score'], {}), True, 'import numpy as np\n'), (171, 'tensorflow.keras.optimizers.Adamax', 'tf.keras.optimizers.Adamax', ([], {'learning_rate': 'args.lr'}), True, 'import tensorflow as tf\n'), (301, 'numpy.argmax', 'np.argmax', (['score'], {}), True, 'import numpy as np\n'), (174, 'tensorflow.keras.optimizers.Ftrl', 'tf.keras.optimizers.Ftrl', ([], {'learning_rate': 'args.lr'}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.keras.optimizers.Nadam', 'tf.keras.optimizers.Nadam', ([], {'learning_rate': 'args.lr'}), True, 'import tensorflow as tf\n'), (180, 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', ([], {'learning_rate': 'args.lr'}), True, 'import tensorflow as tf\n')] |
Bodhis4ttva/LHC_Net | 8b47dff5117b078a99183afd1d103da06f37361c | from abc import ABC
import os
import random
import tensorflow as tf
import tensorflow_addons as tfa
import matplotlib.pyplot as plt
import numpy as np
from shutil import copyfile
import csv
from classification_models.tfkeras import Classifiers
import gc
def get_data(filename):
    with open(filename) as csvfile:
        reader = csv.reader(csvfile, delimiter=';')
        next(reader)
        data = []
        for row in reader:
            item = [row[0], row[2:]]
            data.append(item)
images = np.zeros((len(data), 48, 48, 1), dtype='float32')
labels = np.zeros((len(data)), dtype='float32')
labels_full = np.zeros(shape=(len(data), 7), dtype='float32')
for i in range(len(data)):
images[i, :, :, :] = np.array(data[i][1]).reshape((48, 48, 1))
labels[i] = np.array(data[i][0]).astype('float32')
labels_full[i, int(labels[i])] = 1
return images, labels_full
def etl_data(path):
images, labels = get_data(path)
images = tf.image.resize(images=images, size=(224, 224), method='bilinear').numpy()
imagesRGB = np.zeros(shape=(images.shape[0], 224, 224, 3), dtype='float32')
for i in range(images.shape[0]):
imagesRGB[i, :, :, :] = tf.image.grayscale_to_rgb(tf.convert_to_tensor(images[i, :, :, :])).numpy()
return imagesRGB, labels
class cb3(tf.keras.callbacks.Callback):
def __init__(self, x, y):
self.x = x
self.y = y
self.reports = []
    def on_epoch_end(self, epoch, logs=None):
report = tf.keras.metrics.CategoricalAccuracy()(self.y, self.model.predict(self.x)).numpy()
self.reports.append(report)
print("Test Accuracy", report)
print("")
return
def augment(images, params):
y = images
if params['flip']:
y = tf.image.flip_left_right(image=y)
    if 0 < params['zoom'] < 1:
y = tf.image.central_crop(image=y,
central_fraction=params['zoom'])
y = tf.image.resize(images=y,
size=[images.shape[1], images.shape[2]],
method='bilinear',
preserve_aspect_ratio=False)
if params['shift_h'] != 0 or params['shift_v'] != 0:
y = tfa.image.translate(images=y,
translations=[params['shift_h'], params['shift_v']],
interpolation='bilinear',
fill_mode='nearest')
if params['rot'] != 0:
y = tfa.image.rotate(images=y,
angles=params['rot'],
interpolation='bilinear',
fill_mode='nearest')
return y
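# Added note for illustration: `augment` expects every key below to be
# present. These neutral values leave the batch unchanged (a zoom outside
# (0, 1) disables cropping; zero angles/shifts and flip=False are no-ops).
IDENTITY_PARAMS = {'zoom': 1, 'rot': 0, 'shift_h': 0, 'shift_v': 0, 'flip': False}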
def TTA_Inference(model, x):
pred_test = model.predict(x)
zooms = [1] # 2
rotations = [0, 0.4, -0.4] # 5
shifts_h = [0, 10, -10] # 3
shifts_v = [0, 10, -10] # 3
flips = [False, True] # 2
default_prediction_weight = 3
count = default_prediction_weight
predictions = default_prediction_weight*pred_test
for i1 in range(len(zooms)):
for i2 in range(len(rotations)):
for i3 in range(len(shifts_h)):
for i4 in range(len(shifts_v)):
for i5 in range(len(flips)):
params = {'zoom': zooms[i1],
'rot': rotations[i2],
'shift_h': shifts_h[i3],
'shift_v': shifts_v[i4],
'flip': flips[i5]}
if params['zoom'] < 1 or params['rot'] != 0 or params['shift_h'] != 0 or params['shift_v'] != 0 or params['flip']:
count = count + 1
d = augment(x, params)
preds = model.predict(d, batch_size=128)
predictions = predictions + preds
del d
del preds
del params
gc.collect()
gc.collect()
gc.collect()
Params = [[0.9, 0, 0, 0, False],
[0.9, 0, 0, 0, True],
[0.9, 0.15, 0, 0, False],
[0.9, 0.15, 0, 0, True],
[0.9, -0.15, 0, 0, False],
[0.9, -0.15, 0, 0, True]]
for i in range(len(Params)):
params = {'zoom': Params[i][0],
'rot': Params[i][1],
'shift_h': Params[i][2],
'shift_v': Params[i][3],
'flip': Params[i][4]}
count = count + 1
d = augment(x, params)
preds = model.predict(d, batch_size=128)
predictions = predictions + preds
del d
del preds
del params
gc.collect()
gc.collect()
gc.collect()
predictions = predictions / count
return predictions
def Check_Unique(x):
lose = 0
for i in range(x.shape[0]):
if sum(x[i, :] == x[i, :].max()) > 1:
lose = lose + 1
return lose
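# Illustrative usage of the helpers above, not part of the original module;
# `model` and the csv path are placeholders.
#
# images, labels = etl_data('data/fer2013_test.csv')
# preds = TTA_Inference(model, images)
# print('rows with tied top classes:', Check_Unique(preds))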
| [
"tensorflow.convert_to_tensor",
"tensorflow.image.central_crop",
"tensorflow.image.flip_left_right",
"tensorflow.image.resize",
"tensorflow.keras.metrics.CategoricalAccuracy",
"numpy.array",
"numpy.zeros"
] | Lib/Utils.py | [(17, 'csv.reader', 'csv.reader', (['csvfile'], {'delimiter': '""";"""'}), False, 'import csv\n'), (36, 'numpy.zeros', 'np.zeros', ([], {'shape': '(images.shape[0], 224, 224, 3)', 'dtype': '"""float32"""'}), True, 'import numpy as np\n'), (61, 'tensorflow.image.flip_left_right', 'tf.image.flip_left_right', ([], {'image': 'y'}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.image.central_crop', 'tf.image.central_crop', ([], {'image': 'y', 'central_fraction': "params['zoom']"}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.image.resize', 'tf.image.resize', ([], {'images': 'y', 'size': '[images.shape[1], images.shape[2]]', 'method': '"""bilinear"""', 'preserve_aspect_ratio': '(False)'}), True, 'import tensorflow as tf\n'), (72, 'tensorflow_addons.image.translate', 'tfa.image.translate', ([], {'images': 'y', 'translations': "[params['shift_h'], params['shift_v']]", 'interpolation': '"""bilinear"""', 'fill_mode': '"""nearest"""'}), True, 'import tensorflow_addons as tfa\n'), (77, 'tensorflow_addons.image.rotate', 'tfa.image.rotate', ([], {'images': 'y', 'angles': "params['rot']", 'interpolation': '"""bilinear"""', 'fill_mode': '"""nearest"""'}), True, 'import tensorflow_addons as tfa\n'), (141, 'gc.collect', 'gc.collect', ([], {}), False, 'import gc\n'), (142, 'gc.collect', 'gc.collect', ([], {}), False, 'import gc\n'), (143, 'gc.collect', 'gc.collect', ([], {}), False, 'import gc\n'), (35, 'tensorflow.image.resize', 'tf.image.resize', ([], {'images': 'images', 'size': '(224, 224)', 'method': '"""bilinear"""'}), True, 'import tensorflow as tf\n'), (27, 'numpy.array', 'np.array', (['data[i][1]'], {}), True, 'import numpy as np\n'), (28, 'numpy.array', 'np.array', (['data[i][0]'], {}), True, 'import numpy as np\n'), (38, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['images[(i), :, :, :]'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {}), True, 'import tensorflow as tf\n'), (116, 'gc.collect', 'gc.collect', ([], {}), False, 'import gc\n'), (117, 'gc.collect', 'gc.collect', ([], {}), False, 'import gc\n'), (118, 'gc.collect', 'gc.collect', ([], {}), False, 'import gc\n')] |
ixcc/federated | 3fb48ae6d019ee763c5112d23c3bdbcbaea17948 | # Lint as: python3
# Copyright 2018, The TensorFlow Federated Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Example showing how to run a multi-machine simulation.
In order to run this example, you must have a running instance of the
Executor Service, either locally or on Kubernetes.
The model trains EMNIST for a small number of rounds, but uses a RemoteExecutor
to distribute the work to the ExecutorService.
"""
import collections
import warnings
from absl import app
from absl import flags
import grpc
import numpy as np
import tensorflow as tf
import tensorflow_federated as tff
tf.compat.v1.enable_v2_behavior()
FLAGS = flags.FLAGS
flags.DEFINE_string('host', None, 'The host to connect to.')
flags.mark_flag_as_required('host')
flags.DEFINE_string('port', '8000', 'The port to connect to.')
flags.DEFINE_integer('n_clients', 10, 'Number of clients.')
def preprocess(dataset):
def element_fn(element):
return collections.OrderedDict([
('x', tf.reshape(element['pixels'], [-1])),
('y', tf.reshape(element['label'], [1])),
])
return dataset.repeat(NUM_EPOCHS).map(element_fn).batch(BATCH_SIZE)
def make_federated_data(client_data, client_ids):
return [
preprocess(client_data.create_tf_dataset_for_client(x))
for x in client_ids
]
def create_compiled_keras_model():
"""Create compiled Keras model."""
model = tf.keras.models.Sequential([
tf.keras.layers.Dense(
10,
activation=tf.nn.softmax,
kernel_initializer='zeros',
input_shape=(784,))
])
model.compile(
loss=tf.keras.losses.SparseCategoricalCrossentropy(),
optimizer=tf.keras.optimizers.SGD(learning_rate=0.02),
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return model
NUM_EPOCHS = 10
BATCH_SIZE = 20
N_ROUNDS = 3
def make_remote_executor(inferred_cardinalities):
"""Make remote executor."""
def create_worker_stack_on(ex):
return tff.framework.LambdaExecutor(tff.framework.ConcurrentExecutor(ex))
client_ex = []
num_clients = inferred_cardinalities.get(tff.CLIENTS, None)
if num_clients:
print('Inferred that there are {} clients'.format(num_clients))
else:
print('No CLIENTS placement provided')
for _ in range(num_clients or 0):
channel = grpc.insecure_channel('{}:{}'.format(FLAGS.host, FLAGS.port))
client_ex.append(
create_worker_stack_on(
tff.framework.RemoteExecutor(channel, rpc_mode='STREAMING')))
federated_ex = tff.framework.FederatedExecutor({
None: create_worker_stack_on(tff.framework.EagerExecutor()),
tff.SERVER: create_worker_stack_on(tff.framework.EagerExecutor()),
tff.CLIENTS: client_ex,
})
return tff.framework.LambdaExecutor(federated_ex)
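# Worked example, added for illustration: with ten clients inferred from the
# computation's cardinalities, the factory builds one remote worker stack per
# client (the values below are hypothetical).
#
# executor = make_remote_executor({tff.CLIENTS: 10})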
def main(argv):
if len(argv) > 1:
raise app.UsageError('Too many command-line arguments.')
warnings.simplefilter('ignore')
np.random.seed(0)
emnist_train, _ = tff.simulation.datasets.emnist.load_data()
sample_clients = emnist_train.client_ids[0:FLAGS.n_clients]
federated_train_data = make_federated_data(emnist_train, sample_clients)
example_dataset = emnist_train.create_tf_dataset_for_client(
emnist_train.client_ids[0])
preprocessed_example_dataset = preprocess(example_dataset)
sample_batch = tf.nest.map_structure(
lambda x: x.numpy(),
iter(preprocessed_example_dataset).next())
def model_fn():
keras_model = create_compiled_keras_model()
return tff.learning.from_compiled_keras_model(keras_model, sample_batch)
iterative_process = tff.learning.build_federated_averaging_process(model_fn)
# Set the default executor to be a RemoteExecutor
tff.framework.set_default_executor(make_remote_executor)
state = iterative_process.initialize()
state, metrics = iterative_process.next(state, federated_train_data)
print('round 1, metrics={}'.format(metrics))
for round_num in range(2, N_ROUNDS + 1):
state, metrics = iterative_process.next(state, federated_train_data)
print('round {:2d}, metrics={}'.format(round_num, metrics))
if __name__ == '__main__':
app.run(main)
| [
"tensorflow.compat.v1.enable_v2_behavior",
"numpy.random.seed",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.reshape",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.keras.optimizers.SGD"
] | tensorflow_federated/python/examples/remote_executor_example.py | [(34, 'tensorflow.compat.v1.enable_v2_behavior', 'tf.compat.v1.enable_v2_behavior', ([], {}), True, 'import tensorflow as tf\n'), (38, 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""host"""', 'None', '"""The host to connect to."""'], {}), False, 'from absl import flags\n'), (39, 'absl.flags.mark_flag_as_required', 'flags.mark_flag_as_required', (['"""host"""'], {}), False, 'from absl import flags\n'), (40, 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""port"""', '"""8000"""', '"""The port to connect to."""'], {}), False, 'from absl import flags\n'), (41, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""n_clients"""', '(10)', '"""Number of clients."""'], {}), False, 'from absl import flags\n'), (109, 'tensorflow_federated.framework.LambdaExecutor', 'tff.framework.LambdaExecutor', (['federated_ex'], {}), True, 'import tensorflow_federated as tff\n'), (116, 'warnings.simplefilter', 'warnings.simplefilter', (['"""ignore"""'], {}), False, 'import warnings\n'), (118, 'numpy.random.seed', 'np.random.seed', (['(0)'], {}), True, 'import numpy as np\n'), (120, 'tensorflow_federated.simulation.datasets.emnist.load_data', 'tff.simulation.datasets.emnist.load_data', ([], {}), True, 'import tensorflow_federated as tff\n'), (139, 'tensorflow_federated.learning.build_federated_averaging_process', 'tff.learning.build_federated_averaging_process', (['model_fn'], {}), True, 'import tensorflow_federated as tff\n'), (142, 'tensorflow_federated.framework.set_default_executor', 'tff.framework.set_default_executor', (['make_remote_executor'], {}), True, 'import tensorflow_federated as tff\n'), (155, 'absl.app.run', 'app.run', (['main'], {}), False, 'from absl import app\n'), (114, 'absl.app.UsageError', 'app.UsageError', (['"""Too many command-line arguments."""'], {}), False, 'from absl import app\n'), (137, 'tensorflow_federated.learning.from_compiled_keras_model', 'tff.learning.from_compiled_keras_model', (['keras_model', 'sample_batch'], {}), True, 'import tensorflow_federated as tff\n'), (65, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': 'tf.nn.softmax', 'kernel_initializer': '"""zeros"""', 'input_shape': '(784,)'}), True, 'import tensorflow as tf\n'), (73, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {}), True, 'import tensorflow as tf\n'), (74, 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': '(0.02)'}), True, 'import tensorflow as tf\n'), (88, 'tensorflow_federated.framework.ConcurrentExecutor', 'tff.framework.ConcurrentExecutor', (['ex'], {}), True, 'import tensorflow_federated as tff\n'), (75, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {}), True, 'import tensorflow as tf\n'), (101, 'tensorflow_federated.framework.RemoteExecutor', 'tff.framework.RemoteExecutor', (['channel'], {'rpc_mode': '"""STREAMING"""'}), True, 'import tensorflow_federated as tff\n'), (104, 'tensorflow_federated.framework.EagerExecutor', 'tff.framework.EagerExecutor', ([], {}), True, 'import tensorflow_federated as tff\n'), (105, 'tensorflow_federated.framework.EagerExecutor', 'tff.framework.EagerExecutor', ([], {}), True, 'import tensorflow_federated as tff\n'), (48, 'tensorflow.reshape', 'tf.reshape', (["element['pixels']", '[-1]'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.reshape', 'tf.reshape', (["element['label']", 
'[1]'], {}), True, 'import tensorflow as tf\n')] |
Xodarap/models | 08bb9eb5ad79e6bceffc71aeea6af809cc78694b | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A light weight utilities to train NLP models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import json
import os
from absl import logging
import tensorflow as tf
from official.utils.misc import distribution_utils
from official.utils.misc import tpu_lib
_SUMMARY_TXT = 'training_summary.txt'
_MIN_SUMMARY_STEPS = 10
def _save_checkpoint(checkpoint, model_dir, checkpoint_prefix):
"""Saves model to with provided checkpoint prefix."""
checkpoint_path = os.path.join(model_dir, checkpoint_prefix)
saved_path = checkpoint.save(checkpoint_path)
logging.info('Saving model as TF checkpoint: %s', saved_path)
return
def _get_input_iterator(input_fn, strategy):
"""Returns distributed dataset iterator."""
  # When training with TPU pods, the dataset needs to be cloned across
  # workers. Since a Dataset instance cannot be cloned in eager mode, we
  # instead pass a callable that returns a dataset.
input_data = input_fn()
if callable(input_data):
iterator = iter(
strategy.experimental_distribute_datasets_from_function(input_data))
else:
iterator = iter(strategy.experimental_distribute_dataset(input_data))
return iterator
def _float_metric_value(metric):
"""Gets the value of a float-value keras metric."""
return metric.result().numpy().astype(float)
def _steps_to_run(current_step, steps_per_epoch, steps_per_loop):
"""Calculates steps to run on device."""
if steps_per_loop <= 0:
raise ValueError('steps_per_loop should be positive integer.')
if steps_per_loop == 1:
return steps_per_loop
remainder_in_epoch = current_step % steps_per_epoch
if remainder_in_epoch != 0:
return min(steps_per_epoch - remainder_in_epoch, steps_per_loop)
else:
return steps_per_loop
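# Worked example, added for clarity: with steps_per_epoch=100 and
# steps_per_loop=30, a current_step of 90 leaves a remainder of 90 inside the
# epoch, so the function returns min(100 - 90, 30) = 10, keeping each
# graph-mode loop from crossing an epoch boundary.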
def _write_txt_summary(training_summary, model_dir):
"""Writes a summary text file to record stats."""
summary_path = os.path.join(model_dir, _SUMMARY_TXT)
with tf.io.gfile.GFile(summary_path, 'wb') as f:
logging.info('Training Summary: \n%s', str(training_summary))
f.write(json.dumps(training_summary, indent=4))
def run_customized_training_loop(
# pylint: disable=invalid-name
_sentinel=None,
# pylint: enable=invalid-name
strategy=None,
model_fn=None,
loss_fn=None,
model_dir=None,
train_input_fn=None,
steps_per_epoch=None,
steps_per_loop=1,
epochs=1,
eval_input_fn=None,
eval_steps=None,
metric_fn=None,
init_checkpoint=None,
use_remote_tpu=False,
custom_callbacks=None,
run_eagerly=False):
"""Run BERT pretrain model training using low-level API.
Arguments:
_sentinel: Used to prevent positional parameters. Internal, do not use.
strategy: Distribution strategy on which to run low level training loop.
model_fn: Function that returns a tuple (model, sub_model). Caller of this
function should add optimizer to the `model` via calling
`model.compile()` API or manually setting `model.optimizer` attribute.
Second element of the returned tuple(sub_model) is an optional sub model
to be used for initial checkpoint -- if provided.
loss_fn: Function with signature func(labels, logits) and returns a loss
tensor.
model_dir: Model directory used during training for restoring/saving model
weights.
train_input_fn: Function that returns a tf.data.Dataset used for training.
steps_per_epoch: Number of steps to run per epoch. At the end of each
epoch, model checkpoint will be saved and evaluation will be conducted
if evaluation dataset is provided.
steps_per_loop: Number of steps per graph-mode loop. In order to reduce
communication in eager context, training logs are printed every
steps_per_loop.
epochs: Number of epochs to train.
eval_input_fn: Function that returns evaluation dataset. If none,
evaluation is skipped.
eval_steps: Number of steps to run evaluation. Required if `eval_input_fn`
is not none.
metric_fn: A metrics function that returns a Keras Metric object to record
evaluation result using evaluation dataset or with training dataset
after every epoch.
init_checkpoint: Optional checkpoint to load to `sub_model` returned by
`model_fn`.
use_remote_tpu: If true, input pipeline ops are placed in TPU worker host
as an optimization.
custom_callbacks: A list of Keras Callbacks objects to run during
training. More specifically, `on_batch_begin()`, `on_batch_end()`,
methods are invoked during training.
run_eagerly: Whether to run model training in pure eager execution. This
should be disable for TPUStrategy.
Returns:
Trained model.
Raises:
ValueError: (1) When model returned by `model_fn` does not have optimizer
attribute or when required parameters are set to none. (2) eval args are
not specified correctly. (3) metric_fn must be a callable if specified.
"""
if _sentinel is not None:
raise ValueError('only call `run_customized_training_loop()` '
'with named arguments.')
required_arguments = [
strategy, model_fn, loss_fn, model_dir, steps_per_epoch, train_input_fn
]
if [arg for arg in required_arguments if arg is None]:
    raise ValueError('`strategy`, `model_fn`, `loss_fn`, `model_dir`, '
                     '`steps_per_epoch` and `train_input_fn` are required '
                     'parameters.')
if steps_per_loop > steps_per_epoch:
logging.error(
'steps_per_loop: %d is specified to be greater than '
' steps_per_epoch: %d, we will use steps_per_epoch as'
' steps_per_loop.', steps_per_loop, steps_per_epoch)
steps_per_loop = steps_per_epoch
assert tf.executing_eagerly()
if run_eagerly:
if steps_per_loop > 1:
raise ValueError(
          'steps_per_loop is used for performance optimization. When you '
          'want to run eagerly, you cannot leverage the graph-mode loop.')
if isinstance(strategy, tf.distribute.experimental.TPUStrategy):
raise ValueError(
          'TPUStrategy should not run eagerly as it heavily relies on graph'
' optimization for the distributed system.')
if eval_input_fn and (eval_steps is None or metric_fn is None):
raise ValueError(
        '`eval_steps` and `metric_fn` are required when `eval_input_fn` '
        'is not None.')
if metric_fn and not callable(metric_fn):
raise ValueError(
        'If `metric_fn` is specified, it must be callable.')
total_training_steps = steps_per_epoch * epochs
  # To reduce unnecessary send/receive input pipeline operations, we place
  # input pipeline ops on the worker task.
with tf.device(tpu_lib.get_primary_cpu_task(use_remote_tpu)):
train_iterator = _get_input_iterator(train_input_fn, strategy)
with distribution_utils.get_strategy_scope(strategy):
# To correctly place the model weights on accelerators,
# model and optimizer should be created in scope.
model, sub_model = model_fn()
if not hasattr(model, 'optimizer'):
        raise ValueError('User should set the optimizer attribute on the '
                         'model inside `model_fn`.')
optimizer = model.optimizer
use_float16 = isinstance(
optimizer, tf.keras.mixed_precision.experimental.LossScaleOptimizer)
if init_checkpoint:
logging.info(
'Checkpoint file %s found and restoring from '
'initial checkpoint for core model.', init_checkpoint)
checkpoint = tf.train.Checkpoint(model=sub_model)
checkpoint.restore(init_checkpoint).assert_consumed()
logging.info('Loading from checkpoint file completed')
train_loss_metric = tf.keras.metrics.Mean(
'training_loss', dtype=tf.float32)
eval_metrics = [metric_fn()] if metric_fn else []
      # If evaluation is required, make a copy of the metric, as it will be
      # used by both training and evaluation.
train_metrics = [
metric.__class__.from_config(metric.get_config())
for metric in eval_metrics
]
# Create summary writers
eval_summary_writer = tf.summary.create_file_writer(
os.path.join(model_dir, 'summaries/eval'))
if steps_per_loop >= _MIN_SUMMARY_STEPS:
        # Only write summaries when stats have been collected over
        # sufficiently many steps.
train_summary_writer = tf.summary.create_file_writer(
os.path.join(model_dir, 'summaries/train'))
else:
train_summary_writer = None
# Collects training variables.
training_vars = model.trainable_variables
def _replicated_step(inputs):
"""Replicated training step."""
inputs, labels = inputs
with tf.GradientTape() as tape:
model_outputs = model(inputs, training=True)
loss = loss_fn(labels, model_outputs)
if use_float16:
scaled_loss = optimizer.get_scaled_loss(loss)
if use_float16:
scaled_grads = tape.gradient(scaled_loss, training_vars)
grads = optimizer.get_unscaled_gradients(scaled_grads)
else:
grads = tape.gradient(loss, training_vars)
optimizer.apply_gradients(zip(grads, training_vars))
# For reporting, the metric takes the mean of losses.
train_loss_metric.update_state(loss)
for metric in train_metrics:
metric.update_state(labels, model_outputs)
@tf.function
def train_steps(iterator, steps):
"""Performs distributed training steps in a loop.
Args:
iterator: the distributed iterator of training datasets.
        steps: a tf.int32 integer tensor specifying the number of steps to
          run inside the host training loop.
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
if not isinstance(steps, tf.Tensor):
        raise ValueError('steps should be a Tensor. Python objects may cause '
                         'retracing.')
for _ in tf.range(steps):
strategy.experimental_run_v2(_replicated_step, args=(next(iterator),))
def train_single_step(iterator):
"""Performs a distributed training step.
Args:
iterator: the distributed iterator of training datasets.
Raises:
ValueError: Any of the arguments or tensor shapes are invalid.
"""
strategy.experimental_run_v2(_replicated_step, args=(next(iterator),))
def test_step(iterator):
"""Calculates evaluation metrics on distributed devices."""
def _test_step_fn(inputs):
"""Replicated accuracy calculation."""
inputs, labels = inputs
model_outputs = model(inputs, training=False)
for metric in eval_metrics:
metric.update_state(labels, model_outputs)
strategy.experimental_run_v2(_test_step_fn, args=(next(iterator),))
if not run_eagerly:
train_single_step = tf.function(train_single_step)
test_step = tf.function(test_step)
def _run_evaluation(current_training_step, test_iterator):
"""Runs validation steps and aggregate metrics."""
for _ in range(eval_steps):
test_step(test_iterator)
with eval_summary_writer.as_default():
for metric in eval_metrics + model.metrics:
metric_value = _float_metric_value(metric)
logging.info('Step: [%d] Validation %s = %f', current_training_step,
metric.name, metric_value)
tf.summary.scalar(
metric.name, metric_value, step=current_training_step)
eval_summary_writer.flush()
def _run_callbacks_on_batch_begin(batch):
"""Runs custom callbacks at the start of every step."""
if not custom_callbacks:
return
for callback in custom_callbacks:
callback.on_batch_begin(batch)
def _run_callbacks_on_batch_end(batch):
"""Runs custom callbacks at the end of every step."""
if not custom_callbacks:
return
for callback in custom_callbacks:
callback.on_batch_end(batch)
# Training loop starts here.
checkpoint = tf.train.Checkpoint(model=model, optimizer=optimizer)
latest_checkpoint_file = tf.train.latest_checkpoint(model_dir)
if latest_checkpoint_file:
logging.info(
'Checkpoint file %s found and restoring from '
'checkpoint', latest_checkpoint_file)
checkpoint.restore(latest_checkpoint_file)
logging.info('Loading from checkpoint file completed')
current_step = optimizer.iterations.numpy()
checkpoint_name = 'ctl_step_{step}.ckpt'
while current_step < total_training_steps:
      # Training loss/metrics are averaged over the steps inside the micro
      # training loop, so we reset their values before each round.
train_loss_metric.reset_states()
for metric in train_metrics + model.metrics:
metric.reset_states()
_run_callbacks_on_batch_begin(current_step)
# Runs several steps in the host while loop.
steps = _steps_to_run(current_step, steps_per_epoch, steps_per_loop)
if steps == 1:
# TODO(zongweiz): merge with train_steps once tf.while_loop
# GPU performance bugs are fixed.
train_single_step(train_iterator)
else:
# Converts steps to a Tensor to avoid tf.function retracing.
train_steps(train_iterator,
tf.convert_to_tensor(steps, dtype=tf.int32))
_run_callbacks_on_batch_end(current_step)
current_step += steps
train_loss = _float_metric_value(train_loss_metric)
# Updates training logging.
training_status = 'Train Step: %d/%d / loss = %s' % (
current_step, total_training_steps, train_loss)
if train_summary_writer:
with train_summary_writer.as_default():
tf.summary.scalar(
train_loss_metric.name, train_loss, step=current_step)
for metric in train_metrics + model.metrics:
metric_value = _float_metric_value(metric)
training_status += ' %s = %f' % (metric.name, metric_value)
tf.summary.scalar(metric.name, metric_value, step=current_step)
train_summary_writer.flush()
logging.info(training_status)
      # Save model checkpoints and run validation steps at the end of every
      # epoch.
if current_step % steps_per_epoch == 0:
# To avoid repeated model saving, we do not save after the last
# step of training.
if current_step < total_training_steps:
_save_checkpoint(checkpoint, model_dir,
checkpoint_name.format(step=current_step))
if eval_input_fn:
logging.info('Running evaluation after step: %s.', current_step)
_run_evaluation(current_step,
_get_input_iterator(eval_input_fn, strategy))
# Re-initialize evaluation metric.
for metric in eval_metrics + model.metrics:
metric.reset_states()
_save_checkpoint(checkpoint, model_dir,
checkpoint_name.format(step=current_step))
if eval_input_fn:
logging.info('Running final evaluation after training is complete.')
_run_evaluation(current_step,
_get_input_iterator(eval_input_fn, strategy))
training_summary = {
'total_training_steps': total_training_steps,
'train_loss': _float_metric_value(train_loss_metric),
}
if eval_metrics:
# TODO(hongkuny): Cleans up summary reporting in text.
training_summary['last_train_metrics'] = _float_metric_value(
train_metrics[0])
training_summary['eval_metrics'] = _float_metric_value(eval_metrics[0])
_write_txt_summary(training_summary, model_dir)
return model
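# Minimal usage sketch (all names below are hypothetical placeholders; a real
# call wires in a tf.distribute strategy, a model factory whose model already
# has an optimizer attached, and dataset functions):
#   trained = run_customized_training_loop(
#       strategy=tf.distribute.MirroredStrategy(),
#       model_fn=build_model_and_submodel,
#       loss_fn=my_loss_fn,
#       model_dir='/tmp/ctl_model',
#       train_input_fn=train_input_fn,
#       steps_per_epoch=1000,
#       steps_per_loop=200,
#       epochs=3)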
| [
"tensorflow.convert_to_tensor",
"tensorflow.executing_eagerly",
"tensorflow.train.latest_checkpoint",
"tensorflow.range",
"tensorflow.train.Checkpoint",
"tensorflow.io.gfile.GFile",
"tensorflow.function",
"tensorflow.summary.scalar",
"tensorflow.keras.metrics.Mean",
"tensorflow.GradientTape"
] | official/modeling/model_training_utils.py | [(36, 'os.path.join', 'os.path.join', (['model_dir', 'checkpoint_prefix'], {}), False, 'import os\n'), (38, 'absl.logging.info', 'logging.info', (['"""Saving model as TF checkpoint: %s"""', 'saved_path'], {}), False, 'from absl import logging\n'), (77, 'os.path.join', 'os.path.join', (['model_dir', '_SUMMARY_TXT'], {}), False, 'import os\n'), (167, 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), True, 'import tensorflow as tf\n'), (78, 'tensorflow.io.gfile.GFile', 'tf.io.gfile.GFile', (['summary_path', '"""wb"""'], {}), True, 'import tensorflow as tf\n'), (162, 'absl.logging.error', 'logging.error', (['"""steps_per_loop: %d is specified to be greater than steps_per_epoch: %d, we will use steps_per_epoch as steps_per_loop."""', 'steps_per_loop', 'steps_per_epoch'], {}), False, 'from absl import logging\n'), (80, 'json.dumps', 'json.dumps', (['training_summary'], {'indent': '(4)'}), False, 'import json\n'), (191, 'official.utils.misc.tpu_lib.get_primary_cpu_task', 'tpu_lib.get_primary_cpu_task', (['use_remote_tpu'], {}), False, 'from official.utils.misc import tpu_lib\n'), (194, 'official.utils.misc.distribution_utils.get_strategy_scope', 'distribution_utils.get_strategy_scope', (['strategy'], {}), False, 'from official.utils.misc import distribution_utils\n'), (213, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', (['"""training_loss"""'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (334, 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'model': 'model', 'optimizer': 'optimizer'}), True, 'import tensorflow as tf\n'), (335, 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['model_dir'], {}), True, 'import tensorflow as tf\n'), (206, 'absl.logging.info', 'logging.info', (['"""Checkpoint file %s found and restoring from initial checkpoint for core model."""', 'init_checkpoint'], {}), False, 'from absl import logging\n'), (209, 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'model': 'sub_model'}), True, 'import tensorflow as tf\n'), (211, 'absl.logging.info', 'logging.info', (['"""Loading from checkpoint file completed"""'], {}), False, 'from absl import logging\n'), (225, 'os.path.join', 'os.path.join', (['model_dir', '"""summaries/eval"""'], {}), False, 'import os\n'), (274, 'tensorflow.range', 'tf.range', (['steps'], {}), True, 'import tensorflow as tf\n'), (302, 'tensorflow.function', 'tf.function', (['train_single_step'], {}), True, 'import tensorflow as tf\n'), (303, 'tensorflow.function', 'tf.function', (['test_step'], {}), True, 'import tensorflow as tf\n'), (337, 'absl.logging.info', 'logging.info', (['"""Checkpoint file %s found and restoring from checkpoint"""', 'latest_checkpoint_file'], {}), False, 'from absl import logging\n'), (341, 'absl.logging.info', 'logging.info', (['"""Loading from checkpoint file completed"""'], {}), False, 'from absl import logging\n'), (382, 'absl.logging.info', 'logging.info', (['training_status'], {}), False, 'from absl import logging\n'), (404, 'absl.logging.info', 'logging.info', (['"""Running final evaluation after training is complete."""'], {}), False, 'from absl import logging\n'), (230, 'os.path.join', 'os.path.join', (['model_dir', '"""summaries/train"""'], {}), False, 'import os\n'), (241, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (313, 'absl.logging.info', 'logging.info', (['"""Step: [%d] Validation %s = %f"""', 'current_training_step', 'metric.name', 
'metric_value'], {}), False, 'from absl import logging\n'), (315, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['metric.name', 'metric_value'], {'step': 'current_training_step'}), True, 'import tensorflow as tf\n'), (364, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['steps'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (375, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['train_loss_metric.name', 'train_loss'], {'step': 'current_step'}), True, 'import tensorflow as tf\n'), (393, 'absl.logging.info', 'logging.info', (['"""Running evaluation after step: %s."""', 'current_step'], {}), False, 'from absl import logging\n'), (380, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['metric.name', 'metric_value'], {'step': 'current_step'}), True, 'import tensorflow as tf\n')] |
nick-monto/EARShot_TF2 | c1628344b668a5f7ef4bb763e49432b8780c93eb | from numpy import power,min,max,floor
import tensorflow as tf
from tensorflow.keras import Model, Sequential, optimizers, losses
from tensorflow.keras.layers import Dense, GRU, Input, Masking, LSTM
from tensorflow.keras.callbacks import LearningRateScheduler
tf.keras.backend.set_floatx('float64')
'''
Learning rate adjustment functions.
'''
def noam_decay_lr(warmup):
'''
Wrapper to define noam decay; the wrapper method allows us to make the
lr update depend on additional parameters.
The maximal learning rate under this scheme occurs at epoch = warmup, and
will be equal to initial/warmup.
'''
def schedule(epoch, lr):
        # the LearningRateScheduler passes the current epoch and lr to schedule
lrate = lr*power(warmup,-0.5)*min([(epoch+1)*power(warmup,-1.5),power(epoch+1,-0.5)])
return lrate
return LearningRateScheduler(schedule)
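# Worked example (illustrative numbers): with warmup = 100 the factor applied
# each epoch is power(warmup, -0.5) * min((epoch+1) * power(warmup, -1.5),
# power(epoch+1, -0.5)); at epoch + 1 == warmup both min() arguments equal
# power(warmup, -0.5), so the rate peaks at lr * power(warmup, -1), i.e.
# initial/warmup as stated in the docstring.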
def step_decay_lr(initial_lr, drop_factor,drop_every):
'''
Wrapper that just drops the learning rate by a fixed factor (drop_factor) every drop_every
epochs.
'''
def schedule(epoch):
exp_fac = floor((1+epoch)/drop_every)
lrate = initial_lr*power(drop_factor,exp_fac)
return lrate
return LearningRateScheduler(schedule)
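# Worked example (illustrative numbers): step_decay_lr(0.1, 0.5, drop_every=10)
# returns 0.1 * power(0.5, floor((1 + epoch) / 10)), i.e. 0.1 for epochs 0-8,
# 0.05 from epoch 9, 0.025 from epoch 19, and so on.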
def polynomial_decay_lr(max_epochs,poly_pow):
'''
Wrapper that drops the learning rate to zero over max_epochs epochs, with
shape given by poly_pow (set poly_pow = 1 to get linear decay).
'''
def schedule(epoch, lr):
decay = power((1 - (epoch/max_epochs)),poly_pow)
lrate = lr*decay
return lrate
return LearningRateScheduler(schedule)
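# Note on Keras behaviour: LearningRateScheduler passes the optimizer's
# *current* rate as `lr`, so the (1 - epoch/max_epochs)**poly_pow factor
# compounds multiplicatively across epochs rather than being applied to the
# initial rate each time.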
def constant_lr(initial):
'''
Wrapper that just clamps the learning rate at the initial value forever.
'''
def schedule(epoch):
return initial
return LearningRateScheduler(schedule)
class EARSHOT(Model):
'''
EARSHOT model sub-classing tf.keras.Model
'''
def __init__(self, output_len, model_parameters):
'''
output_len = length of target vector
        model_parameters = model hyperparameters pulled from parameters.py
'''
super(EARSHOT, self).__init__(name='earshot')
self.model_parameters = model_parameters
self.mask = Masking(mask_value=-9999, name="mask")
if self.model_parameters.hidden['type'] == "LSTM":
self.hidden = LSTM(self.model_parameters.hidden['size'],
return_sequences=True, stateful=False,
name="LSTM")
elif self.model_parameters.hidden['type'] == "GRU":
self.hidden = GRU(self.model_parameters.hidden['size'],
return_sequences=True, name="GRU")
        # loss function and output activation are coupled; this sets them both
if self.model_parameters.train_loss == 'CE':
self.loss = losses.BinaryCrossentropy(from_logits=True)
self.activation = tf.nn.sigmoid
elif self.model_parameters.train_loss == 'MSE':
self.loss = losses.MeanSquaredError()
self.activation = tf.nn.tanh
# set learning rate schedule
if list(self.model_parameters.learning_schedule.keys())[0] == 'noam':
self.lr_sched = noam_decay_lr(self.model_parameters.learning_schedule['noam']['warmup'])
lr = self.model_parameters.learning_schedule['noam']['initial']
elif list(self.model_parameters.learning_schedule.keys())[0] == 'constant':
self.lr_sched = constant_lr(self.model_parameters.learning_schedule['constant']['rate'])
lr = self.model_parameters.learning_schedule['constant']['rate']
elif list(self.model_parameters.learning_schedule.keys())[0] == 'polynomial':
self.lr_sched = polynomial_decay_lr(self.model_parameters.learning_schedule['polynomial']['max_epochs'],
self.model_parameters.learning_schedule['polynomial']['poly_pow'])
lr = self.model_parameters.learning_schedule['polynomial']['initial']
elif list(self.model_parameters.learning_schedule.keys())[0] == 'step':
self.lr_sched = step_decay_lr(self.model_parameters.learning_schedule['step']['initial'],
self.model_parameters.learning_schedule['step']['drop_factor'],
self.model_parameters.learning_schedule['step']['drop_every'])
lr = self.model_parameters.learning_schedule['step']['initial']
# optimizer
if list(self.model_parameters.optimizer.keys())[0] == 'ADAM':
self.optimizer = tf.keras.optimizers.Adam(learning_rate=lr, **self.model_parameters.optimizer['ADAM'])
elif list(self.model_parameters.optimizer.keys())[0] == 'SGD':
self.optimizer = tf.keras.optimizers.SGD(learning_rate=lr, **self.model_parameters.optimizer['SGD'])
self.dense_output = Dense(output_len, activation=self.activation)
def call(self, inputs):
'''
Input is provided at training time.
'''
x = self.mask(inputs)
x = self.hidden(x)
return self.dense_output(x)
def model(self, input_shape):
'''
Function for model introspection
'''
x = Input(input_shape)
return Model(inputs=[x], outputs=self.call(x))
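# Hypothetical instantiation sketch (assumes a parameters object exposing the
# attributes read in __init__; the values below are illustrative only):
#   net = EARSHOT(output_len=300, model_parameters=params)
#   net.model(input_shape=(None, 256)).summary()  # introspection helper above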
| [
"tensorflow.keras.layers.Masking",
"numpy.power",
"tensorflow.keras.backend.set_floatx",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.keras.callbacks.LearningRateScheduler",
"tensorflow.keras.losses.BinaryCrossentropy",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.layers.GRU",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.optimizers.Adam",
"numpy.floor",
"tensorflow.keras.layers.Input"
] | earshot/model.py | [(7, 'tensorflow.keras.backend.set_floatx', 'tf.keras.backend.set_floatx', (['"""float64"""'], {}), True, 'import tensorflow as tf\n'), (24, 'tensorflow.keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['schedule'], {}), False, 'from tensorflow.keras.callbacks import LearningRateScheduler\n'), (37, 'tensorflow.keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['schedule'], {}), False, 'from tensorflow.keras.callbacks import LearningRateScheduler\n'), (50, 'tensorflow.keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['schedule'], {}), False, 'from tensorflow.keras.callbacks import LearningRateScheduler\n'), (60, 'tensorflow.keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['schedule'], {}), False, 'from tensorflow.keras.callbacks import LearningRateScheduler\n'), (33, 'numpy.floor', 'floor', (['((1 + epoch) / drop_every)'], {}), False, 'from numpy import power, min, max, floor\n'), (46, 'numpy.power', 'power', (['(1 - epoch / max_epochs)', 'poly_pow'], {}), False, 'from numpy import power, min, max, floor\n'), (75, 'tensorflow.keras.layers.Masking', 'Masking', ([], {'mask_value': '(-9999)', 'name': '"""mask"""'}), False, 'from tensorflow.keras.layers import Dense, GRU, Input, Masking, LSTM\n'), (116, 'tensorflow.keras.layers.Dense', 'Dense', (['output_len'], {'activation': 'self.activation'}), False, 'from tensorflow.keras.layers import Dense, GRU, Input, Masking, LSTM\n'), (132, 'tensorflow.keras.layers.Input', 'Input', (['input_shape'], {}), False, 'from tensorflow.keras.layers import Dense, GRU, Input, Masking, LSTM\n'), (34, 'numpy.power', 'power', (['drop_factor', 'exp_fac'], {}), False, 'from numpy import power, min, max, floor\n'), (78, 'tensorflow.keras.layers.LSTM', 'LSTM', (["self.model_parameters.hidden['size']"], {'return_sequences': '(True)', 'stateful': '(False)', 'name': '"""LSTM"""'}), False, 'from tensorflow.keras.layers import Dense, GRU, Input, Masking, LSTM\n'), (87, 'tensorflow.keras.losses.BinaryCrossentropy', 'losses.BinaryCrossentropy', ([], {'from_logits': '(True)'}), False, 'from tensorflow.keras import Model, Sequential, optimizers, losses\n'), (112, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'lr'}), True, 'import tensorflow as tf\n'), (22, 'numpy.power', 'power', (['warmup', '(-0.5)'], {}), False, 'from numpy import power, min, max, floor\n'), (82, 'tensorflow.keras.layers.GRU', 'GRU', (["self.model_parameters.hidden['size']"], {'return_sequences': '(True)', 'name': '"""GRU"""'}), False, 'from tensorflow.keras.layers import Dense, GRU, Input, Masking, LSTM\n'), (90, 'tensorflow.keras.losses.MeanSquaredError', 'losses.MeanSquaredError', ([], {}), False, 'from tensorflow.keras import Model, Sequential, optimizers, losses\n'), (114, 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'lr'}), True, 'import tensorflow as tf\n'), (22, 'numpy.power', 'power', (['(epoch + 1)', '(-0.5)'], {}), False, 'from numpy import power, min, max, floor\n'), (22, 'numpy.power', 'power', (['warmup', '(-1.5)'], {}), False, 'from numpy import power, min, max, floor\n')] |
epochlab/xres | 38df268f92efea6ec55909cbe87d3d089977cd88 | #!/usr/bin/env python3
from tensorflow.keras import Input, Model
from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten
def edsr_residual(x, num_filters, scaling):
res = Conv2D(num_filters, 3, padding='same', activation='relu')(x)
res = Conv2D(num_filters, 3, padding='same')(res)
if scaling:
res = Lambda(lambda t: t * scaling)(res)
res = Add()([res, x])
return res
def upsampling_block(model, num_filters, kernel_size, strides):
    model = Conv2D(num_filters, kernel_size, strides, padding="same")(model)
model = UpSampling2D(size=2)(model)
model = PReLU(alpha_initializer='zeros', alpha_regularizer=None, alpha_constraint=None, shared_axes=[1,2])(model)
return model
def gridless_upsampling(model, num_filters, scale):
    def upsample(x, factor):
        # apply the upsampling to the running tensor x, not the outer `model`
        x = UpSampling2D(size=factor)(x)
        x = Conv2D(num_filters, 3, padding='same')(x)
return x
if scale==2:
model = upsample(model, 2)
elif scale==3:
model = upsample(model, 3)
elif scale==4:
model = upsample(model, 2)
model = upsample(model, 2)
return model
def build_edsr(input_shape, scale, num_filters, residual_blocks, res_block_scaling=0.1):
input_layer = Input(shape=input_shape)
gen1 = Conv2D(num_filters, kernel_size=9, padding='same')(input_layer)
res = edsr_residual(gen1, num_filters, res_block_scaling)
for i in range(residual_blocks - 1):
res = edsr_residual(res, num_filters, res_block_scaling)
gen2 = Conv2D(num_filters, kernel_size=3, padding='same')(res)
model = Add()([gen2, gen1])
for index in range(2):
model = upsampling_block(model, num_filters, 3, 1)
# model = gridless_upsampling(model, num_filters, scale)
output = Conv2D(3, 9, padding='same')(model)
output = Activation('tanh')(output)
model = Model(inputs=[input_layer], outputs=[output], name='edsr_generator')
return model
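# Quick shape check (illustrative): each of the two stride-1 upsampling blocks
# doubles H and W via UpSampling2D(size=2), so a 32x32 LR input yields a
# 128x128 output regardless of `scale`, which is only consumed by the
# commented-out gridless_upsampling path.
#   gen = build_edsr((32, 32, 3), scale=4, num_filters=64, residual_blocks=8)
#   gen.summary()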
def discriminator_block(model, num_filters, kernel_size, strides):
model = Conv2D(num_filters, kernel_size, strides, padding='same')(model)
model = BatchNormalization(momentum=0.5)(model)
model = LeakyReLU(alpha=0.2)(model)
return model
def build_discriminator(input_shape, num_filters=64):
input_layer = Input(shape=input_shape)
dis1 = Conv2D(num_filters, 3, padding='same')(input_layer)
dis1 = LeakyReLU(alpha=0.2)(dis1)
dis2 = discriminator_block(dis1, num_filters, 3, 2)
dis3 = discriminator_block(dis2, num_filters * 2, 3, 1)
dis4 = discriminator_block(dis3, num_filters * 2, 3, 2)
dis5 = discriminator_block(dis4, num_filters * 4, 3, 1)
dis6 = discriminator_block(dis5, num_filters * 4, 3, 2)
dis7 = discriminator_block(dis6, num_filters * 8, 3, 1)
dis8 = discriminator_block(dis7, num_filters * 8, 3, 2)
dis9 = Flatten()(dis8)
dis9 = Dense(1024)(dis9)
dis9 = LeakyReLU(alpha=0.2)(dis9)
output = Dense(units=1)(dis9)
output = Activation('sigmoid')(output)
model = Model(inputs=[input_layer], outputs=[output], name='discriminator')
return model
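# Illustrative discriminator check (the HR patch size is an assumption; it
# must be 4x the LR patch used for the generator above):
#   disc = build_discriminator((128, 128, 3))
#   disc.summary()  # four stride-2 blocks, then Dense(1024) -> sigmoid score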
| [
"tensorflow.keras.layers.Activation",
"tensorflow.keras.Input",
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.layers.PReLU",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.Model",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Add",
"tensorflow.keras.layers.Flatten"
] | model/edsr.py | [(39, 'tensorflow.keras.Input', 'Input', ([], {'shape': 'input_shape'}), False, 'from tensorflow.keras import Input, Model\n'), (58, 'tensorflow.keras.Model', 'Model', ([], {'inputs': '[input_layer]', 'outputs': '[output]', 'name': '"""edsr_generator"""'}), False, 'from tensorflow.keras import Input, Model\n'), (68, 'tensorflow.keras.Input', 'Input', ([], {'shape': 'input_shape'}), False, 'from tensorflow.keras import Input, Model\n'), (91, 'tensorflow.keras.Model', 'Model', ([], {'inputs': '[input_layer]', 'outputs': '[output]', 'name': '"""discriminator"""'}), False, 'from tensorflow.keras import Input, Model\n'), (7, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['num_filters', '(3)'], {'padding': '"""same"""', 'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (8, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['num_filters', '(3)'], {'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (13, 'tensorflow.keras.layers.Add', 'Add', ([], {}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (17, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['num_filters', 'kernal_size', 'strides'], {'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (18, 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', ([], {'size': '(2)'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (19, 'tensorflow.keras.layers.PReLU', 'PReLU', ([], {'alpha_initializer': '"""zeros"""', 'alpha_regularizer': 'None', 'alpha_constraint': 'None', 'shared_axes': '[1, 2]'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (41, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['num_filters'], {'kernel_size': '(9)', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (47, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['num_filters'], {'kernel_size': '(3)', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (48, 'tensorflow.keras.layers.Add', 'Add', ([], {}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (55, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(3)', '(9)'], {'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (56, 'tensorflow.keras.layers.Activation', 'Activation', (['"""tanh"""'], {}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (62, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['num_filters', 'kernel_size', 'strides'], {'padding': '"""same"""'}), False, 'from 
tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (63, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.5)'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (64, 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (70, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['num_filters', '(3)'], {'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (71, 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (84, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (85, 'tensorflow.keras.layers.Dense', 'Dense', (['(1024)'], {}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (86, 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (88, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(1)'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (89, 'tensorflow.keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (11, 'tensorflow.keras.layers.Lambda', 'Lambda', (['(lambda t: t * scaling)'], {}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (24, 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', ([], {'size': 'factor'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n'), (25, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['num_filters', '(3)'], {'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, BatchNormalization, UpSampling2D, Activation, LeakyReLU, PReLU, Add, Dense, Flatten\n')] |
rahhul/GANs | cec9e2f81528099407b8a9d3dce2f1cf85e449be | # python3
import util
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, LeakyReLU
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.initializers import RandomNormal, glorot_normal, VarianceScaling
# Discriminator model
def discriminator_model(in_shape=(32, 32, 3)):
init = glorot_normal()
model = Sequential()
model.add(Conv2D(64, (3,3), padding='same', kernel_initializer=init, input_shape=in_shape))
# model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(128, (3,3), strides=(2,2), kernel_initializer=init, padding='same'))
# model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(128, (3,3), strides=(2,2), kernel_initializer=init, padding='same'))
# model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.2))
# downsample
model.add(Conv2D(256, (3,3), strides=(2,2), kernel_initializer=init, padding='same'))
# model.add(BatchNormalization())
model.add(LeakyReLU(alpha=0.2))
# classifier
model.add(Flatten())
model.add(Dropout(0.4))
model.add(Dense(1, activation='sigmoid'))
# compile model
model.compile(loss='binary_crossentropy',
optimizer=Adam(lr=2e-4, beta_1=0.5),
metrics=['accuracy'])
return model
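# Illustrative smoke test (hypothetical random CIFAR-10-sized batch):
#   import numpy as np
#   d = discriminator_model()
#   print(d.predict(np.random.rand(4, 32, 32, 3)).shape)  # -> (4, 1)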
| [
"tensorflow.keras.layers.LeakyReLU",
"tensorflow.keras.initializers.glorot_normal",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten"
] | cifar10/trainer/discriminator.py | [(15, 'tensorflow.keras.initializers.glorot_normal', 'glorot_normal', ([], {}), False, 'from tensorflow.keras.initializers import RandomNormal, glorot_normal, VarianceScaling\n'), (16, 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras.models import Sequential\n'), (17, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""', 'kernel_initializer': 'init', 'input_shape': 'in_shape'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, LeakyReLU\n'), (19, 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, LeakyReLU\n'), (21, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'strides': '(2, 2)', 'kernel_initializer': 'init', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, LeakyReLU\n'), (23, 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, LeakyReLU\n'), (25, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)', '(3, 3)'], {'strides': '(2, 2)', 'kernel_initializer': 'init', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, LeakyReLU\n'), (27, 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, LeakyReLU\n'), (29, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(3, 3)'], {'strides': '(2, 2)', 'kernel_initializer': 'init', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, LeakyReLU\n'), (31, 'tensorflow.keras.layers.LeakyReLU', 'LeakyReLU', ([], {'alpha': '(0.2)'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, LeakyReLU\n'), (33, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, LeakyReLU\n'), (34, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.4)'], {}), False, 'from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, LeakyReLU\n'), (35, 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), False, 'from tensorflow.keras.layers import Dense, Conv2D, Dropout, Flatten, LeakyReLU\n'), (38, 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.0002)', 'beta_1': '(0.5)'}), False, 'from tensorflow.keras.optimizers import Adam\n')] |
MFrassek/CommittorEAE | 88a467e4500bc9ab69834209f4eaec9f2d0d7a61 | import numpy as np
import math
from matplotlib import cm
from matplotlib.colors import ListedColormap
from losses import binaryNegLikelihood
from tensorflow import keras
from data_read import get_toy_paths, get_TPS_and_TIS_paths
class Const():
def __init__(self, dataSetType):
self._dataSetType = dataSetType
if dataSetType == "DW" or dataSetType == "ZP":
self._name_to_list_position = {
"x_{1}": 0, "x_{2}": 1, "x_{3}": 2, "x_{4}": 3, "x_{5}": 4,
"x_{6}": 5, "x_{7}": 6, "x_{8}": 7, "x_{9}": 8, "x_{10}": 9}
self._used_variable_names = [
"x_{1}", "x_{2}", "x_{3}", "x_{4}", "x_{5}"]
# Name of the folder in which the toy data is found
self._toy_folder_name = dataSetType
self._path_getter_function = get_toy_paths
elif dataSetType == "MH":
self._name_to_list_position = {
"MCG": 0, "N_{w,4}": 1, "N_{w,3}": 2, "N_{w,2}": 3,
"N_{sw,3-4}": 4, "N_{sw,2-3}": 5, "F4": 6, "R_g": 7,
"5^{12}6^{2}": 8, "5^{12}": 9, "CR": 10, "N_{s,2}": 11,
"N_{s,3}": 12, "N_{c,2}": 13, "N_{c,3}": 14, "N_{s,4}": 15,
"N_{c,4}": 16, "5^{12}6^{3}": 17, "5^{12}6^{4}": 18,
"4^{1}5^{10}6^{2}": 19, "4^{1}5^{10}6^{3}": 20,
"4^{1}5^{10}6^{4}": 21}
self._used_variable_names = [
"MCG", "N_{w,4}", "N_{w,3}", "N_{w,2}", "N_{sw,3-4}",
"N_{sw,2-3}", "F4", "R_g", "5^{12}6^{2}", "5^{12}", "CR",
"N_{s,2}", "N_{s,3}", "N_{c,2}", "N_{c,3}", "N_{s,4}",
"N_{c,4}", "5^{12}6^{3}", "5^{12}6^{4}", "4^{1}5^{10}6^{2}",
"4^{1}5^{10}6^{3}", "4^{1}5^{10}6^{4}"]
# Name of the folder in which the TIS data is found
self._TIS_folder_name = "RPE_org"
self._TIS_highest_interface_name = "mcg100"
# Name of the folder in which the TPS paths are found
self._TPS_folder_name = "TPS"
# MCG threshold below which a snapshot belongs to state A
self._mcg_A = 18
# MCG threshold above which a snapshot belongs to state B
self._mcg_B = 120
self._path_getter_function = get_TPS_and_TIS_paths
# Fraction of paths used from the read files
self._used_dataset_fraction = 1
self._used_name_to_list_position = {
self._used_variable_names[i]: i
for i in range(len(self._used_variable_names))}
self._used_list_positions = [
self._name_to_list_position[name]
for name in self._used_variable_names]
# Labels assigned to the four types of paths
self._AA_label = 0.0
self._AB_label = 1.0
self._BA_label = 0.0
self._BB_label = 1.0
# Precision to which data is rounded
self._precision = 2
# List of labels to keep
self._keep_labels = ["AA", "AB", "BA", "BB"]
        # Ratio of the training set compared to the whole dataset
        self._train_ratio = 0.6
        # Ratio of the validation set compared to the whole dataset
        self._val_ratio = 0.1
        # Fraction of the most extreme values that are considered
        # outliers on both sides
self._outlier_cutoff = 0.02
# Number of bins to balance the pBs
self._balance_bins = 10
"""System parameters"""
# Number of cores used
self._cores_used = 2
"""Tf-Dataset parameters"""
# set size of batches
self._batch_size = 64
"""Model parameters"""
# Number of bottleneck nodes
self._bottleneck_size = 1
# Factor of hidden layer nodes relative to input nodes
self._node_mult = 4
        # Number of hidden layers in the encoder
        self._encoder_hidden = 4
        # Number of hidden layers in the decoder_1
        self._decoder_1_hidden = 4
        # Number of hidden layers in the decoder_2
self._decoder_2_hidden = 4
# Activation function in the encoder
self._encoder_act_func = "tanh"
# Activation function in the decoder_1
self._decoder_1_act_func = "sigmoid"
# Activation function in the decoder_2
self._decoder_2_act_func = "tanh"
# Ratio of weights for label and reconstruction loss
self._loss_weights = [1, 0.1]
# Names of input and output in the model.
self._input_name = "Input"
self._output_name_1 = "Committor"
self._output_name_2 = "Reconstruction"
# List off losses determined by the model.
self._loss_names = ["total", self._output_name_1, self._output_name_2]
# Loss functions used for the autoencoder_1
self._loss_function_1 = binaryNegLikelihood
# Loss functions used for the autoencoder_2
self._loss_function_2 = keras.losses.MeanAbsoluteError()
# Number of epochs used for model training
self._epochs = 10
"""Visualization parameters"""
# Resolution for the calc_* and plot_* functions
self._resolution = 25
# Sub-figure size for the plot_* functions
# self._subfig_size = 5
self._subfig_size = 2
        # Lower boundary for a logarithmic colormap
self._logvmin = 10**(-4)
# Colormap used for the heat map plots
self._label_cmap = make_banded_label_colormap(self._logvmin)
        # Colormap used for the density plots
self._density_cmap = make_density_colormap()
# List of colors for plt.plots
self._plt_colors = [
"c", "g", "r", "indigo", "y", "m",
"k", "lightpink", "orange", "olive", "b", "darkviolet"]
self._projection_steps = 20
self._unprojection_steps = 11
@property
def dataSetType(self):
return self._dataSetType
@property
def name_to_list_position(self):
return self._name_to_list_position
@property
def used_variable_names(self):
return self._used_variable_names
@property
def used_name_to_list_position(self):
return self._used_name_to_list_position
@property
def used_list_positions(self):
return self._used_list_positions
@property
def path_getter_function(self):
return self._path_getter_function
@property
def toy_folder_name(self):
return self._toy_folder_name
@property
def TIS_folder_name(self):
return self._TIS_folder_name
@property
def TIS_highest_interface_name(self):
return self._TIS_highest_interface_name
@property
def TPS_folder_name(self):
return self._TPS_folder_name
@property
def mcg_A(self):
return self._mcg_A
@property
def mcg_B(self):
return self._mcg_B
@property
def used_dataset_fraction(self):
return self._used_dataset_fraction
@property
def AA_label(self):
return self._AA_label
@property
def AB_label(self):
return self._AB_label
@property
def BA_label(self):
return self._BA_label
@property
def BB_label(self):
return self._BB_label
@property
def min_label(self):
return min(
self._AA_label, self._AB_label, self._BA_label, self._BB_label)
@property
def max_label(self):
return max(
self._AA_label, self._AB_label, self._BA_label, self._BB_label)
@property
def precision(self):
return self._precision
@property
def keep_labels(self):
return self._keep_labels
@property
def train_ratio(self):
assert isinstance(self._train_ratio, float) \
and self._train_ratio > 0.0, \
"train_ratio needs to be a float higher than 0.0"
return self._train_ratio
@property
def val_ratio(self):
assert isinstance(self._val_ratio, float) \
and self._val_ratio > 0.0, \
"val_ratio needs to be a float higher than 0.0"
return self._val_ratio
@property
def outlier_cutoff(self):
return self._outlier_cutoff
@property
def balance_bins(self):
return self._balance_bins
@property
def cores_used(self):
return self._cores_used
@property
def batch_size(self):
return self._batch_size
@property
def bottleneck_size(self):
return self._bottleneck_size
@property
def node_mult(self):
return self._node_mult
@property
def encoder_hidden(self):
return self._encoder_hidden
@property
def decoder_1_hidden(self):
return self._decoder_1_hidden
@property
def decoder_2_hidden(self):
return self._decoder_2_hidden
@property
def encoder_act_func(self):
return self._encoder_act_func
@property
def decoder_1_act_func(self):
return self._decoder_1_act_func
@property
def decoder_2_act_func(self):
return self._decoder_2_act_func
@property
def loss_weights(self):
return self._loss_weights
@property
def label_loss_weight(self):
return self._loss_weights[0]
@property
def reconstruction_loss_weight(self):
return self._loss_weights[1]
@property
def input_name(self):
return self._input_name
@property
def output_name_1(self):
return self._output_name_1
@property
def output_name_2(self):
return self._output_name_2
@property
def loss_names(self):
return self._loss_names
@property
def loss_type_cnt(self):
return len(self._loss_names)
@property
def loss_function_1(self):
return self._loss_function_1
@property
def loss_function_2(self):
return self._loss_function_2
@property
def epochs(self):
return self._epochs
@property
def resolution(self):
return self._resolution
@property
def subfig_size(self):
return self._subfig_size
@property
def logvmin(self):
return self._logvmin
@property
def label_cmap(self):
return self._label_cmap
@property
def density_cmap(self):
return self._density_cmap
@property
def plt_colors(self):
return self._plt_colors
@property
def projection_steps(self):
return self._projection_steps
@property
def unprojection_steps(self):
return self._unprojection_steps
@property
def data_stamp(self):
return f"kl{'_'.join(self._keep_labels)}_oc{self._outlier_cutoff}"
@property
def model_stamp(self):
return f"bn{self._bottleneck_size}_{self._node_mult}*"\
+ f"({self._encoder_hidden}{self._encoder_act_func}+"\
+ f"{self._decoder_1_hidden}{self._decoder_1_act_func}|"\
+ f"{self._decoder_2_hidden}{self._decoder_2_act_func})_"\
+ f"lw{self._loss_weights[0]}:{self._loss_weights[1]}_"\
+ f"e{self._epochs}"
# Define setter methods for all variables that can be changed.
@used_variable_names.setter
def used_variable_names(self, x):
assert isinstance(x, list), "Can only be set to type list"
self._used_variable_names = x
self._used_name_to_list_position = {
self._used_variable_names[i]: i
for i in range(len(self._used_variable_names))}
self._used_list_positions = [
self._name_to_list_position[name]
for name in self._used_variable_names]
@bottleneck_size.setter
def bottleneck_size(self, x):
assert isinstance(x, int), "Can only be set to type int"
self._bottleneck_size = x
@epochs.setter
def epochs(self, x):
assert isinstance(x, int), "Can only be set to type int"
self._epochs = x
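# Hypothetical usage of the constants container (positions follow the "MH"
# name_to_list_position mapping defined above):
#   const = Const("MH")
#   const.used_variable_names = ["MCG", "F4"]  # triggers the setter above
#   print(const.used_list_positions)           # -> [0, 6]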
def make_banded_label_colormap(logvmin):
resolution = 1001
bandwidth = 0.1
band_bottom_fraction = \
translate_value_to_colormap_fraction(0.5 - bandwidth / 2, logvmin)
band_bottom_index = round(band_bottom_fraction * resolution)
band_top_fraction = \
translate_value_to_colormap_fraction(0.5 + bandwidth / 2, logvmin)
band_top_index = round(band_top_fraction * resolution)
bottom_map = cm.get_cmap("summer", resolution)
cut_bottom_map = bottom_map(np.linspace(
0, 1 - band_bottom_fraction, resolution - band_bottom_index))
middle_map = cm.get_cmap("Greys", 10)
cut_middle_map = middle_map(np.linspace(
0.9, 1.0, band_bottom_index - band_top_index))
top_map = cm.get_cmap("summer", resolution)
cut_top_map = top_map(np.linspace(
1 - band_top_fraction, 1, band_top_index))
c_map = ListedColormap(np.vstack((
cut_bottom_map, cut_middle_map, cut_top_map)), "SplitSummer")
return c_map
def translate_value_to_colormap_fraction(value, logvmin):
return math.log(value, 10)/math.log(logvmin, 10)
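# Worked example: with logvmin = 10**(-4), a value of 0.5 maps to
# log10(0.5) / log10(10**(-4)) = -0.30103 / -4 ≈ 0.0753, the fractional
# position used by the banded colormap above.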
def make_density_colormap():
resolution = 1001
cmap = cm.get_cmap("autumn", resolution)
return cmap
| [
"numpy.vstack",
"tensorflow.keras.losses.MeanAbsoluteError",
"numpy.linspace",
"matplotlib.cm.get_cmap"
] | NucleationModel/globalConstants.py | [(403, 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""summer"""', 'resolution'], {}), False, 'from matplotlib import cm\n'), (406, 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""Greys"""', '(10)'], {}), False, 'from matplotlib import cm\n'), (409, 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""summer"""', 'resolution'], {}), False, 'from matplotlib import cm\n'), (423, 'matplotlib.cm.get_cmap', 'cm.get_cmap', (['"""autumn"""', 'resolution'], {}), False, 'from matplotlib import cm\n'), (111, 'tensorflow.keras.losses.MeanAbsoluteError', 'keras.losses.MeanAbsoluteError', ([], {}), False, 'from tensorflow import keras\n'), (404, 'numpy.linspace', 'np.linspace', (['(0)', '(1 - band_bottom_fraction)', '(resolution - band_bottom_index)'], {}), True, 'import numpy as np\n'), (407, 'numpy.linspace', 'np.linspace', (['(0.9)', '(1.0)', '(band_bottom_index - band_top_index)'], {}), True, 'import numpy as np\n'), (410, 'numpy.linspace', 'np.linspace', (['(1 - band_top_fraction)', '(1)', 'band_top_index'], {}), True, 'import numpy as np\n'), (412, 'numpy.vstack', 'np.vstack', (['(cut_bottom_map, cut_middle_map, cut_top_map)'], {}), True, 'import numpy as np\n'), (418, 'math.log', 'math.log', (['value', '(10)'], {}), False, 'import math\n'), (418, 'math.log', 'math.log', (['logvmin', '(10)'], {}), False, 'import math\n')] |
wilcoln/transformers | 6331d4fe59e85840bb5693837e791f4caedcd53b | # coding=utf-8
# Copyright 2019 HuggingFace Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import copy
import inspect
import os
import random
import tempfile
import unittest
from importlib import import_module
from typing import List, Tuple
from transformers import is_tf_available
from transformers.testing_utils import _tf_gpu_memory_limit, is_pt_tf_cross_test, require_tf, slow
if is_tf_available():
import numpy as np
import tensorflow as tf
from transformers import (
TF_MODEL_FOR_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_MASKED_LM_MAPPING,
TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING,
TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING,
TF_MODEL_FOR_PRETRAINING_MAPPING,
TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING,
TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING,
TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING,
TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING,
TFSharedEmbeddings,
tf_top_k_top_p_filtering,
)
if _tf_gpu_memory_limit is not None:
gpus = tf.config.list_physical_devices("GPU")
for gpu in gpus:
# Restrict TensorFlow to only allocate x GB of memory on the GPUs
try:
tf.config.set_logical_device_configuration(
gpu, [tf.config.LogicalDeviceConfiguration(memory_limit=_tf_gpu_memory_limit)]
)
logical_gpus = tf.config.list_logical_devices("GPU")
print("Logical GPUs", logical_gpus)
except RuntimeError as e:
# Virtual devices must be set before GPUs have been initialized
print(e)
def _config_zero_init(config):
configs_no_init = copy.deepcopy(config)
for key in configs_no_init.__dict__.keys():
if "_range" in key or "_std" in key:
setattr(configs_no_init, key, 0.0)
return configs_no_init
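# Illustrative effect (hypothetical config attribute): a config created with
# initializer_range=0.02 comes back with initializer_range=0.0, making
# weight initialization deterministic for the tests that use this helper.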
@require_tf
class TFModelTesterMixin:
model_tester = None
all_model_classes = ()
all_generative_model_classes = ()
test_resize_embeddings = True
is_encoder_decoder = False
def _prepare_for_class(self, inputs_dict, model_class, return_labels=False) -> dict:
inputs_dict = copy.deepcopy(inputs_dict)
if model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
inputs_dict = {
k: tf.tile(tf.expand_dims(v, 1), (1, self.model_tester.num_choices) + (1,) * (v.ndim - 1))
if isinstance(v, tf.Tensor) and v.ndim > 0
else v
for k, v in inputs_dict.items()
}
if return_labels:
if model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
inputs_dict["labels"] = tf.ones(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.values():
inputs_dict["start_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
inputs_dict["end_positions"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.values():
inputs_dict["labels"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.values():
inputs_dict["next_sentence_label"] = tf.zeros(self.model_tester.batch_size, dtype=tf.int32)
elif model_class in [
*TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.values(),
*TF_MODEL_FOR_CAUSAL_LM_MAPPING.values(),
*TF_MODEL_FOR_MASKED_LM_MAPPING.values(),
*TF_MODEL_FOR_PRETRAINING_MAPPING.values(),
*TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values(),
]:
inputs_dict["labels"] = tf.zeros(
(self.model_tester.batch_size, self.model_tester.seq_length), dtype=tf.int32
)
return inputs_dict
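    # Illustrative effect (hypothetical shapes): for a multiple-choice model
    # this tiles every rank>0 tensor to (batch, num_choices, ...) and, with
    # return_labels=True, attaches an all-ones int32 label vector of length
    # batch_size.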
def test_initialization(self):
pass
def test_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=False)
model = model_class.from_pretrained(tmpdirname)
after_outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assert_outputs_same(after_outputs, outputs)
def test_graph_mode(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
inputs = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
@tf.function
def run_in_graph_mode():
return model(inputs)
outputs = run_in_graph_mode()
self.assertIsNotNone(outputs)
def test_forward_signature(self):
config, _ = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
signature = inspect.signature(model.call)
# signature.parameters is an OrderedDict => so arg_names order is deterministic
arg_names = [*signature.parameters.keys()]
if model.config.is_encoder_decoder:
expected_arg_names = [
"input_ids",
"attention_mask",
"decoder_input_ids",
"decoder_attention_mask",
]
expected_arg_names.extend(
["head_mask", "decoder_head_mask", "encoder_outputs"]
if "head_mask" and "decoder_head_mask" in arg_names
else ["encoder_outputs"]
)
self.assertListEqual(arg_names[: len(expected_arg_names)], expected_arg_names)
else:
expected_arg_names = ["input_ids"]
self.assertListEqual(arg_names[:1], expected_arg_names)
def test_saved_model_creation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = False
config.output_attentions = False
if hasattr(config, "use_cache"):
config.use_cache = False
model_class = self.all_model_classes[0]
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
model(class_inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=True)
saved_model_dir = os.path.join(tmpdirname, "saved_model")
self.assertTrue(os.path.exists(saved_model_dir))
@slow
def test_saved_model_creation_extended(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
config.output_attentions = True
if hasattr(config, "use_cache"):
config.use_cache = True
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
model = model_class(config)
model(class_inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=True)
saved_model_dir = os.path.join(tmpdirname, "saved_model")
self.assertTrue(os.path.exists(saved_model_dir))
@slow
def test_saved_model_with_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_hidden_states = True
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
            # A saved model is always executed in graph mode. Since we merged
            # PR #8777, the booleans in graph mode are always the ones from the
            # config, so we update the use_cache property if it exists in order
            # to keep the booleans consistent with the inputs.
if "use_cache" in class_inputs_dict:
config.use_cache = class_inputs_dict.pop("use_cache")
model = model_class(config)
num_out = len(model(class_inputs_dict))
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname)
saved_model_dir = os.path.join(tmpdirname, "saved_model")
model = tf.keras.models.load_model(saved_model_dir)
outputs = model(class_inputs_dict)
if self.is_encoder_decoder:
output = outputs["encoder_hidden_states"] if isinstance(outputs, dict) else outputs[-1]
else:
output = outputs["hidden_states"] if isinstance(outputs, dict) else outputs[-1]
hidden_states = [t.numpy() for t in output]
self.assertEqual(len(outputs), num_out)
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
@slow
def test_saved_model_with_attentions_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.output_attentions = True
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
for model_class in self.all_model_classes:
class_inputs_dict = self._prepare_for_class(inputs_dict, model_class)
# A saved model is always executed in graph mode. Since PR #8777 was merged,
# the booleans in graph mode always come from the config, so we update the
# use_cache property, if present, to keep the booleans consistent with the inputs
if "use_cache" in class_inputs_dict:
config.use_cache = class_inputs_dict.pop("use_cache")
model = model_class(config)
num_out = len(model(class_inputs_dict))
with tempfile.TemporaryDirectory() as tmpdirname:
saved_model_dir = os.path.join(tmpdirname, "saved_model")
model.save_pretrained(saved_model_dir)
model = tf.keras.models.load_model(saved_model_dir)
outputs = model(class_inputs_dict)
if self.is_encoder_decoder:
output = outputs["encoder_attentions"] if isinstance(outputs, dict) else outputs[-1]
else:
output = outputs["attentions"] if isinstance(outputs, dict) else outputs[-1]
attentions = [t.numpy() for t in output]
self.assertEqual(len(outputs), num_out)
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
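# Check that each keras-serializable MainLayer can be wrapped in a Keras model, saved to HDF5, and reloaded with matching outputs.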
def test_keras_save_load(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
tf_main_layer_classes = set(
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(module)
if module_member_name.endswith("MainLayer")
for module_member in (getattr(module, module_member_name),)
if isinstance(module_member, type)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(module_member, "_keras_serializable", False)
)
for main_layer_class in tf_main_layer_classes:
# T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
if "T5" in main_layer_class.__name__:
# Use the same values as in TFT5ModelTester for this shared layer
shared = TFSharedEmbeddings(99, 32, name="shared")
config.use_cache = inputs_dict.pop("use_cache", None)
main_layer = main_layer_class(config, embed_tokens=shared)
else:
main_layer = main_layer_class(config)
symbolic_inputs = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
model = tf.keras.Model(symbolic_inputs, outputs=main_layer(symbolic_inputs))
outputs = model(inputs_dict)
with tempfile.TemporaryDirectory() as tmpdirname:
filepath = os.path.join(tmpdirname, "keras_model.h5")
model.save(filepath)
if "T5" in main_layer_class.__name__:
model = tf.keras.models.load_model(
filepath,
custom_objects={
main_layer_class.__name__: main_layer_class,
"TFSharedEmbeddings": TFSharedEmbeddings,
},
)
else:
model = tf.keras.models.load_model(
filepath, custom_objects={main_layer_class.__name__: main_layer_class}
)
assert isinstance(model, tf.keras.Model)
after_outputs = model(inputs_dict)
self.assert_outputs_same(after_outputs, outputs)
def assert_outputs_same(self, after_outputs, outputs):
# Make sure we don't have nans
if isinstance(after_outputs, tf.Tensor):
out_1 = after_outputs.numpy()
elif isinstance(after_outputs, dict):
out_1 = after_outputs[list(after_outputs.keys())[0]].numpy()
else:
out_1 = after_outputs[0].numpy()
out_2 = outputs[0].numpy()
self.assertEqual(out_1.shape, out_2.shape)
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
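# Cross-framework test: load weights between the PyTorch and TF versions of each model and compare outputs.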
@is_pt_tf_cross_test
def test_pt_tf_model_equivalence(self):
import torch
import transformers
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
pt_model_class_name = model_class.__name__[2:] # Skip the "TF" at the beginning
pt_model_class = getattr(transformers, pt_model_class_name)
config.output_hidden_states = True
tf_model = model_class(config)
pt_model = pt_model_class(config)
# Check we can load pt model in tf and vice-versa with model => model functions
tf_model = transformers.load_pytorch_model_in_tf2_model(
tf_model, pt_model, tf_inputs=self._prepare_for_class(inputs_dict, model_class)
)
pt_model = transformers.load_tf2_model_in_pytorch_model(pt_model, tf_model)
# Check that predictions on the first output (logits/hidden-states) are close enough, given low-level computational differences
pt_model.eval()
pt_inputs_dict = {}
for name, key in self._prepare_for_class(inputs_dict, model_class).items():
if type(key) == bool:
pt_inputs_dict[name] = key
else:
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
# need to rename encoder-decoder "inputs" for PyTorch
if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")
with torch.no_grad():
pto = pt_model(**pt_inputs_dict)
tfo = tf_model(self._prepare_for_class(inputs_dict, model_class), training=False)
tf_hidden_states = tfo[0].numpy()
pt_hidden_states = pto[0].numpy()
tf_nans = np.copy(np.isnan(tf_hidden_states))
pt_nans = np.copy(np.isnan(pt_hidden_states))
pt_hidden_states[tf_nans] = 0
tf_hidden_states[tf_nans] = 0
pt_hidden_states[pt_nans] = 0
tf_hidden_states[pt_nans] = 0
max_diff = np.amax(np.abs(tf_hidden_states - pt_hidden_states))
self.assertLessEqual(max_diff, 4e-2)
# Check we can load pt model in tf and vice-versa with checkpoint => model functions
with tempfile.TemporaryDirectory() as tmpdirname:
pt_checkpoint_path = os.path.join(tmpdirname, "pt_model.bin")
torch.save(pt_model.state_dict(), pt_checkpoint_path)
tf_model = transformers.load_pytorch_checkpoint_in_tf2_model(tf_model, pt_checkpoint_path)
tf_checkpoint_path = os.path.join(tmpdirname, "tf_model.h5")
tf_model.save_weights(tf_checkpoint_path)
pt_model = transformers.load_tf2_checkpoint_in_pytorch_model(pt_model, tf_checkpoint_path)
# Check that predictions on the first output (logits/hidden-states) are close enough, given low-level computational differences
pt_model.eval()
pt_inputs_dict = {}
for name, key in self._prepare_for_class(inputs_dict, model_class).items():
if type(key) == bool:
key = np.array(key, dtype=bool)
pt_inputs_dict[name] = torch.from_numpy(key).to(torch.long)
else:
pt_inputs_dict[name] = torch.from_numpy(key.numpy()).to(torch.long)
# need to rename encoder-decoder "inputs" for PyTorch
if "inputs" in pt_inputs_dict and self.is_encoder_decoder:
pt_inputs_dict["input_ids"] = pt_inputs_dict.pop("inputs")
with torch.no_grad():
pto = pt_model(**pt_inputs_dict)
tfo = tf_model(self._prepare_for_class(inputs_dict, model_class))
tfo = tfo[0].numpy()
pto = pto[0].numpy()
tf_nans = np.copy(np.isnan(tfo))
pt_nans = np.copy(np.isnan(pto))
pto[tf_nans] = 0
tfo[tf_nans] = 0
pto[pt_nans] = 0
tfo[pt_nans] = 0
max_diff = np.amax(np.abs(tfo - pto))
self.assertLessEqual(max_diff, 4e-2)
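# Check that a Keras pipeline built on top of a MainLayer can be compiled, fitted, saved, and reloaded.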
def test_train_pipeline_custom_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
tf_main_layer_classes = set(
module_member
for model_class in self.all_model_classes
for module in (import_module(model_class.__module__),)
for module_member_name in dir(module)
if module_member_name.endswith("MainLayer")
for module_member in (getattr(module, module_member_name),)
if isinstance(module_member, type)
and tf.keras.layers.Layer in module_member.__bases__
and getattr(module_member, "_keras_serializable", False)
)
for main_layer_class in tf_main_layer_classes:
# T5MainLayer needs an embed_tokens parameter when called without the inputs_embeds parameter
if "T5" in main_layer_class.__name__:
# Use the same values as in TFT5ModelTester for this shared layer
shared = TFSharedEmbeddings(self.model_tester.vocab_size, self.model_tester.hidden_size, name="shared")
config.use_cache = False
main_layer = main_layer_class(config, embed_tokens=shared)
del inputs_dict["use_cache"]
else:
main_layer = main_layer_class(config)
symbolic_inputs = {
name: tf.keras.Input(tensor.shape[1:], dtype=tensor.dtype) for name, tensor in inputs_dict.items()
}
if hasattr(self.model_tester, "num_labels"):
num_labels = self.model_tester.num_labels
else:
num_labels = 2
X = tf.data.Dataset.from_tensor_slices(
(inputs_dict, np.ones((self.model_tester.batch_size, self.model_tester.seq_length, num_labels, 1)))
).batch(1)
hidden_states = main_layer(symbolic_inputs)[0]
outputs = tf.keras.layers.Dense(num_labels, activation="softmax", name="outputs")(hidden_states)
model = tf.keras.models.Model(inputs=symbolic_inputs, outputs=[outputs])
model.compile(loss="binary_crossentropy", optimizer="adam", metrics=["binary_accuracy"])
model.fit(X, epochs=1)
with tempfile.TemporaryDirectory() as tmpdirname:
filepath = os.path.join(tmpdirname, "keras_model.h5")
model.save(filepath)
if "T5" in main_layer_class.__name__:
model = tf.keras.models.load_model(
filepath,
custom_objects={
main_layer_class.__name__: main_layer_class,
"TFSharedEmbeddings": TFSharedEmbeddings,
},
)
else:
model = tf.keras.models.load_model(
filepath, custom_objects={main_layer_class.__name__: main_layer_class}
)
assert isinstance(model, tf.keras.Model)
model(inputs_dict)
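# Check that a pretrained model can be extended with extra Keras layers and compiled end to end.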
def test_compile_tf_model(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
max_input = getattr(self.model_tester, "max_position_embeddings", 512)
optimizer = tf.keras.optimizers.Adam(learning_rate=3e-5, epsilon=1e-08, clipnorm=1.0)
loss = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
metric = tf.keras.metrics.SparseCategoricalAccuracy("accuracy")
for model_class in self.all_model_classes:
if self.is_encoder_decoder:
input_ids = {
"decoder_input_ids": tf.keras.Input(
batch_shape=(2, max_input),
name="decoder_input_ids",
dtype="int32",
),
"input_ids": tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32"),
}
elif model_class in TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values():
input_ids = tf.keras.Input(batch_shape=(4, 2, max_input), name="input_ids", dtype="int32")
else:
input_ids = tf.keras.Input(batch_shape=(2, max_input), name="input_ids", dtype="int32")
# Prepare our model
model = model_class(config)
model(self._prepare_for_class(inputs_dict, model_class)) # Model must be called before saving.
# Let's load it from the disk to be sure we can use pretrained weights
with tempfile.TemporaryDirectory() as tmpdirname:
model.save_pretrained(tmpdirname, saved_model=False)
model = model_class.from_pretrained(tmpdirname)
outputs_dict = model(input_ids)
hidden_states = outputs_dict[0]
# Add a dense layer on top to test integration with other keras modules
outputs = tf.keras.layers.Dense(2, activation="softmax", name="outputs")(hidden_states)
# Compile extended model
extended_model = tf.keras.Model(inputs=[input_ids], outputs=[outputs])
extended_model.compile(optimizer=optimizer, loss=loss, metrics=[metric])
def test_keyword_and_dict_args(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
outputs_dict = model(inputs)
inputs_keywords = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
input_ids = inputs_keywords.pop("input_ids", None)
outputs_keywords = model(input_ids, **inputs_keywords)
output_dict = outputs_dict[0].numpy()
output_keywords = outputs_keywords[0].numpy()
self.assertLess(np.sum(np.abs(output_dict - output_keywords)), 1e-6)
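# Check the shape and number of the returned attention tensors, toggled via the inputs and via the config.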
def test_attention_outputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
config.return_dict = True
decoder_seq_length = getattr(self.model_tester, "decoder_seq_length", self.model_tester.seq_length)
encoder_seq_length = getattr(self.model_tester, "encoder_seq_length", self.model_tester.seq_length)
decoder_key_length = getattr(self.model_tester, "key_length", decoder_seq_length)
encoder_key_length = getattr(self.model_tester, "key_length", encoder_seq_length)
def check_decoder_attentions_output(outputs):
out_len = len(outputs)
self.assertEqual(out_len % 2, 0)
decoder_attentions = outputs.decoder_attentions
self.assertEqual(len(decoder_attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(decoder_attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, decoder_seq_length, decoder_key_length],
)
def check_encoder_attentions_output(outputs):
attentions = [
t.numpy() for t in (outputs.encoder_attentions if config.is_encoder_decoder else outputs.attentions)
]
self.assertEqual(len(attentions), self.model_tester.num_hidden_layers)
self.assertListEqual(
list(attentions[0].shape[-3:]),
[self.model_tester.num_attention_heads, encoder_seq_length, encoder_key_length],
)
for model_class in self.all_model_classes:
inputs_dict["output_attentions"] = True
inputs_dict["use_cache"] = False
config.output_hidden_states = False
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
out_len = len(outputs)
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
if self.is_encoder_decoder:
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_decoder_attentions_output(outputs)
# Check that output attentions can also be changed via the config
del inputs_dict["output_attentions"]
config.output_attentions = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(config.output_hidden_states, False)
check_encoder_attentions_output(outputs)
# Check that attentions always come last and that the output order is correct
inputs_dict["output_attentions"] = True
config.output_hidden_states = True
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
self.assertEqual(out_len + (2 if self.is_encoder_decoder else 1), len(outputs))
self.assertEqual(model.config.output_hidden_states, True)
check_encoder_attentions_output(outputs)
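# Check the shape and number of the returned hidden-state tensors, toggled via the inputs and via the config.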
def test_hidden_states_output(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_hidden_states_output(config, inputs_dict, model_class):
model = model_class(config)
outputs = model(self._prepare_for_class(inputs_dict, model_class))
expected_num_layers = getattr(
self.model_tester, "expected_num_hidden_layers", self.model_tester.num_hidden_layers + 1
)
if model.config.is_encoder_decoder:
encoder_hidden_states = outputs.encoder_hidden_states
decoder_hidden_states = outputs.decoder_hidden_states
self.assertEqual(config.output_attentions, False)
self.assertEqual(len(encoder_hidden_states), expected_num_layers)
self.assertListEqual(
list(encoder_hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
self.assertEqual(len(decoder_hidden_states), expected_num_layers)
self.assertListEqual(
list(decoder_hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
else:
hidden_states = outputs.hidden_states
self.assertEqual(config.output_attentions, False)
self.assertEqual(len(hidden_states), expected_num_layers)
self.assertListEqual(
list(hidden_states[0].shape[-2:]),
[self.model_tester.seq_length, self.model_tester.hidden_size],
)
for model_class in self.all_model_classes:
inputs_dict["output_hidden_states"] = True
check_hidden_states_output(config, inputs_dict, model_class)
del inputs_dict["output_hidden_states"]
config.output_hidden_states = True
check_hidden_states_output(config, inputs_dict, model_class)
def test_model_common_attributes(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
list_lm_models = (
list(TF_MODEL_FOR_CAUSAL_LM_MAPPING.values())
+ list(TF_MODEL_FOR_MASKED_LM_MAPPING.values())
+ list(TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values())
)
for model_class in self.all_model_classes:
model = model_class(config)
assert isinstance(model.get_input_embeddings(), tf.keras.layers.Layer)
if model_class in list_lm_models:
x = model.get_output_embeddings()
assert isinstance(x, tf.keras.layers.Layer)
name = model.get_bias()
assert isinstance(name, dict)
for k, v in name.items():
assert isinstance(v, tf.Variable)
else:
x = model.get_output_embeddings()
assert x is None
name = model.get_bias()
assert name is None
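# Check that two forward passes with training=False produce (nearly) identical outputs.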
def test_determinism(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
first, second = (
model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
model(self._prepare_for_class(inputs_dict, model_class), training=False)[0],
)
out_1 = first.numpy()
out_2 = second.numpy()
out_1 = out_1[~np.isnan(out_1)]
out_2 = out_2[~np.isnan(out_2)]
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
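# Check that tuple outputs (return_dict=False) match dict outputs (return_dict=True) element-wise.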
def test_model_outputs_equivalence(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def check_equivalence(model, tuple_inputs, dict_inputs, additional_kwargs={}):
tuple_output = model(tuple_inputs, return_dict=False, **additional_kwargs)
dict_output = model(dict_inputs, return_dict=True, **additional_kwargs).to_tuple()
def recursive_check(tuple_object, dict_object):
if isinstance(tuple_object, (List, Tuple)):
for tuple_iterable_value, dict_iterable_value in zip(tuple_object, dict_object):
recursive_check(tuple_iterable_value, dict_iterable_value)
elif tuple_object is None:
return
else:
self.assertTrue(
all(tf.equal(tuple_object, dict_object)),
msg=f"Tuple and dict output are not equal. Difference: {tf.math.reduce_max(tf.abs(tuple_object - dict_object))}",
)
recursive_check(tuple_output, dict_output)
for model_class in self.all_model_classes:
model = model_class(config)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs)
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class)
dict_inputs = self._prepare_for_class(inputs_dict, model_class)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_hidden_states": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(model, tuple_inputs, dict_inputs, {"output_attentions": True})
tuple_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
dict_inputs = self._prepare_for_class(inputs_dict, model_class, return_labels=True)
check_equivalence(
model, tuple_inputs, dict_inputs, {"output_hidden_states": True, "output_attentions": True}
)
def _get_embeds(self, wte, input_ids):
# In our TF models, the input embeddings can take slightly different forms,
# so we try a few of them, falling back to a synthetic dummy tensor of ones
# when none of the embedding call signatures work:
try:
x = wte(input_ids, mode="embedding")
except Exception:
try:
x = wte([input_ids], mode="embedding")
except Exception:
try:
x = wte([input_ids, None, None, None], mode="embedding")
except Exception:
if hasattr(self.model_tester, "embedding_size"):
x = tf.ones(
input_ids.shape + [self.model_tester.embedding_size],
dtype=tf.dtypes.float32,
)
else:
x = tf.ones(
input_ids.shape + [self.model_tester.hidden_size],
dtype=tf.dtypes.float32,
)
return x
def test_inputs_embeds(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
inputs = copy.deepcopy(self._prepare_for_class(inputs_dict, model_class))
if not self.is_encoder_decoder:
input_ids = inputs["input_ids"]
del inputs["input_ids"]
else:
encoder_input_ids = inputs["input_ids"]
decoder_input_ids = inputs.get("decoder_input_ids", encoder_input_ids)
del inputs["input_ids"]
inputs.pop("decoder_input_ids", None)
wte = model.get_input_embeddings()
if not self.is_encoder_decoder:
inputs["inputs_embeds"] = self._get_embeds(wte, input_ids)
else:
inputs["inputs_embeds"] = self._get_embeds(wte, encoder_input_ids)
inputs["decoder_inputs_embeds"] = self._get_embeds(wte, decoder_input_ids)
model(inputs)
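# Check that models accept plain numpy arrays as inputs, not only tf.Tensors.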
def test_numpy_arrays_inputs(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def prepare_numpy_arrays(inputs_dict):
inputs_np_dict = {}
for k, v in inputs_dict.items():
if tf.is_tensor(v):
inputs_np_dict[k] = v.numpy()
else:
inputs_np_dict[k] = np.array(v)  # convert the value, not the key, to an array
return inputs_np_dict
for model_class in self.all_model_classes:
model = model_class(config)
inputs = self._prepare_for_class(inputs_dict, model_class)
inputs_np = prepare_numpy_arrays(inputs)
model(inputs_np)
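# Check that resize_token_embeddings resizes the input embeddings, bias, and output embeddings consistently.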
def test_resize_token_embeddings(self):
if not self.test_resize_embeddings:
return
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
def _get_word_embedding_weight(model, embedding_layer):
if hasattr(embedding_layer, "word_embeddings"):
return embedding_layer.word_embeddings
elif hasattr(embedding_layer, "weight"):
return embedding_layer.weight
elif hasattr(embedding_layer, "decoder"):
return embedding_layer.decoder
else:
# Build the word embedding weights if they do not exist yet,
# then retry fetching the attribute once they are built.
model(model.dummy_inputs)
if hasattr(embedding_layer, "word_embeddings"):
return embedding_layer.word_embeddings
elif hasattr(embedding_layer, "weight"):
return embedding_layer.weight
elif hasattr(embedding_layer, "decoder"):
return embedding_layer.decoder
else:
return None
for model_class in self.all_model_classes:
for size in [config.vocab_size - 10, config.vocab_size + 10, None]:
# build the embeddings
model = model_class(config=config)
old_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
old_bias = model.get_bias()
old_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
# reshape the embeddings
model.resize_token_embeddings(size)
new_input_embeddings = _get_word_embedding_weight(model, model.get_input_embeddings())
new_bias = model.get_bias()
new_output_embeddings = _get_word_embedding_weight(model, model.get_output_embeddings())
# check that the resized embeddings size matches the desired size.
assert_size = size if size is not None else config.vocab_size
self.assertEqual(new_input_embeddings.shape[0], assert_size)
# check that weights remain the same after resizing
models_equal = True
for p1, p2 in zip(old_input_embeddings.value(), new_input_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_bias is not None and new_bias is not None:
for old_weight, new_weight in zip(old_bias.values(), new_bias.values()):
self.assertEqual(new_weight.shape[0], assert_size)
models_equal = True
for p1, p2 in zip(old_weight.value(), new_weight.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
if old_output_embeddings is not None and new_output_embeddings is not None:
self.assertEqual(new_output_embeddings.shape[0], assert_size)
self.assertEqual(new_output_embeddings.shape[1], old_output_embeddings.shape[1])
models_equal = True
for p1, p2 in zip(old_output_embeddings.value(), new_output_embeddings.value()):
if tf.math.reduce_sum(tf.math.abs(p1 - p2)) > 0:
models_equal = False
self.assertTrue(models_equal)
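# Check sampling-based generation without beam search, including bad_words_ids filtering.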
def test_lm_head_model_random_no_beam_search_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict["input_ids"]
# iterate over all generative models
for model_class in self.all_generative_model_classes:
model = model_class(config)
if config.bos_token_id is None:
# if bos_token_id is not defined, the model needs input_ids
with self.assertRaises(AssertionError):
model.generate(do_sample=True, max_length=5)
# num_return_sequences = 1
self._check_generated_ids(model.generate(input_ids, do_sample=True))
else:
# num_return_sequences = 1
self._check_generated_ids(model.generate(do_sample=True, max_length=5))
with self.assertRaises(AssertionError):
# generating multiple sequences with greedy (non-beam) decoding is not
# allowed, as it would always generate the same sequences
model.generate(input_ids, do_sample=False, num_return_sequences=2)
# num_return_sequences > 1, sample
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_return_sequences=2))
# check generation with bad_words_ids
# create one single-token bad-word sequence and one two-token bad-word sequence
bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
output_tokens = model.generate(
input_ids, do_sample=True, bad_words_ids=bad_words_ids, num_return_sequences=2
)
# only count generated tokens
generated_ids = output_tokens[:, input_ids.shape[-1] :]
self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
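# Check beam-search generation, including multiple return sequences and bad_words_ids filtering.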
def test_lm_head_model_random_beam_search_generate(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
input_ids = inputs_dict["input_ids"]
for model_class in self.all_generative_model_classes:
model = model_class(config)
if config.bos_token_id is None:
# if bos_token_id is not defined, the model needs input_ids; num_return_sequences = 1
self._check_generated_ids(model.generate(input_ids, do_sample=True, num_beams=2))
else:
# num_return_sequences = 1
self._check_generated_ids(model.generate(do_sample=True, max_length=5, num_beams=2))
with self.assertRaises(AssertionError):
# generating more return sequences than the number of beams is not possible
model.generate(input_ids, do_sample=False, num_return_sequences=3, num_beams=2)
# num_return_sequences > 1, sample
self._check_generated_ids(
model.generate(
input_ids,
do_sample=True,
num_beams=2,
num_return_sequences=2,
)
)
# num_return_sequences > 1, greedy
self._check_generated_ids(model.generate(input_ids, do_sample=False, num_beams=2, num_return_sequences=2))
# check generation with bad_words_ids
# create one single-token bad-word sequence and one two-token bad-word sequence
bad_words_ids = [self._generate_random_bad_tokens(1, model), self._generate_random_bad_tokens(2, model)]
output_tokens = model.generate(
input_ids, do_sample=False, bad_words_ids=bad_words_ids, num_beams=2, num_return_sequences=2
)
# only count generated tokens
generated_ids = output_tokens[:, input_ids.shape[-1] :]
self.assertFalse(self._check_match_tokens(generated_ids.numpy().tolist(), bad_words_ids))
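# Check that the loss can be computed from kwargs, from a dict, and from a positional tuple of inputs.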
def test_loss_computation(self):
config, inputs_dict = self.model_tester.prepare_config_and_inputs_for_common()
for model_class in self.all_model_classes:
model = model_class(config)
if getattr(model, "compute_loss", None):
# The number of elements in the loss should be the same as the number of elements in the label
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
added_label = prepared_for_class[
sorted(list(prepared_for_class.keys() - inputs_dict.keys()), reverse=True)[0]
]
loss_size = tf.size(added_label)
if model.__class__ in TF_MODEL_FOR_CAUSAL_LM_MAPPING.values():
# if the loss is a causal LM loss, the labels are shifted, so one label
# per batch is cut
loss_size = loss_size - self.model_tester.batch_size
# Test that the model correctly computes the loss with kwargs
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
input_ids = prepared_for_class.pop("input_ids")
loss = model(input_ids, **prepared_for_class)[0]
self.assertEqual(loss.shape, [loss_size])
# Test that the model correctly computes the loss with a dict
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
loss = model(prepared_for_class)[0]
self.assertEqual(loss.shape, [loss_size])
# Test that the model correctly computes the loss with a tuple
prepared_for_class = self._prepare_for_class(inputs_dict.copy(), model_class, return_labels=True)
# Get keys that were added with the _prepare_for_class function
label_keys = prepared_for_class.keys() - inputs_dict.keys()
signature = inspect.signature(model.call).parameters
signature_names = list(signature.keys())
# Create a dictionary holding the location of the tensors in the tuple
tuple_index_mapping = {0: "input_ids"}
for label_key in label_keys:
label_key_index = signature_names.index(label_key)
tuple_index_mapping[label_key_index] = label_key
sorted_tuple_index_mapping = sorted(tuple_index_mapping.items())
# Initialize a list with their default values, update the values and convert to a tuple
list_input = []
for name in signature_names:
if name != "kwargs":
list_input.append(signature[name].default)
for index, value in sorted_tuple_index_mapping:
list_input[index] = prepared_for_class[value]
tuple_input = tuple(list_input)
# Send to model
loss = model(tuple_input[:-1])[0]
self.assertEqual(loss.shape, [loss_size])
def _generate_random_bad_tokens(self, num_bad_tokens, model):
# special tokens cannot be bad tokens
special_tokens = []
if model.config.bos_token_id is not None:
special_tokens.append(model.config.bos_token_id)
if model.config.pad_token_id is not None:
special_tokens.append(model.config.pad_token_id)
if model.config.eos_token_id is not None:
special_tokens.append(model.config.eos_token_id)
# create random bad tokens that are not special tokens
bad_tokens = []
while len(bad_tokens) < num_bad_tokens:
token = tf.squeeze(ids_tensor((1, 1), self.model_tester.vocab_size), 0).numpy()[0]
if token not in special_tokens:
bad_tokens.append(token)
return bad_tokens
def _check_generated_ids(self, output_ids):
for token_id in output_ids[0].numpy().tolist():
self.assertGreaterEqual(token_id, 0)
self.assertLess(token_id, self.model_tester.vocab_size)
def _check_match_tokens(self, generated_ids, bad_words_ids):
# for all bad word tokens
for bad_word_ids in bad_words_ids:
# for all slices in batch
for generated_ids_slice in generated_ids:
# for all word idx
for i in range(len(bad_word_ids), len(generated_ids_slice)):
# if tokens match
if generated_ids_slice[i - len(bad_word_ids) : i] == bad_word_ids:
return True
return False
def ids_tensor(shape, vocab_size, rng=None, name=None, dtype=None):
"""Creates a random int32 tensor of the shape within the vocab size."""
if rng is None:
rng = random.Random()
total_dims = 1
for dim in shape:
total_dims *= dim
values = []
for _ in range(total_dims):
values.append(rng.randint(0, vocab_size - 1))
output = tf.constant(values, shape=shape, dtype=dtype if dtype is not None else tf.int32)
return output
@require_tf
class UtilsFunctionsTest(unittest.TestCase):
# tests whether the top_k_top_p_filtering function behaves as expected
def test_top_k_top_p_filtering(self):
logits = tf.convert_to_tensor(
[
[
8.2220991, # 3rd highest value; idx. 0
-0.5620044,
5.23229752,
4.0386393,
-6.8798378,
-0.54785802,
-3.2012153,
2.92777176,
1.88171953,
7.35341276, # 5th highest value; idx. 9
8.43207833, # 2nd highest value; idx. 10
-9.85711836,
-5.96209236,
-1.13039161,
-7.1115294,
-0.8369633,
-5.3186408,
7.06427407,
0.81369344,
-0.82023817,
-5.9179796,
0.58813443,
-6.99778438,
4.71551189,
-0.18771637,
7.44020759, # 4th highest value; idx. 25
9.38450987, # 1st highest value; idx. 26
2.12662941,
-9.32562038,
2.35652522,
], # cumulative prob of the 5 highest values <= 0.6
[
0.58425518,
4.53139238,
-5.57510464,
-6.28030699,
-7.19529503,
-4.02122551,
1.39337037,
-6.06707057,
1.59480517,
-9.643119,
0.03907799,
0.67231762,
-8.88206726,
6.27115922, # 4th highest value; idx. 13
2.28520723,
4.82767506,
4.30421368,
8.8275313, # 2nd highest value; idx. 17
5.44029958, # 5th highest value; idx. 18
-4.4735794,
7.38579536, # 3rd highest value; idx. 20
-2.91051663,
2.61946077,
-2.5674762,
-9.48959302,
-4.02922645,
-1.35416918,
9.67702323, # 1st highest value; idx. 27
-5.89478553,
1.85370467,
], # cumulative prob of the 5 highest values <= 0.6
],
dtype=tf.float32,
)
non_inf_expected_idx = tf.convert_to_tensor(
[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, 20], [1, 27]],
dtype=tf.int32,
) # expected non filtered idx as noted above
non_inf_expected_output = tf.convert_to_tensor(
[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, 5.4402995, 7.3857956, 9.677023],
dtype=tf.float32,
) # expected non filtered values as noted above
output = tf_top_k_top_p_filtering(logits, top_k=10, top_p=0.6, min_tokens_to_keep=4)
non_inf_output = output[output != -float("inf")]
non_inf_idx = tf.cast(
tf.where(tf.not_equal(output, tf.constant(-float("inf"), dtype=tf.float32))),
dtype=tf.int32,
)
tf.debugging.assert_near(non_inf_output, non_inf_expected_output, rtol=1e-12)
tf.debugging.assert_equal(non_inf_idx, non_inf_expected_idx)
| [
"tensorflow.convert_to_tensor",
"tensorflow.keras.models.load_model",
"tensorflow.math.abs",
"tensorflow.config.LogicalDeviceConfiguration",
"tensorflow.zeros",
"tensorflow.equal",
"tensorflow.debugging.assert_near",
"tensorflow.config.list_physical_devices",
"torch.no_grad",
"tensorflow.keras.Input",
"torch.from_numpy",
"tensorflow.debugging.assert_equal",
"tensorflow.is_tensor",
"tensorflow.keras.models.Model",
"numpy.isnan",
"tensorflow.keras.layers.Dense",
"tensorflow.config.list_logical_devices",
"tensorflow.keras.Model",
"numpy.array",
"tensorflow.size",
"tensorflow.constant",
"numpy.abs",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.ones",
"tensorflow.expand_dims",
"numpy.ones",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.abs"
] | tests/test_modeling_tf_common.py | [(30, 'transformers.is_tf_available', 'is_tf_available', ([], {}), False, 'from transformers import is_tf_available\n'), (64, 'copy.deepcopy', 'copy.deepcopy', (['config'], {}), False, 'import copy\n'), (1088, 'tensorflow.constant', 'tf.constant', (['values'], {'shape': 'shape', 'dtype': '(dtype if dtype is not None else tf.int32)'}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), True, 'import tensorflow as tf\n'), (81, 'copy.deepcopy', 'copy.deepcopy', (['inputs_dict'], {}), False, 'import copy\n'), (509, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(3e-05)', 'epsilon': '(1e-08)', 'clipnorm': '(1.0)'}), True, 'import tensorflow as tf\n'), (510, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), True, 'import tensorflow as tf\n'), (511, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', (['"""accuracy"""'], {}), True, 'import tensorflow as tf\n'), (1078, 'random.Random', 'random.Random', ([], {}), False, 'import random\n'), (1098, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[8.2220991, -0.5620044, 5.23229752, 4.0386393, -6.8798378, -0.54785802, -\n 3.2012153, 2.92777176, 1.88171953, 7.35341276, 8.43207833, -9.85711836,\n -5.96209236, -1.13039161, -7.1115294, -0.8369633, -5.3186408, \n 7.06427407, 0.81369344, -0.82023817, -5.9179796, 0.58813443, -\n 6.99778438, 4.71551189, -0.18771637, 7.44020759, 9.38450987, 2.12662941,\n -9.32562038, 2.35652522], [0.58425518, 4.53139238, -5.57510464, -\n 6.28030699, -7.19529503, -4.02122551, 1.39337037, -6.06707057, \n 1.59480517, -9.643119, 0.03907799, 0.67231762, -8.88206726, 6.27115922,\n 2.28520723, 4.82767506, 4.30421368, 8.8275313, 5.44029958, -4.4735794, \n 7.38579536, -2.91051663, 2.61946077, -2.5674762, -9.48959302, -\n 4.02922645, -1.35416918, 9.67702323, -5.89478553, 1.85370467]]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (1168, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[0, 0], [0, 9], [0, 10], [0, 25], [0, 26], [1, 13], [1, 17], [1, 18], [1, \n 20], [1, 27]]'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (1173, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[8.222099, 7.3534126, 8.432078, 7.4402075, 9.38451, 6.271159, 8.827531, \n 5.4402995, 7.3857956, 9.677023]'], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (1178, 'transformers.tf_top_k_top_p_filtering', 'tf_top_k_top_p_filtering', (['logits'], {'top_k': '(10)', 'top_p': '(0.6)', 'min_tokens_to_keep': '(4)'}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (1186, 'tensorflow.debugging.assert_near', 'tf.debugging.assert_near', (['non_inf_output', 'non_inf_expected_output'], {'rtol': '(1e-12)'}), True, 'import tensorflow as tf\n'), (1187, 'tensorflow.debugging.assert_equal', 'tf.debugging.assert_equal', (['non_inf_idx', 'non_inf_expected_idx'], {}), True, 'import tensorflow as tf\n'), (83, 
'transformers.TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values', 'TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values', ([], {}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (148, 'inspect.signature', 'inspect.signature', (['model.call'], {}), False, 'import inspect\n'), (185, 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), False, 'import tempfile\n'), (187, 'os.path.join', 'os.path.join', (['tmpdirname', '"""saved_model"""'], {}), False, 'import os\n'), (346, 'numpy.abs', 'np.abs', (['(out_1 - out_2)'], {}), True, 'import numpy as np\n'), (372, 'transformers.load_tf2_model_in_pytorch_model', 'transformers.load_tf2_model_in_pytorch_model', (['pt_model', 'tf_model'], {}), False, 'import transformers\n'), (483, 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'symbolic_inputs', 'outputs': '[outputs]'}), True, 'import tensorflow as tf\n'), (543, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': '[input_ids]', 'outputs': '[outputs]'}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.config.list_logical_devices', 'tf.config.list_logical_devices', (['"""GPU"""'], {}), True, 'import tensorflow as tf\n'), (92, 'transformers.TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values', 'TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values', ([], {}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (93, 'tensorflow.ones', 'tf.ones', (['self.model_tester.batch_size'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (123, 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), False, 'import tempfile\n'), (188, 'os.path.exists', 'os.path.exists', (['saved_model_dir'], {}), False, 'import os\n'), (205, 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), False, 'import tempfile\n'), (207, 'os.path.join', 'os.path.join', (['tmpdirname', '"""saved_model"""'], {}), False, 'import os\n'), (225, 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), False, 'import tempfile\n'), (227, 'os.path.join', 'os.path.join', (['tmpdirname', '"""saved_model"""'], {}), False, 'import os\n'), (228, 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['saved_model_dir'], {}), True, 'import tensorflow as tf\n'), (265, 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), False, 'import tempfile\n'), (266, 'os.path.join', 'os.path.join', (['tmpdirname', '"""saved_model"""'], {}), False, 'import os\n'), (268, 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['saved_model_dir'], {}), True, 'import tensorflow as tf\n'), (302, 'transformers.TFSharedEmbeddings', 'TFSharedEmbeddings', (['(99)', '(32)'], {'name': '"""shared"""'}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, 
TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (309, 'tensorflow.keras.Input', 'tf.keras.Input', (['tensor.shape[1:]'], {'dtype': 'tensor.dtype'}), True, 'import tensorflow as tf\n'), (315, 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), False, 'import tempfile\n'), (316, 'os.path.join', 'os.path.join', (['tmpdirname', '"""keras_model.h5"""'], {}), False, 'import os\n'), (387, 'torch.no_grad', 'torch.no_grad', ([], {}), False, 'import torch\n'), (393, 'numpy.isnan', 'np.isnan', (['tf_hidden_states'], {}), True, 'import numpy as np\n'), (394, 'numpy.isnan', 'np.isnan', (['pt_hidden_states'], {}), True, 'import numpy as np\n'), (401, 'numpy.abs', 'np.abs', (['(tf_hidden_states - pt_hidden_states)'], {}), True, 'import numpy as np\n'), (405, 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), False, 'import tempfile\n'), (406, 'os.path.join', 'os.path.join', (['tmpdirname', '"""pt_model.bin"""'], {}), False, 'import os\n'), (408, 'transformers.load_pytorch_checkpoint_in_tf2_model', 'transformers.load_pytorch_checkpoint_in_tf2_model', (['tf_model', 'pt_checkpoint_path'], {}), False, 'import transformers\n'), (410, 'os.path.join', 'os.path.join', (['tmpdirname', '"""tf_model.h5"""'], {}), False, 'import os\n'), (412, 'transformers.load_tf2_checkpoint_in_pytorch_model', 'transformers.load_tf2_checkpoint_in_pytorch_model', (['pt_model', 'tf_checkpoint_path'], {}), False, 'import transformers\n'), (427, 'torch.no_grad', 'torch.no_grad', ([], {}), False, 'import torch\n'), (432, 'numpy.isnan', 'np.isnan', (['tfo'], {}), True, 'import numpy as np\n'), (433, 'numpy.isnan', 'np.isnan', (['pto'], {}), True, 'import numpy as np\n'), (440, 'numpy.abs', 'np.abs', (['(tfo - pto)'], {}), True, 'import numpy as np\n'), (461, 'transformers.TFSharedEmbeddings', 'TFSharedEmbeddings', (['self.model_tester.vocab_size', 'self.model_tester.hidden_size'], {'name': '"""shared"""'}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (469, 'tensorflow.keras.Input', 'tf.keras.Input', (['tensor.shape[1:]'], {'dtype': 'tensor.dtype'}), True, 'import tensorflow as tf\n'), (482, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_labels'], {'activation': '"""softmax"""', 'name': '"""outputs"""'}), True, 'import tensorflow as tf\n'), (488, 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), False, 'import tempfile\n'), (489, 'os.path.join', 'os.path.join', (['tmpdirname', '"""keras_model.h5"""'], {}), False, 'import os\n'), (532, 'tempfile.TemporaryDirectory', 'tempfile.TemporaryDirectory', ([], {}), False, 'import tempfile\n'), (540, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {'activation': '"""softmax"""', 'name': '"""outputs"""'}), True, 'import tensorflow as tf\n'), (672, 
'transformers.TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values', 'TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values', ([], {}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (705, 'numpy.abs', 'np.abs', (['(out_1 - out_2)'], {}), True, 'import numpy as np\n'), (819, 'tensorflow.is_tensor', 'tf.is_tensor', (['v'], {}), True, 'import tensorflow as tf\n'), (989, 'tensorflow.size', 'tf.size', (['added_label'], {}), True, 'import tensorflow as tf\n'), (94, 'transformers.TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.values', 'TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING.values', ([], {}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (95, 'tensorflow.zeros', 'tf.zeros', (['self.model_tester.batch_size'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (96, 'tensorflow.zeros', 'tf.zeros', (['self.model_tester.batch_size'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (208, 'os.path.exists', 'os.path.exists', (['saved_model_dir'], {}), False, 'import os\n'), (319, 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['filepath'], {'custom_objects': "{main_layer_class.__name__: main_layer_class, 'TFSharedEmbeddings':\n TFSharedEmbeddings}"}), True, 'import tensorflow as tf\n'), (327, 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['filepath'], {'custom_objects': '{main_layer_class.__name__: main_layer_class}'}), True, 'import tensorflow as tf\n'), (344, 'numpy.isnan', 'np.isnan', (['out_1'], {}), True, 'import numpy as np\n'), (345, 'numpy.isnan', 'np.isnan', (['out_2'], {}), True, 'import numpy as np\n'), (419, 'numpy.array', 'np.array', (['key'], {'dtype': 'bool'}), True, 'import numpy as np\n'), (492, 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['filepath'], {'custom_objects': "{main_layer_class.__name__: main_layer_class, 'TFSharedEmbeddings':\n TFSharedEmbeddings}"}), True, 'import tensorflow as tf\n'), (500, 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['filepath'], {'custom_objects': '{main_layer_class.__name__: main_layer_class}'}), True, 'import tensorflow as tf\n'), (516, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'batch_shape': '(2, max_input)', 'name': '"""decoder_input_ids"""', 'dtype': '"""int32"""'}), True, 'import tensorflow as tf\n'), (521, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'batch_shape': '(2, max_input)', 'name': '"""input_ids"""', 'dtype': '"""int32"""'}), True, 'import tensorflow as tf\n'), (523, 'transformers.TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values', 'TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING.values', ([], {}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, 
TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (524, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'batch_shape': '(4, 2, max_input)', 'name': '"""input_ids"""', 'dtype': '"""int32"""'}), True, 'import tensorflow as tf\n'), (526, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'batch_shape': '(2, max_input)', 'name': '"""input_ids"""', 'dtype': '"""int32"""'}), True, 'import tensorflow as tf\n'), (561, 'numpy.abs', 'np.abs', (['(output_dict - output_keywords)'], {}), True, 'import numpy as np\n'), (670, 'transformers.TF_MODEL_FOR_CAUSAL_LM_MAPPING.values', 'TF_MODEL_FOR_CAUSAL_LM_MAPPING.values', ([], {}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (671, 'transformers.TF_MODEL_FOR_MASKED_LM_MAPPING.values', 'TF_MODEL_FOR_MASKED_LM_MAPPING.values', ([], {}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (822, 'numpy.array', 'np.array', (['k'], {}), True, 'import numpy as np\n'), (991, 'transformers.TF_MODEL_FOR_CAUSAL_LM_MAPPING.values', 'TF_MODEL_FOR_CAUSAL_LM_MAPPING.values', ([], {}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (1013, 'inspect.signature', 'inspect.signature', (['model.call'], {}), False, 'import inspect\n'), (54, 'tensorflow.config.LogicalDeviceConfiguration', 'tf.config.LogicalDeviceConfiguration', ([], {'memory_limit': '_tf_gpu_memory_limit'}), True, 'import tensorflow as tf\n'), (85, 'tensorflow.expand_dims', 'tf.expand_dims', (['v', '(1)'], {}), True, 'import tensorflow as tf\n'), (97, 'transformers.TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.values', 'TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING.values', ([], {}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (98, 'tensorflow.zeros', 'tf.zeros', (['self.model_tester.batch_size'], {'dtype': 'tf.int32'}), True, 
'import tensorflow as tf\n'), (290, 'importlib.import_module', 'import_module', (['model_class.__module__'], {}), False, 'from importlib import import_module\n'), (448, 'importlib.import_module', 'import_module', (['model_class.__module__'], {}), False, 'from importlib import import_module\n'), (703, 'numpy.isnan', 'np.isnan', (['out_1'], {}), True, 'import numpy as np\n'), (704, 'numpy.isnan', 'np.isnan', (['out_2'], {}), True, 'import numpy as np\n'), (99, 'transformers.TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.values', 'TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING.values', ([], {}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (100, 'tensorflow.zeros', 'tf.zeros', (['self.model_tester.batch_size'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (420, 'torch.from_numpy', 'torch.from_numpy', (['key'], {}), False, 'import torch\n'), (478, 'numpy.ones', 'np.ones', (['(self.model_tester.batch_size, self.model_tester.seq_length, num_labels, 1)'], {}), True, 'import numpy as np\n'), (879, 'tensorflow.math.abs', 'tf.math.abs', (['(p1 - p2)'], {}), True, 'import tensorflow as tf\n'), (108, 'tensorflow.zeros', 'tf.zeros', (['(self.model_tester.batch_size, self.model_tester.seq_length)'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (724, 'tensorflow.equal', 'tf.equal', (['tuple_object', 'dict_object'], {}), True, 'import tensorflow as tf\n'), (899, 'tensorflow.math.abs', 'tf.math.abs', (['(p1 - p2)'], {}), True, 'import tensorflow as tf\n'), (777, 'tensorflow.ones', 'tf.ones', (['(input_ids.shape + [self.model_tester.embedding_size])'], {'dtype': 'tf.dtypes.float32'}), True, 'import tensorflow as tf\n'), (782, 'tensorflow.ones', 'tf.ones', (['(input_ids.shape + [self.model_tester.hidden_size])'], {'dtype': 'tf.dtypes.float32'}), True, 'import tensorflow as tf\n'), (889, 'tensorflow.math.abs', 'tf.math.abs', (['(p1 - p2)'], {}), True, 'import tensorflow as tf\n'), (102, 'transformers.TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.values', 'TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING.values', ([], {}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (103, 'transformers.TF_MODEL_FOR_CAUSAL_LM_MAPPING.values', 'TF_MODEL_FOR_CAUSAL_LM_MAPPING.values', ([], {}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (104, 'transformers.TF_MODEL_FOR_MASKED_LM_MAPPING.values', 'TF_MODEL_FOR_MASKED_LM_MAPPING.values', ([], {}), 
False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (105, 'transformers.TF_MODEL_FOR_PRETRAINING_MAPPING.values', 'TF_MODEL_FOR_PRETRAINING_MAPPING.values', ([], {}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (106, 'transformers.TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values', 'TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING.values', ([], {}), False, 'from transformers import TF_MODEL_FOR_CAUSAL_LM_MAPPING, TF_MODEL_FOR_MASKED_LM_MAPPING, TF_MODEL_FOR_MULTIPLE_CHOICE_MAPPING, TF_MODEL_FOR_NEXT_SENTENCE_PREDICTION_MAPPING, TF_MODEL_FOR_PRETRAINING_MAPPING, TF_MODEL_FOR_QUESTION_ANSWERING_MAPPING, TF_MODEL_FOR_SEQ_TO_SEQ_CAUSAL_LM_MAPPING, TF_MODEL_FOR_SEQUENCE_CLASSIFICATION_MAPPING, TF_MODEL_FOR_TOKEN_CLASSIFICATION_MAPPING, TFSharedEmbeddings, tf_top_k_top_p_filtering\n'), (725, 'tensorflow.abs', 'tf.abs', (['(tuple_object - dict_object)'], {}), True, 'import tensorflow as tf\n')] |
QAlexBall/Learning_Py | 8a5987946928a9d86f6807555ed435ac604b2c44 | # Train a model
import tensorflow as tf
assert tf.executing_eagerly()  # eager execution is the default in TF 2.x; fail fast if it is off
(mnist_images, mnist_labels), _ = tf.keras.datasets.mnist.load_data()
dataset = tf.data.Dataset.from_tensor_slices(
(tf.cast(mnist_images[..., tf.newaxis] / 255, tf.float32),
tf.cast(mnist_labels, tf.int64))
)
dataset = dataset.shuffle(1000).batch(32)
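# (Added note) The pipeline above scales the pixel values to [0, 1], adds a
# trailing channel axis via tf.newaxis so the images match Conv2D's NHWC
# layout, then shuffles and batches them for training.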
mnist_model = tf.keras.Sequential([
tf.keras.layers.Conv2D(16, [3, 3], activation='relu', input_shape=(None, None, 1)),
tf.keras.layers.Conv2D(16, [3, 3], activation='relu'),
tf.keras.layers.GlobalAveragePooling2D(),
tf.keras.layers.Dense(10)
])
for images, labels in dataset.take(1):
print("Logits: ", mnist_model(images[0:1]).numpy())
optimizer = tf.keras.optimizers.Adam()
loss_object = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
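# (Added comment) from_logits=True because the final Dense(10) layer has no
# softmax; letting the loss apply it internally is numerically more stable.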
loss_history = []
def train_step(images, labels):
with tf.GradientTape() as tape:
logits = mnist_model(images, training=True)
# Add asserts to check the shape of the output.
tf.debugging.assert_equal(logits.shape, (32, 10))
loss_value = loss_object(labels, logits)
loss_history.append(loss_value.numpy().mean())
grads = tape.gradient(loss_value, mnist_model.trainable_variables)
optimizer.apply_gradients(zip(grads, mnist_model.trainable_variables))
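# An optional speed-up (a sketch, not part of the original script): the same
# step wrapped in tf.function is traced into a graph and usually runs much
# faster than op-by-op eager execution. The loss_history bookkeeping stays
# outside because .numpy() is unavailable in graph mode.
@tf.function
def compiled_train_step(images, labels):
  with tf.GradientTape() as tape:
    logits = mnist_model(images, training=True)
    loss_value = loss_object(labels, logits)
  grads = tape.gradient(loss_value, mnist_model.trainable_variables)
  optimizer.apply_gradients(zip(grads, mnist_model.trainable_variables))
  return loss_value
# train() below still calls the eager train_step; swap in compiled_train_step
# inside the loop (appending the returned loss) to use the compiled version.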
def train():
for epoch in range(3):
for (batch, (images, labels)) in enumerate(dataset):
train_step(images, labels)
print('Epoch {} finished'.format(epoch))
train()
import matplotlib.pyplot as plt
plt.plot(loss_history)
plt.xlabel('Batch #')
plt.ylabel('Loss [entropy]')
plt.show()
| [
"tensorflow.debugging.assert_equal",
"tensorflow.executing_eagerly",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.cast",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.datasets.mnist.load_data",
"matplotlib.pyplot.plot",
"tensorflow.GradientTape",
"tensorflow.keras.optimizers.Adam",
"matplotlib.pyplot.xlabel",
"matplotlib.pyplot.show",
"matplotlib.pyplot.ylabel"
] | Three_Part_Moudule/Tensoflow-v2-beta/Eager_Execution/train_model.py | [(3, 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), True, 'import tensorflow as tf\n'), (5, 'tensorflow.keras.datasets.mnist.load_data', 'tf.keras.datasets.mnist.load_data', ([], {}), True, 'import tensorflow as tf\n'), (24, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), True, 'import tensorflow as tf\n'), (25, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), True, 'import tensorflow as tf\n'), (52, 'matplotlib.pyplot.plot', 'plt.plot', (['loss_history'], {}), True, 'import matplotlib.pyplot as plt\n'), (53, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Batch #"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (54, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Loss [entropy]"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (55, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (8, 'tensorflow.cast', 'tf.cast', (['(mnist_images[..., tf.newaxis] / 255)', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (9, 'tensorflow.cast', 'tf.cast', (['mnist_labels', 'tf.int64'], {}), True, 'import tensorflow as tf\n'), (14, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(16)', '[3, 3]'], {'activation': '"""relu"""', 'input_shape': '(None, None, 1)'}), True, 'import tensorflow as tf\n'), (15, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(16)', '[3, 3]'], {'activation': '"""relu"""'}), True, 'import tensorflow as tf\n'), (16, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'tf.keras.layers.GlobalAveragePooling2D', ([], {}), True, 'import tensorflow as tf\n'), (17, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {}), True, 'import tensorflow as tf\n'), (31, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (35, 'tensorflow.debugging.assert_equal', 'tf.debugging.assert_equal', (['logits.shape', '(32, 10)'], {}), True, 'import tensorflow as tf\n')] |
DhruvAwasthi/TensorFlowSpecialization | aeaa57eefd74f96f7389458662e050667eab7a54 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
# ATTENTION: Please do not alter any of the provided code in the exercise. Only add your own code where indicated
# ATTENTION: Please do not add or remove any cells in the exercise. The grader will check specific cells based on the cell position.
# ATTENTION: Please use the provided epoch values when training.
import tensorflow as tf
print(tf.__version__)
# EXPECTED OUTPUT
# 2.0.0-beta1 (or later)
# In[2]:
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
def plot_series(time, series, format="-", start=0, end=None):
plt.plot(time[start:end], series[start:end], format)
plt.xlabel("Time")
plt.ylabel("Value")
plt.grid(True)
def trend(time, slope=0):
return slope * time
def seasonal_pattern(season_time):
"""Just an arbitrary pattern, you can change it if you wish"""
return np.where(season_time < 0.1,
np.cos(season_time * 7 * np.pi),
1 / np.exp(5 * season_time))
def seasonality(time, period, amplitude=1, phase=0):
"""Repeats the same pattern at each period"""
season_time = ((time + phase) % period) / period
return amplitude * seasonal_pattern(season_time)
def noise(time, noise_level=1, seed=None):
rnd = np.random.RandomState(seed)
return rnd.randn(len(time)) * noise_level
time = np.arange(4 * 365 + 1, dtype="float32")
baseline = 10
amplitude = 40
slope = 0.01
noise_level = 2
# Create the series
series = baseline + trend(time, slope) + seasonality(time, period=365, amplitude=amplitude)
# Update with noise
series += noise(time, noise_level, seed=42)
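# (Added note) seed=42 makes the noise reproducible; the expected metric
# values quoted further below depend on this exact series.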
plt.figure(figsize=(10, 6))
plot_series(time, series)
plt.show()
# EXPECTED OUTPUT: [inline base64 chart image removed; it rendered the plot of the full synthetic series described below]
# Chart as in the screencast. First should have 5 distinctive 'peaks'
# Now that we have the time series, let's split it so we can start forecasting
# In[8]:
split_time = 1100
time_train = time[:split_time]
x_train = series[:split_time]
time_valid = time[split_time:]
x_valid = series[split_time:]
plt.figure(figsize=(10, 6))
plot_series(time_train, x_train)
plt.show()
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plt.show()
# EXPECTED OUTPUT
# Chart WITH 4 PEAKS between 50 and 65 and 3 troughs between -12 and 0
# Chart with 2 Peaks, first at slightly above 60, last at a little more than that, should also have a single trough at about 0
# # Naive Forecast
# In[9]:
naive_forecast = series[split_time - 1: -1]
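# A quick sanity check (added, not in the original notebook): the naive
# forecast is just the series shifted one step, so it lines up with the
# validation window.
assert len(naive_forecast) == len(x_valid)
assert naive_forecast[0] == series[split_time - 1]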
# In[10]:
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, naive_forecast)
# Expected output: Chart similar to above, but with forecast overlay
# Let's zoom in on the start of the validation period:
# In[11]:
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid, start=0, end=150)
plot_series(time_valid, naive_forecast, start=1, end=151)
# EXPECTED OUTPUT - Chart with x-axis from 1100-1250 and y-axis showing the series values and the projections. The projections should be stepped one time unit 'after' the series.
# Now let's compute the mean squared error and the mean absolute error between the forecasts and the actual values in the validation period:
# In[12]:
print(keras.metrics.mean_squared_error(x_valid, naive_forecast).numpy())
print(keras.metrics.mean_absolute_error(x_valid, naive_forecast).numpy())
# Expected Output
# 19.578304
# 2.6011968
# That's our baseline, now let's try a moving average:
# In[13]:
def moving_average_forecast(series, window_size):
"""Forecasts the mean of the last few values.
If window_size=1, then this is equivalent to naive forecast"""
forecast = []
for time in range(len(series) - window_size):
forecast.append(series[time:time + window_size].mean())
return np.array(forecast)
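# An equivalent vectorized form (a sketch added for illustration, not part of
# the original notebook): a cumulative sum replaces the Python loop and yields
# the same values as moving_average_forecast, up to floating-point rounding.
def moving_average_forecast_fast(series, window_size):
    csum = np.cumsum(np.concatenate([[0.0], series]))
    return ((csum[window_size:] - csum[:-window_size]) / window_size)[:-1]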
# In[14]:
moving_avg = moving_average_forecast(series, 30)[split_time - 30:]
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, moving_avg)
# EXPECTED OUTPUT
# Chart with time series from 1100->1450+ on X
# Time series plotted
# Moving average plotted over it
# In[15]:
print(keras.metrics.mean_squared_error(x_valid, moving_avg).numpy())
print(keras.metrics.mean_absolute_error(x_valid, moving_avg).numpy())
# EXPECTED OUTPUT
# 65.786224
# 4.3040023
# In[17]:
diff_series = series[365:] - series[:-365]
diff_time = time[365:]
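# (Added note) Differencing at lag 365 subtracts the value from exactly one
# season earlier, which cancels the yearly seasonality and turns the linear
# trend into a constant offset, leaving mostly noise.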
plt.figure(figsize=(10, 6))
plot_series(diff_time, diff_series)
plt.show()
# EXPECTED OUTPUT: Chart with the differenced series
# Great, the trend and seasonality seem to be gone, so now we can use the moving average:
# In[18]:
diff_moving_avg = moving_average_forecast(diff_series, 50)[split_time - 365 - 50:]
plt.figure(figsize=(10, 6))
plot_series(time_valid, diff_series[split_time - 365:])
plot_series(time_valid, diff_moving_avg)
plt.show()
# Expected output: diff chart from 1100->1450+
# Overlaid with moving average
# Now let's bring back the trend and seasonality by adding the past values from t - 365:
# In[19]:
diff_moving_avg_plus_past = series[split_time - 365:-365] + diff_moving_avg
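# (Added note) series[split_time - 365:-365] is the observation from one year
# before each validation step, so adding it back restores the trend and
# seasonality that the lag-365 differencing removed.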
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, diff_moving_avg_plus_past)
plt.show()
# Expected output: Chart from 1100->1450+ on X. Same chart as earlier for the time series, but the overlaid projection tracks it closely in value
# In[20]:
print(keras.metrics.mean_squared_error(x_valid, diff_moving_avg_plus_past).numpy())
print(keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_past).numpy())
# EXPECTED OUTPUT
# 8.498155
# 2.327179
# Better than the naive forecast, good. However, the forecasts look a bit too random, because we're just adding past values, which were noisy. Let's use a moving average on the past values to remove some of the noise:
# In[21]:
diff_moving_avg_plus_smooth_past = moving_average_forecast(series[split_time - 370:-360], 10) + diff_moving_avg
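# (Added note) With a window of 10, the slice [split_time - 370:-360] averages
# series[t - 370:t - 360] for each validation time t, i.e. a denoised estimate
# of the value one year earlier, before it is added to the forecast diffs.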
plt.figure(figsize=(10, 6))
plot_series(time_valid, x_valid)
plot_series(time_valid, diff_moving_avg_plus_smooth_past)
plt.show()
# EXPECTED OUTPUT:
# Similar chart to above, but the overlaid projections are much smoother
# In[23]:
print(keras.metrics.mean_squared_error(x_valid, diff_moving_avg_plus_smooth_past).numpy())
print(keras.metrics.mean_absolute_error(x_valid, diff_moving_avg_plus_smooth_past).numpy())
# EXPECTED OUTPUT
# 12.527958
# 2.2034433
# In[ ]:
# Now click the 'Submit Assignment' button above.
# Once that is complete, please run the following two cells to save your work and close the notebook
# In[ ]:
get_ipython().run_cell_magic('javascript', '', '<!-- Save the notebook -->\nIPython.notebook.save_checkpoint();')
# In[ ]:
get_ipython().run_cell_magic('javascript', '', 'IPython.notebook.session.delete();\nwindow.onbeforeunload = null\nsetTimeout(function() { window.close(); }, 1000);')
| [
"tensorflow.keras.metrics.mean_absolute_error",
"numpy.arange",
"numpy.cos",
"matplotlib.pyplot.plot",
"matplotlib.pyplot.ylabel",
"matplotlib.pyplot.grid",
"numpy.exp",
"matplotlib.pyplot.xlabel",
"numpy.array",
"matplotlib.pyplot.show",
"tensorflow.keras.metrics.mean_squared_error",
"numpy.random.RandomState",
"matplotlib.pyplot.figure"
] | 4. Sequences, Time Series and Prediction/Week 1/Exercise_1_Create_and_predict_synthetic_data_Question-FINAL.py | [(50, 'numpy.arange', 'np.arange', (['(4 * 365 + 1)'], {'dtype': '"""float32"""'}), True, 'import numpy as np\n'), (63, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), True, 'import matplotlib.pyplot as plt\n'), (65, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (81, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), True, 'import matplotlib.pyplot as plt\n'), (83, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (85, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), True, 'import matplotlib.pyplot as plt\n'), (87, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (105, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), True, 'import matplotlib.pyplot as plt\n'), (117, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), True, 'import matplotlib.pyplot as plt\n'), (155, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), True, 'import matplotlib.pyplot as plt\n'), (181, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), True, 'import matplotlib.pyplot as plt\n'), (183, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (195, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), True, 'import matplotlib.pyplot as plt\n'), (198, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (211, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), True, 'import matplotlib.pyplot as plt\n'), (214, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (235, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(10, 6)'}), True, 'import matplotlib.pyplot as plt\n'), (238, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (27, 'matplotlib.pyplot.plot', 'plt.plot', (['time[start:end]', 'series[start:end]', 'format'], {}), True, 'import matplotlib.pyplot as plt\n'), (28, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Time"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (29, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Value"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (30, 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), True, 'import matplotlib.pyplot as plt\n'), (47, 'numpy.random.RandomState', 'np.random.RandomState', (['seed'], {}), True, 'import numpy as np\n'), (147, 'numpy.array', 'np.array', (['forecast'], {}), True, 'import numpy as np\n'), (38, 'numpy.cos', 'np.cos', (['(season_time * 7 * np.pi)'], {}), True, 'import numpy as np\n'), (39, 'numpy.exp', 'np.exp', (['(5 * season_time)'], {}), True, 'import numpy as np\n'), (129, 'tensorflow.keras.metrics.mean_squared_error', 'keras.metrics.mean_squared_error', (['x_valid', 'naive_forecast'], {}), False, 'from tensorflow import keras\n'), (130, 'tensorflow.keras.metrics.mean_absolute_error', 'keras.metrics.mean_absolute_error', (['x_valid', 'naive_forecast'], {}), False, 'from tensorflow import keras\n'), (168, 'tensorflow.keras.metrics.mean_squared_error', 'keras.metrics.mean_squared_error', (['x_valid', 'moving_avg'], {}), False, 'from tensorflow import keras\n'), (169, 'tensorflow.keras.metrics.mean_absolute_error', 
'keras.metrics.mean_absolute_error', (['x_valid', 'moving_avg'], {}), False, 'from tensorflow import keras\n'), (221, 'tensorflow.keras.metrics.mean_squared_error', 'keras.metrics.mean_squared_error', (['x_valid', 'diff_moving_avg_plus_past'], {}), False, 'from tensorflow import keras\n'), (222, 'tensorflow.keras.metrics.mean_absolute_error', 'keras.metrics.mean_absolute_error', (['x_valid', 'diff_moving_avg_plus_past'], {}), False, 'from tensorflow import keras\n'), (247, 'tensorflow.keras.metrics.mean_squared_error', 'keras.metrics.mean_squared_error', (['x_valid', 'diff_moving_avg_plus_smooth_past'], {}), False, 'from tensorflow import keras\n'), (248, 'tensorflow.keras.metrics.mean_absolute_error', 'keras.metrics.mean_absolute_error', (['x_valid', 'diff_moving_avg_plus_smooth_past'], {}), False, 'from tensorflow import keras\n')] |
gabrielmahia/obamAI | ba45f0a6efae793d7f5e356a1dbf5c6835a65dba | """Build, train and evaluate an IIC Model
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.keras.layers import Input, Dense, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.callbacks import LearningRateScheduler
from tensorflow.keras.utils import plot_model
from tensorflow.keras import backend as K
from tensorflow.keras.datasets import mnist
import numpy as np
import os
import argparse
import vgg
from data_generator import DataGenerator
from utils import unsupervised_labels, center_crop
from utils import AccuracyCallback, lr_schedule
class IIC:
def __init__(self,
args,
backbone):
"""Contains the encoder model, the loss function,
loading of datasets, train and evaluation routines
to implement IIC unsupervised clustering via mutual
information maximization
Arguments:
args : Command line arguments to indicate choice
of batch size, number of heads, folder to save
weights file, weights file name, etc
            backbone (Model): IIC Encoder backbone (e.g. VGG)
"""
self.args = args
self.backbone = backbone
self._model = None
self.train_gen = DataGenerator(args, siamese=True)
self.n_labels = self.train_gen.n_labels
self.build_model()
self.load_eval_dataset()
self.accuracy = 0
def build_model(self):
"""Build the n_heads of the IIC model
"""
inputs = Input(shape=self.train_gen.input_shape, name='x')
x = self.backbone(inputs)
x = Flatten()(x)
# number of output heads
outputs = []
for i in range(self.args.heads):
name = "z_head%d" % i
outputs.append(Dense(self.n_labels,
activation='softmax',
name=name)(x))
self._model = Model(inputs, outputs, name='encoder')
        optimizer = Adam(lr=1e-3)  # 'lr' is the legacy alias of 'learning_rate'
self._model.compile(optimizer=optimizer, loss=self.mi_loss)
self._model.summary()
def mi_loss(self, y_true, y_pred):
"""Mutual information loss computed from the joint
distribution matrix and the marginals
Arguments:
y_true (tensor): Not used since this is
unsupervised learning
y_pred (tensor): stack of softmax predictions for
the Siamese latent vectors (Z and Zbar)
"""
size = self.args.batch_size
n_labels = y_pred.shape[-1]
# lower half is Z
Z = y_pred[0: size, :]
Z = K.expand_dims(Z, axis=2)
# upper half is Zbar
Zbar = y_pred[size: y_pred.shape[0], :]
Zbar = K.expand_dims(Zbar, axis=1)
# compute joint distribution (Eq 10.3.2 & .3)
P = K.batch_dot(Z, Zbar)
P = K.sum(P, axis=0)
# enforce symmetric joint distribution (Eq 10.3.4)
P = (P + K.transpose(P)) / 2.0
# normalization of total probability to 1.0
P = P / K.sum(P)
# marginal distributions (Eq 10.3.5 & .6)
Pi = K.expand_dims(K.sum(P, axis=1), axis=1)
Pj = K.expand_dims(K.sum(P, axis=0), axis=0)
Pi = K.repeat_elements(Pi, rep=n_labels, axis=1)
Pj = K.repeat_elements(Pj, rep=n_labels, axis=0)
P = K.clip(P, K.epsilon(), np.finfo(float).max)
Pi = K.clip(Pi, K.epsilon(), np.finfo(float).max)
Pj = K.clip(Pj, K.epsilon(), np.finfo(float).max)
# negative MI loss (Eq 10.3.7)
neg_mi = K.sum((P * (K.log(Pi) + K.log(Pj) - K.log(P))))
        # each head contributes 1/n_heads to the total loss
return neg_mi/self.args.heads
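    # --- Illustrative sketch (not part of the original source): the same
    # joint/marginal computation as mi_loss above, on a toy batch with plain
    # NumPy so each step is easy to inspect. Batch size 4 and 3 labels are
    # arbitrary choices for the demo.
    #
    # import numpy as np
    # batch, n_labels = 4, 3
    # rng = np.random.default_rng(0)
    # z = rng.dirichlet(np.ones(n_labels), size=batch)     # softmax outputs for Z
    # zbar = rng.dirichlet(np.ones(n_labels), size=batch)  # softmax outputs for Zbar
    # P = sum(np.outer(z[i], zbar[i]) for i in range(batch))  # joint distribution
    # P = (P + P.T) / 2.0                                  # enforce symmetry
    # P = P / P.sum()                                      # normalize to 1.0
    # Pi = P.sum(axis=1, keepdims=True)                    # row marginal
    # Pj = P.sum(axis=0, keepdims=True)                    # column marginal
    # neg_mi = np.sum(P * (np.log(Pi) + np.log(Pj) - np.log(P)))
    # print(neg_mi)  # more negative = higher mutual information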
def train(self):
"""Train function uses the data generator,
accuracy computation, and learning rate
scheduler callbacks
"""
accuracy = AccuracyCallback(self)
lr_scheduler = LearningRateScheduler(lr_schedule,
verbose=1)
callbacks = [accuracy, lr_scheduler]
self._model.fit(x=self.train_gen,
use_multiprocessing=False,
epochs=self.args.epochs,
callbacks=callbacks,
shuffle=True)
def load_eval_dataset(self):
"""Pre-load test data for evaluation
"""
(_, _), (x_test, self.y_test) = self.args.dataset.load_data()
image_size = x_test.shape[1]
x_test = np.reshape(x_test,[-1, image_size, image_size, 1])
x_test = x_test.astype('float32') / 255
x_eval = np.zeros([x_test.shape[0], *self.train_gen.input_shape])
for i in range(x_eval.shape[0]):
x_eval[i] = center_crop(x_test[i])
self.x_test = x_eval
def load_weights(self):
"""Reload model weights for evaluation
"""
        if self.args.restore_weights is None:
            raise ValueError("Must load model weights for evaluation")
        folder = "weights"
        os.makedirs(folder, exist_ok=True)
        path = os.path.join(folder, self.args.restore_weights)
        print("Loading weights... ", path)
        self._model.load_weights(path)
def eval(self):
"""Evaluate the accuracy of the current model weights
"""
y_pred = self._model.predict(self.x_test)
print("")
# accuracy per head
for head in range(self.args.heads):
if self.args.heads == 1:
y_head = y_pred
else:
y_head = y_pred[head]
y_head = np.argmax(y_head, axis=1)
accuracy = unsupervised_labels(list(self.y_test),
list(y_head),
self.n_labels,
self.n_labels)
info = "Head %d accuracy: %0.2f%%"
if self.accuracy > 0:
info += ", Old best accuracy: %0.2f%%"
data = (head, accuracy, self.accuracy)
else:
data = (head, accuracy)
print(info % data)
# if accuracy improves during training,
# save the model weights on a file
if accuracy > self.accuracy \
and self.args.save_weights is not None:
self.accuracy = accuracy
folder = self.args.save_dir
os.makedirs(folder, exist_ok=True)
path = os.path.join(folder, self.args.save_weights)
print("Saving weights... ", path)
self._model.save_weights(path)
@property
def model(self):
return self._model
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='IIC Keras')
parser.add_argument('--save-dir',
default="weights",
help='Folder for storing model weights (h5)')
    parser.add_argument('--save-weights',
                        default=None,
                        help='Model weights file name (h5)')
parser.add_argument('--dataset',
default=mnist,
help='Dataset to use')
parser.add_argument('--epochs',
type=int,
default=1200,
metavar='N',
help='Number of epochs to train')
parser.add_argument('--batch-size',
type=int,
default=512,
metavar='N',
help='Train batch size')
parser.add_argument('--heads',
type=int,
default=1,
metavar='N',
help='Number of heads')
parser.add_argument('--train',
default=False,
action='store_true',
help='Train the model')
parser.add_argument('--restore-weights',
default=None,
help='Restore saved model weights')
parser.add_argument('--eval',
default=False,
action='store_true',
                        help='Evaluate a pre-trained model. Must indicate weights file.')
parser.add_argument('--crop',
type=int,
default=4,
help='Pixels to crop from the image')
parser.add_argument('--plot-model',
default=False,
action='store_true',
help='Plot all network models')
args = parser.parse_args()
# build backbone
backbone = vgg.VGG(vgg.cfg['F'])
backbone.model.summary()
# instantiate IIC object
iic = IIC(args, backbone.model)
if args.plot_model:
plot_model(backbone.model,
to_file="model-vgg.png",
show_shapes=True)
plot_model(iic.model,
to_file="model-iic.png",
show_shapes=True)
if args.eval:
iic.load_weights()
iic.eval()
elif args.train:
iic.train()
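# --- Illustrative usage (not part of the original source), assuming this file
# is saved under its repository name iic-13.5.1.py:
#   python3 iic-13.5.1.py --train --save-weights iic.h5 --heads 2
#   python3 iic-13.5.1.py --eval --restore-weights iic.h5 --heads 2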
| [
"tensorflow.keras.backend.repeat_elements",
"tensorflow.keras.backend.batch_dot",
"tensorflow.keras.models.Model",
"numpy.reshape",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.backend.sum",
"tensorflow.keras.callbacks.LearningRateScheduler",
"tensorflow.keras.utils.plot_model",
"tensorflow.keras.backend.transpose",
"numpy.finfo",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.backend.expand_dims",
"numpy.argmax",
"tensorflow.keras.backend.log",
"tensorflow.keras.backend.epsilon",
"numpy.zeros",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Input"
] | chapter13-mi-unsupervised/iic-13.5.1.py | [(196, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""IIC Keras"""'}), False, 'import argparse\n'), (244, 'vgg.VGG', 'vgg.VGG', (["vgg.cfg['F']"], {}), False, 'import vgg\n'), (46, 'data_generator.DataGenerator', 'DataGenerator', (['args'], {'siamese': '(True)'}), False, 'from data_generator import DataGenerator\n'), (55, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'self.train_gen.input_shape', 'name': '"""x"""'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten\n'), (65, 'tensorflow.keras.models.Model', 'Model', (['inputs', 'outputs'], {'name': '"""encoder"""'}), False, 'from tensorflow.keras.models import Model\n'), (66, 'tensorflow.keras.optimizers.Adam', 'Adam', ([], {'lr': '(0.001)'}), False, 'from tensorflow.keras.optimizers import Adam\n'), (84, 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['Z'], {'axis': '(2)'}), True, 'from tensorflow.keras import backend as K\n'), (87, 'tensorflow.keras.backend.expand_dims', 'K.expand_dims', (['Zbar'], {'axis': '(1)'}), True, 'from tensorflow.keras import backend as K\n'), (89, 'tensorflow.keras.backend.batch_dot', 'K.batch_dot', (['Z', 'Zbar'], {}), True, 'from tensorflow.keras import backend as K\n'), (90, 'tensorflow.keras.backend.sum', 'K.sum', (['P'], {'axis': '(0)'}), True, 'from tensorflow.keras import backend as K\n'), (98, 'tensorflow.keras.backend.repeat_elements', 'K.repeat_elements', (['Pi'], {'rep': 'n_labels', 'axis': '(1)'}), True, 'from tensorflow.keras import backend as K\n'), (99, 'tensorflow.keras.backend.repeat_elements', 'K.repeat_elements', (['Pj'], {'rep': 'n_labels', 'axis': '(0)'}), True, 'from tensorflow.keras import backend as K\n'), (114, 'utils.AccuracyCallback', 'AccuracyCallback', (['self'], {}), False, 'from utils import AccuracyCallback, lr_schedule\n'), (115, 'tensorflow.keras.callbacks.LearningRateScheduler', 'LearningRateScheduler', (['lr_schedule'], {'verbose': '(1)'}), False, 'from tensorflow.keras.callbacks import LearningRateScheduler\n'), (130, 'numpy.reshape', 'np.reshape', (['x_test', '[-1, image_size, image_size, 1]'], {}), True, 'import numpy as np\n'), (132, 'numpy.zeros', 'np.zeros', (['[x_test.shape[0], *self.train_gen.input_shape]'], {}), True, 'import numpy as np\n'), (249, 'tensorflow.keras.utils.plot_model', 'plot_model', (['backbone.model'], {'to_file': '"""model-vgg.png"""', 'show_shapes': '(True)'}), False, 'from tensorflow.keras.utils import plot_model\n'), (252, 'tensorflow.keras.utils.plot_model', 'plot_model', (['iic.model'], {'to_file': '"""model-iic.png"""', 'show_shapes': '(True)'}), False, 'from tensorflow.keras.utils import plot_model\n'), (57, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten\n'), (94, 'tensorflow.keras.backend.sum', 'K.sum', (['P'], {}), True, 'from tensorflow.keras import backend as K\n'), (96, 'tensorflow.keras.backend.sum', 'K.sum', (['P'], {'axis': '(1)'}), True, 'from tensorflow.keras import backend as K\n'), (97, 'tensorflow.keras.backend.sum', 'K.sum', (['P'], {'axis': '(0)'}), True, 'from tensorflow.keras import backend as K\n'), (100, 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), True, 'from tensorflow.keras import backend as K\n'), (101, 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), True, 'from tensorflow.keras import backend as K\n'), (102, 'tensorflow.keras.backend.epsilon', 'K.epsilon', ([], {}), True, 'from tensorflow.keras import 
backend as K\n'), (134, 'utils.center_crop', 'center_crop', (['x_test[i]'], {}), False, 'from utils import unsupervised_labels, center_crop\n'), (147, 'os.makedirs', 'os.makedirs', (['folder'], {'exist_ok': '(True)'}), False, 'import os\n'), (148, 'os.path.join', 'os.path.join', (['folder', 'self.args.restore_weights'], {}), False, 'import os\n'), (164, 'numpy.argmax', 'np.argmax', (['y_head'], {'axis': '(1)'}), True, 'import numpy as np\n'), (92, 'tensorflow.keras.backend.transpose', 'K.transpose', (['P'], {}), True, 'from tensorflow.keras import backend as K\n'), (100, 'numpy.finfo', 'np.finfo', (['float'], {}), True, 'import numpy as np\n'), (101, 'numpy.finfo', 'np.finfo', (['float'], {}), True, 'import numpy as np\n'), (102, 'numpy.finfo', 'np.finfo', (['float'], {}), True, 'import numpy as np\n'), (183, 'os.makedirs', 'os.makedirs', (['folder'], {'exist_ok': '(True)'}), False, 'import os\n'), (184, 'os.path.join', 'os.path.join', (['folder', 'self.args.save_weights'], {}), False, 'import os\n'), (62, 'tensorflow.keras.layers.Dense', 'Dense', (['self.n_labels'], {'activation': '"""softmax"""', 'name': 'name'}), False, 'from tensorflow.keras.layers import Input, Dense, Flatten\n'), (104, 'tensorflow.keras.backend.log', 'K.log', (['P'], {}), True, 'from tensorflow.keras import backend as K\n'), (104, 'tensorflow.keras.backend.log', 'K.log', (['Pi'], {}), True, 'from tensorflow.keras import backend as K\n'), (104, 'tensorflow.keras.backend.log', 'K.log', (['Pj'], {}), True, 'from tensorflow.keras import backend as K\n')] |
ryoma-jp/samples | 85c0be62f9de1194121d225adee12c9810229960 | #! -*- coding: utf-8 -*-
#---------------------------------
# Module imports
#---------------------------------
import os
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.preprocessing.image import ImageDataGenerator
#---------------------------------
# Class: base class for training modules
#---------------------------------
class Trainer():
    # --- Constructor ---
def __init__(self, output_dir=None, model_file=None, optimizer='adam'):
        # --- Create the output directory ---
self.output_dir = output_dir
if (output_dir is not None):
os.makedirs(output_dir, exist_ok=True)
        # --- Build the model ---
        #  (placeholder: loading a model from model_file is not implemented,
        #   so both branches return None; subclasses build self.model instead)
        def _load_model(model_file):
            if (model_file is not None):
                return None
            else:
                return None
self.model = _load_model(model_file)
if (self.model is not None):
self._compile_model(optimizer)
return
    # --- Configure (compile) the model ---
    #  * optimizer: 'adam', 'sgd', or 'adam_lrs' / 'sgd_lrs' to enable
    #    learning-rate decay (default: 'adam')
def _compile_model(self, optimizer='adam'):
if (optimizer == 'adam'):
opt = tf.keras.optimizers.Adam()
elif (optimizer == 'sgd'):
opt = tf.keras.optimizers.SGD()
elif (optimizer == 'adam_lrs'):
# --- parameters ---
# https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/ExponentialDecay
# but, initial learning rate is default of Adam()
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
0.001,
decay_steps=1000,
decay_rate=0.90,
staircase=True)
opt = tf.keras.optimizers.Adam(learning_rate=lr_schedule)
elif (optimizer == 'sgd_lrs'):
# --- parameters ---
# https://www.tensorflow.org/api_docs/python/tf/keras/optimizers/schedules/ExponentialDecay
# but, initial learning rate is default of Adam()
lr_schedule = tf.keras.optimizers.schedules.ExponentialDecay(
0.01,
decay_steps=1000,
decay_rate=0.9,
staircase=True)
opt = tf.keras.optimizers.SGD(learning_rate=lr_schedule)
else:
print('[ERROR] Unknown optimizer: {}'.format(optimizer))
quit()
self.model.compile(
optimizer=opt,
loss = 'sparse_categorical_crossentropy',
metrics=['accuracy'])
return
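    # --- Illustrative sketch (not part of the original file): what the
    # 'adam_lrs' / 'sgd_lrs' schedules above compute. With staircase=True,
    # ExponentialDecay gives lr(step) = initial_lr * decay_rate ** (step //
    # decay_steps), i.e. for 'adam_lrs' the rate drops by 10% every 1000 steps:
    #
    # for step in (0, 999, 1000, 2500, 5000):
    #     print(step, 0.001 * 0.90 ** (step // 1000))
    # # -> 0.001, 0.001, 0.0009, 0.00081, 0.00059049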
    # --- Training ---
def fit(self, x_train, y_train, x_val=None, y_val=None, x_test=None, y_test=None,
da_enable=False,
batch_size=32, epochs=1000000):
        # --- Train ---
os.makedirs(os.path.join(self.output_dir, 'checkpoints'), exist_ok=True)
checkpoint_path = os.path.join(self.output_dir, 'checkpoints', 'model.ckpt')
cp_callback = keras.callbacks.ModelCheckpoint(checkpoint_path, save_weights_only=True, verbose=1)
es_callback = keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0, patience=5, verbose=1, mode='auto')
if (da_enable):
# --- no tuning ---
datagen = ImageDataGenerator(
rotation_range=10,
width_shift_range=0.2,
height_shift_range=0.2,
horizontal_flip=True)
else:
datagen = ImageDataGenerator()
datagen.fit(x_train)
if ((x_val is not None) and (y_val is not None)):
history = self.model.fit(datagen.flow(x_train, y_train, batch_size=batch_size),
steps_per_epoch=len(x_train)/batch_size, validation_data=(x_val, y_val),
epochs=epochs, callbacks=[cp_callback, es_callback])
        else:
            # note: tf.keras does not support validation_split with generator
            # inputs, so passing x_val/y_val explicitly is the safer path
            history = self.model.fit(datagen.flow(x_train, y_train, batch_size=batch_size),
                        steps_per_epoch=len(x_train)/batch_size, validation_split=0.2,
                        epochs=epochs, callbacks=[cp_callback, es_callback])
        # --- Evaluate the trained model ---
if ((x_test is not None) and (y_test is not None)):
test_loss, test_acc = self.model.evaluate(x_test, y_test, verbose=2)
print('Test Accuracy: {}'.format(test_acc))
print('Test Loss: {}'.format(test_loss))
        # --- Save the metrics ---
metrics = history.history
os.makedirs(os.path.join(self.output_dir, 'metrics'), exist_ok=True)
df_metrics = pd.DataFrame(metrics)
df_metrics.to_csv(os.path.join(self.output_dir, 'metrics', 'metrics.csv'), index_label='epoch')
epoch = df_metrics.index.values
for column in df_metrics.columns:
plt.figure()
plt.plot(epoch, df_metrics[column])
            plt.xlabel('epoch')
            plt.ylabel(column)
plt.grid(True)
plt.tight_layout()
graph_name = os.path.join(self.output_dir, 'metrics', '{}.png'.format(column))
plt.savefig(graph_name)
plt.close()
return
    # --- Inference ---
def predict(self, x_test):
predictions = self.model.predict(x_test)
return predictions
    # --- Save the model ---
def save_model(self):
        # --- Create the destination directories ---
model_dir = os.path.join(self.output_dir, 'models')
os.makedirs(os.path.join(model_dir, 'checkpoint'), exist_ok=True)
os.makedirs(os.path.join(model_dir, 'saved_model'), exist_ok=True)
os.makedirs(os.path.join(model_dir, 'hdf5'), exist_ok=True)
# --- checkpoint ---
self.model.save_weights(os.path.join(model_dir, 'checkpoint', 'model.ckpt'))
# --- saved_model ---
self.model.save(os.path.join(model_dir, 'saved_model'))
# --- hdf5 ---
self.model.save(os.path.join(model_dir, 'hdf5', 'model.h5'))
return
    # --- Get sample indices per label ---
def GetLabelIndex(self, label, onehot=True):
if (onehot):
label = np.argmax(label, axis=1)
n_category = max(label)+1
return np.array([np.arange(len(label))[label==i] for i in range(n_category)])
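    # --- Illustrative usage (not part of the original file), with integer
    # (non-one-hot) labels:
    #
    # trainer.GetLabelIndex(np.array([0, 1, 0, 1]), onehot=False)
    # # -> array([[0, 2], [1, 3]])  (sample indices grouped per class)
    #
    # Note: with unbalanced classes the per-class index arrays differ in
    # length, so recent NumPy versions reject the ragged outer np.array().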
#---------------------------------
# Class: ResNet training module
#---------------------------------
class TrainerResNet(Trainer):
    # --- Constructor ---
def __init__(self, input_shape, classes, output_dir=None, model_type='custom', optimizer='adam'):
        # --- Residual Block ---
        #  * keras.applications.resnet.ResNet cannot be accessed from
        #    application code, so the required functions are copied from the
        #    official TensorFlow source:
        #    https://github.com/tensorflow/tensorflow/blob/v2.5.0/tensorflow/python/keras/applications/resnet.py#L212
def block1(x, filters, kernel_size=3, stride=1, conv_shortcut=True, name=None):
bn_axis = 3
if conv_shortcut:
shortcut = keras.layers.Conv2D(4 * filters, 1, strides=stride, name=name + '_0_conv')(x)
shortcut = keras.layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_0_bn')(shortcut)
else:
shortcut = x
x = keras.layers.Conv2D(filters, 1, strides=stride, name=name + '_1_conv')(x)
x = keras.layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_1_bn')(x)
x = keras.layers.Activation('relu', name=name + '_1_relu')(x)
x = keras.layers.Conv2D(filters, kernel_size, padding='SAME', name=name + '_2_conv')(x)
x = keras.layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_2_bn')(x)
x = keras.layers.Activation('relu', name=name + '_2_relu')(x)
x = keras.layers.Conv2D(4 * filters, 1, name=name + '_3_conv')(x)
x = keras.layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name=name + '_3_bn')(x)
x = keras.layers.Add(name=name + '_add')([shortcut, x])
x = keras.layers.Activation('relu', name=name + '_out')(x)
return x
        # --- Residual Block stack ---
        #  * keras.applications.resnet.ResNet cannot be accessed from
        #    application code, so the required functions are copied from the
        #    official TensorFlow source:
        #    https://github.com/tensorflow/tensorflow/blob/v2.5.0/tensorflow/python/keras/applications/resnet.py#L257
def stack1(x, filters, blocks, stride1=2, name=None):
x = block1(x, filters, stride=stride1, name=name + '_block1')
for i in range(2, blocks + 1):
x = block1(x, filters, conv_shortcut=False, name=name + '_block' + str(i))
return x
        # --- Build the model ---
        #  * customized by passing the stack_fn() function as an argument
def _load_model(input_shape, classes, stack_fn):
input = keras.layers.Input(shape=input_shape)
bn_axis = 3
x = keras.layers.ZeroPadding2D(padding=((3, 3), (3, 3)), name='conv1_pad')(input)
x = keras.layers.Conv2D(64, 7, strides=2, use_bias=True, name='conv1_conv')(x)
x = keras.layers.BatchNormalization(axis=bn_axis, epsilon=1.001e-5, name='conv1_bn')(x)
x = keras.layers.Activation('relu', name='conv1_relu')(x)
x = keras.layers.ZeroPadding2D(padding=((1, 1), (1, 1)), name='pool1_pad')(x)
x = keras.layers.MaxPooling2D(3, strides=2, name='pool1_pool')(x)
x = stack_fn(x)
x = keras.layers.GlobalAveragePooling2D(name='avg_pool')(x)
x = keras.layers.Dense(classes, activation='softmax', name='predictions')(x)
model = keras.models.Model(input, x)
model.summary()
return model
def _load_model_resnet50(input_shape, classes, dbg_mode=1):
            # --- TensorFlow's ResNet50 model ---
# https://www.tensorflow.org/api_docs/python/tf/keras/applications/resnet50/ResNet50
            # dbg_mode=0: original ResNet50, dbg_mode=1: custom ResNet50
if (dbg_mode == 0):
print('[INFO] Load ResNet50 model from keras.applications')
model = keras.applications.resnet50.ResNet50()
elif (dbg_mode == 1):
def stack_fn(x):
x = stack1(x, 64, 3, stride1=1, name='conv2')
x = stack1(x, 128, 4, name='conv3')
x = stack1(x, 256, 6, name='conv4')
return stack1(x, 512, 3, name='conv5')
print('[INFO] Load ResNet50 model (custom implementation)')
model = _load_model(input_shape, classes, stack_fn)
return model
        # --- Initialize the base class ---
super().__init__(output_dir)
        # --- Build the model ---
if (model_type == 'custom'):
def stack_fn(x):
x = stack1(x, 32, 3, stride1=1, name='conv2')
return stack1(x, 64, 4, name='conv3')
self.model = _load_model(input_shape, classes, stack_fn)
self._compile_model(optimizer=optimizer)
elif (model_type == 'resnet50'):
self.model = _load_model_resnet50(input_shape, classes, dbg_mode=1)
self._compile_model(optimizer=optimizer)
else:
print('[ERROR] Unknown model_type: {}'.format(model_type))
return
if (self.output_dir is not None):
keras.utils.plot_model(self.model, os.path.join(self.output_dir, 'plot_model.png'), show_shapes=True)
return
#---------------------------------
# Class: CNN training module
#---------------------------------
class TrainerCNN(Trainer):
    # --- Constructor ---
def __init__(self, input_shape, output_dir=None, optimizer='adam'):
        # --- Build the model ---
def _load_model(input_shape):
model = keras.models.Sequential()
model.add(keras.layers.Conv2D(32, (3, 3), activation='relu', input_shape=input_shape))
model.add(keras.layers.MaxPooling2D((2, 2)))
model.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(keras.layers.MaxPooling2D((2, 2)))
model.add(keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(keras.layers.MaxPooling2D((2, 2)))
model.add(keras.layers.Flatten(input_shape=input_shape))
model.add(keras.layers.Dense(64, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))
model.summary()
return model
        # --- Initialize the base class ---
super().__init__(output_dir)
        # --- Build the model ---
self.model = _load_model(input_shape)
self._compile_model(optimizer=optimizer)
if (self.output_dir is not None):
keras.utils.plot_model(self.model, os.path.join(self.output_dir, 'plot_model.png'), show_shapes=True)
return
#---------------------------------
# Class: MLP training module
#---------------------------------
class TrainerMLP(Trainer):
    # --- Constructor ---
def __init__(self, input_shape, output_dir=None, optimizer='adam'):
        # --- Build the model ---
def _load_model(input_shape):
model = keras.models.Sequential()
model.add(keras.layers.Flatten(input_shape=input_shape))
model.add(keras.layers.Dense(128, activation='relu'))
model.add(keras.layers.Dense(10, activation='softmax'))
model.summary()
return model
        # --- Initialize the base class ---
super().__init__(output_dir)
        # --- Build the model ---
self.model = _load_model(input_shape)
self._compile_model(optimizer=optimizer)
if (self.output_dir is not None):
keras.utils.plot_model(self.model, os.path.join(self.output_dir, 'plot_model.png'), show_shapes=True)
return
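# --- Illustrative usage (not part of the original file): a minimal sketch of
# the Trainer API above, assuming MNIST-style arrays x_train/x_test of shape
# (N, 28, 28, 1) with integer labels y_train/y_test (hypothetical names), and
# pydot/graphviz installed for the plot_model() call in the constructor:
#
# trainer = TrainerMLP(input_shape=(28, 28, 1), output_dir='./output')
# trainer.fit(x_train, y_train, x_test=x_test, y_test=y_test,
#             batch_size=128, epochs=10)
# trainer.save_model()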
#---------------------------------
# Main: Trainer module test
#---------------------------------
def main():
import argparse
def _argparse():
        parser = argparse.ArgumentParser(description='Trainer module test\n'
                    '  * test_mode=\'ResNet\': check the ResNet model structure (compare the custom ResNet50 against the official TensorFlow model)',
                    formatter_class=argparse.RawTextHelpFormatter)
        # --- Add arguments ---
parser.add_argument('--test_mode', dest='test_mode', type=str, default='ResNet', required=False, \
                            help='Test mode (ResNet)')
args = parser.parse_args()
return args
    # --- Parse arguments ---
args = _argparse()
print(args.test_mode)
    # --- Module test ---
if (args.test_mode == 'ResNet'):
trainer = TrainerResNet([224, 224, 3], 1000, output_dir=None, model_type='resnet50')
else:
print('[ERROR] Unknown test_mode: {}'.format(args.test_mode))
return
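# --- Illustrative usage (not part of the original file):
#   python3 trainer.py --test_mode ResNet
# builds the custom ResNet50 via TrainerResNet above and prints its summary.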
if __name__ == '__main__':
main()
| [
"pandas.DataFrame",
"matplotlib.pyplot.plot",
"tensorflow.keras.layers.ZeroPadding2D",
"tensorflow.keras.optimizers.SGD",
"tensorflow.keras.applications.resnet50.ResNet50",
"matplotlib.pyplot.tight_layout",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.layers.Conv2D",
"numpy.argmax",
"matplotlib.pyplot.close",
"tensorflow.keras.layers.Add",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten",
"matplotlib.pyplot.figure",
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.savefig",
"matplotlib.pyplot.ylabel",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.optimizers.schedules.ExponentialDecay",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.BatchNormalization",
"matplotlib.pyplot.grid",
"matplotlib.pyplot.xlabel",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Input"
] | python/tensorflow_sample/Ver2.x/06_optimizer/trainer/trainer.py | [(83, 'os.path.join', 'os.path.join', (['self.output_dir', '"""checkpoints"""', '"""model.ckpt"""'], {}), False, 'import os\n'), (84, 'tensorflow.keras.callbacks.ModelCheckpoint', 'keras.callbacks.ModelCheckpoint', (['checkpoint_path'], {'save_weights_only': '(True)', 'verbose': '(1)'}), False, 'from tensorflow import keras\n'), (85, 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(5)', 'verbose': '(1)', 'mode': '"""auto"""'}), False, 'from tensorflow import keras\n'), (116, 'pandas.DataFrame', 'pd.DataFrame', (['metrics'], {}), True, 'import pandas as pd\n'), (143, 'os.path.join', 'os.path.join', (['self.output_dir', '"""models"""'], {}), False, 'import os\n'), (349, 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'description': '"""Trainerモジュールテスト\n * test_mode=\'ResNet\': ResNetのモデル構造確認(ResNet50の構造をTensorFlow公開モデルと比較)"""', 'formatter_class': 'argparse.RawTextHelpFormatter'}), False, 'import argparse\n'), (24, 'os.makedirs', 'os.makedirs', (['output_dir'], {'exist_ok': '(True)'}), False, 'import os\n'), (43, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), True, 'import tensorflow as tf\n'), (82, 'os.path.join', 'os.path.join', (['self.output_dir', '"""checkpoints"""'], {}), False, 'import os\n'), (89, 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'rotation_range': '(10)', 'width_shift_range': '(0.2)', 'height_shift_range': '(0.2)', 'horizontal_flip': '(True)'}), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), (95, 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), (115, 'os.path.join', 'os.path.join', (['self.output_dir', '"""metrics"""'], {}), False, 'import os\n'), (117, 'os.path.join', 'os.path.join', (['self.output_dir', '"""metrics"""', '"""metrics.csv"""'], {}), False, 'import os\n'), (121, 'matplotlib.pyplot.figure', 'plt.figure', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (122, 'matplotlib.pyplot.plot', 'plt.plot', (['epoch', 'df_metrics[column]'], {}), True, 'import matplotlib.pyplot as plt\n'), (123, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['column'], {}), True, 'import matplotlib.pyplot as plt\n'), (124, 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""epoch"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (125, 'matplotlib.pyplot.grid', 'plt.grid', (['(True)'], {}), True, 'import matplotlib.pyplot as plt\n'), (126, 'matplotlib.pyplot.tight_layout', 'plt.tight_layout', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (129, 'matplotlib.pyplot.savefig', 'plt.savefig', (['graph_name'], {}), True, 'import matplotlib.pyplot as plt\n'), (131, 'matplotlib.pyplot.close', 'plt.close', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (144, 'os.path.join', 'os.path.join', (['model_dir', '"""checkpoint"""'], {}), False, 'import os\n'), (145, 'os.path.join', 'os.path.join', (['model_dir', '"""saved_model"""'], {}), False, 'import os\n'), (146, 'os.path.join', 'os.path.join', (['model_dir', '"""hdf5"""'], {}), False, 'import os\n'), (149, 'os.path.join', 'os.path.join', (['model_dir', '"""checkpoint"""', '"""model.ckpt"""'], {}), False, 'import os\n'), (152, 'os.path.join', 'os.path.join', (['model_dir', '"""saved_model"""'], {}), False, 'import os\n'), (155, 
'os.path.join', 'os.path.join', (['model_dir', '"""hdf5"""', '"""model.h5"""'], {}), False, 'import os\n'), (162, 'numpy.argmax', 'np.argmax', (['label'], {'axis': '(1)'}), True, 'import numpy as np\n'), (214, 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': 'input_shape'}), False, 'from tensorflow import keras\n'), (231, 'tensorflow.keras.models.Model', 'keras.models.Model', (['input', 'x'], {}), False, 'from tensorflow import keras\n'), (286, 'tensorflow.keras.models.Sequential', 'keras.models.Sequential', ([], {}), False, 'from tensorflow import keras\n'), (321, 'tensorflow.keras.models.Sequential', 'keras.models.Sequential', ([], {}), False, 'from tensorflow import keras\n'), (45, 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {}), True, 'import tensorflow as tf\n'), (186, 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['filters', '(1)'], {'strides': 'stride', 'name': "(name + '_1_conv')"}), False, 'from tensorflow import keras\n'), (187, 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {'axis': 'bn_axis', 'epsilon': '(1.001e-05)', 'name': "(name + '_1_bn')"}), False, 'from tensorflow import keras\n'), (188, 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {'name': "(name + '_1_relu')"}), False, 'from tensorflow import keras\n'), (190, 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['filters', 'kernel_size'], {'padding': '"""SAME"""', 'name': "(name + '_2_conv')"}), False, 'from tensorflow import keras\n'), (191, 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {'axis': 'bn_axis', 'epsilon': '(1.001e-05)', 'name': "(name + '_2_bn')"}), False, 'from tensorflow import keras\n'), (192, 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {'name': "(name + '_2_relu')"}), False, 'from tensorflow import keras\n'), (194, 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(4 * filters)', '(1)'], {'name': "(name + '_3_conv')"}), False, 'from tensorflow import keras\n'), (195, 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {'axis': 'bn_axis', 'epsilon': '(1.001e-05)', 'name': "(name + '_3_bn')"}), False, 'from tensorflow import keras\n'), (197, 'tensorflow.keras.layers.Add', 'keras.layers.Add', ([], {'name': "(name + '_add')"}), False, 'from tensorflow import keras\n'), (198, 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {'name': "(name + '_out')"}), False, 'from tensorflow import keras\n'), (217, 'tensorflow.keras.layers.ZeroPadding2D', 'keras.layers.ZeroPadding2D', ([], {'padding': '((3, 3), (3, 3))', 'name': '"""conv1_pad"""'}), False, 'from tensorflow import keras\n'), (218, 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(64)', '(7)'], {'strides': '(2)', 'use_bias': '(True)', 'name': '"""conv1_conv"""'}), False, 'from tensorflow import keras\n'), (220, 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {'axis': 'bn_axis', 'epsilon': '(1.001e-05)', 'name': '"""conv1_bn"""'}), False, 'from tensorflow import keras\n'), (221, 'tensorflow.keras.layers.Activation', 'keras.layers.Activation', (['"""relu"""'], {'name': '"""conv1_relu"""'}), False, 'from tensorflow import keras\n'), (223, 'tensorflow.keras.layers.ZeroPadding2D', 'keras.layers.ZeroPadding2D', ([], {'padding': '((1, 1), (1, 1))', 'name': '"""pool1_pad"""'}), False, 'from tensorflow import keras\n'), (224, 
'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', (['(3)'], {'strides': '(2)', 'name': '"""pool1_pool"""'}), False, 'from tensorflow import keras\n'), (228, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'keras.layers.GlobalAveragePooling2D', ([], {'name': '"""avg_pool"""'}), False, 'from tensorflow import keras\n'), (229, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['classes'], {'activation': '"""softmax"""', 'name': '"""predictions"""'}), False, 'from tensorflow import keras\n'), (242, 'tensorflow.keras.applications.resnet50.ResNet50', 'keras.applications.resnet50.ResNet50', ([], {}), False, 'from tensorflow import keras\n'), (274, 'os.path.join', 'os.path.join', (['self.output_dir', '"""plot_model.png"""'], {}), False, 'import os\n'), (287, 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""', 'input_shape': 'input_shape'}), False, 'from tensorflow import keras\n'), (288, 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', (['(2, 2)'], {}), False, 'from tensorflow import keras\n'), (289, 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), False, 'from tensorflow import keras\n'), (290, 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', (['(2, 2)'], {}), False, 'from tensorflow import keras\n'), (291, 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""relu"""'}), False, 'from tensorflow import keras\n'), (292, 'tensorflow.keras.layers.MaxPooling2D', 'keras.layers.MaxPooling2D', (['(2, 2)'], {}), False, 'from tensorflow import keras\n'), (293, 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {'input_shape': 'input_shape'}), False, 'from tensorflow import keras\n'), (294, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(64)'], {'activation': '"""relu"""'}), False, 'from tensorflow import keras\n'), (295, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {'activation': '"""softmax"""'}), False, 'from tensorflow import keras\n'), (308, 'os.path.join', 'os.path.join', (['self.output_dir', '"""plot_model.png"""'], {}), False, 'import os\n'), (322, 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {'input_shape': 'input_shape'}), False, 'from tensorflow import keras\n'), (323, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(128)'], {'activation': '"""relu"""'}), False, 'from tensorflow import keras\n'), (324, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(10)'], {'activation': '"""softmax"""'}), False, 'from tensorflow import keras\n'), (337, 'os.path.join', 'os.path.join', (['self.output_dir', '"""plot_model.png"""'], {}), False, 'import os\n'), (50, 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', (['(0.001)'], {'decay_steps': '(1000)', 'decay_rate': '(0.9)', 'staircase': '(True)'}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': 'lr_schedule'}), True, 'import tensorflow as tf\n'), (181, 'tensorflow.keras.layers.Conv2D', 'keras.layers.Conv2D', (['(4 * filters)', '(1)'], {'strides': 'stride', 'name': "(name + '_0_conv')"}), False, 'from tensorflow import keras\n'), (182, 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {'axis': 'bn_axis', 'epsilon': '(1.001e-05)', 'name': "(name + '_0_bn')"}), False, 'from tensorflow 
import keras\n'), (60, 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', (['(0.01)'], {'decay_steps': '(1000)', 'decay_rate': '(0.9)', 'staircase': '(True)'}), True, 'import tensorflow as tf\n'), (65, 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': 'lr_schedule'}), True, 'import tensorflow as tf\n')] |
a5372935/Oct_resnet18 | 9e835634151398bb6704c251807d28b21fde5b86 | import numpy as np
import warnings
from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import add, Flatten
from tensorflow.keras.models import Model
from tensorflow.keras.preprocessing import image
import tensorflow.keras.backend as K
from keras_applications.imagenet_utils import _obtain_input_shape
from keras.engine.topology import get_source_inputs
# bottleneck residual block with an identity shortcut (input and output shapes match)
def identity_block(input_tensor, kernel_size, filters, stage, block):
filters1, filters2, filters3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
    x = Conv2D(filters1, (1, 1), name=conv_name_base + '2a')(input_tensor)  # TODO: decide whether to pass kernel_initializer='he_normal'
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size,
padding='same', name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
x = add([x, input_tensor])
x = Activation('relu')(x)
return x
# bottleneck residual block with a projection (1x1 conv) shortcut, used where
# the channel count or spatial resolution changes
def conv_block(input_tensor, kernel_size, filters, stage, block, strides=(2, 2)):
filters1, filters2, filters3 = filters
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
conv_name_base = 'res' + str(stage) + block + '_branch'
bn_name_base = 'bn' + str(stage) + block + '_branch'
x = Conv2D(filters1, (1, 1), strides=strides,
name=conv_name_base + '2a')(input_tensor)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2a')(x)
x = Activation('relu')(x)
x = Conv2D(filters2, kernel_size, padding='same',
name=conv_name_base + '2b')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2b')(x)
x = Activation('relu')(x)
x = Conv2D(filters3, (1, 1), name=conv_name_base + '2c')(x)
x = BatchNormalization(axis=bn_axis, name=bn_name_base + '2c')(x)
shortcut = Conv2D(filters3, (1, 1), strides=strides,
name=conv_name_base + '1')(input_tensor)
shortcut = BatchNormalization(axis=bn_axis, name=bn_name_base + '1')(shortcut)
x = add([x, shortcut])
x = Activation('relu')(x)
return x
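# --- Illustrative sketch (not part of the original file): shape behaviour of
# the two blocks above on a dummy tensor. conv_block changes the channel count
# (and, with the default strides=(2, 2), halves the spatial size) through its
# projection shortcut, while identity_block leaves the shape unchanged:
#
# inp = Input(shape=(56, 56, 256))
# y = conv_block(inp, 3, [64, 64, 256], stage=9, block='a', strides=(1, 1))
# y = identity_block(y, 3, [64, 64, 256], stage=9, block='b')
# print(Model(inp, y).output_shape)  # -> (None, 56, 56, 256)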
def ResNet50(include_top = False,
weights=None,
input_tensor=None, input_shape=None,
pooling=None,
classes=1000):
# if weights not in {'imagenet', None}:
# raise ValueError('The `weights` argument should be either '
# '`None` (random initialization) or `imagenet` '
# '(pre-training on ImageNet).')
# if weights == 'imagenet' and include_top and classes != 1000:
# raise ValueError('If using `weights` as imagenet with `include_top`'
# ' as true, `classes` should be 1000')
input_shape = _obtain_input_shape(input_shape,
default_size=224,
min_size=32,
data_format=K.image_data_format(),
require_flatten = include_top)
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
if not K.is_keras_tensor(input_tensor):
img_input = Input(tensor=input_tensor, shape=input_shape)
else:
img_input = input_tensor
if K.image_data_format() == 'channels_last':
bn_axis = 3
else:
bn_axis = 1
#x = ZeroPadding2D((3, 3))(img_input)
x = Conv2D(64, (7, 7), strides=(2, 2), name='conv1')(img_input)
x = BatchNormalization(axis=bn_axis, name='bn_conv1')(x)
x = Activation('relu')(x)
#x = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = conv_block(x, 3, [64, 64, 256], stage=2, block='a', strides=(1, 1))
x = identity_block(x, 3, [64, 64, 256], stage=2, block='b')
x = identity_block(x, 3, [64, 64, 256], stage=2, block='c')
x = conv_block(x, 3, [128, 128, 512], stage=3, block='a')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='b')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='c')
x = identity_block(x, 3, [128, 128, 512], stage=3, block='d')
x = conv_block(x, 3, [256, 256, 1024], stage=4, block='a')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='b')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='c')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='d')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='e')
x = identity_block(x, 3, [256, 256, 1024], stage=4, block='f')
x = conv_block(x, 3, [512, 512, 2048], stage=5, block='a')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='b')
x = identity_block(x, 3, [512, 512, 2048], stage=5, block='c')
x = AveragePooling2D(name='avg_pool')(x)
if include_top:
x = Flatten()(x)
x = Dense(classes, activation='softmax', name='fc1000')(x)
else:
if pooling == 'avg':
x = GlobalAveragePooling2D()(x)
x = Dense(classes, activation='softmax', name='resnet50')(x)
elif pooling == 'max':
x = GlobalMaxPooling2D()(x)
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
model = Model(inputs, x, name='resnet50')
# if weights == 'imagenet':
# if include_top:
# weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels.h5',
# WEIGHTS_PATH,
# cache_subdir='models',
# md5_hash='a7b3fe01876f51b976af0dea6bc144eb')
# else:
# weights_path = get_file('resnet50_weights_tf_dim_ordering_tf_kernels_notop.h5',
# WEIGHTS_PATH_NO_TOP,
# cache_subdir='models',
# md5_hash='a268eb855778b3df3c7506639542a6af')
# model.load_weights(weights_path)
# if K.backend() == 'theano':
# layer_utils.convert_all_kernels_in_model(model)
# if K.image_data_format() == 'channels_first':
# if include_top:
# maxpool = model.get_layer(name='avg_pool')
# shape = maxpool.output_shape[1:]
# dense = model.get_layer(name='fc1000')
# layer_utils.convert_dense_weights_data_format(dense, shape, 'channels_first')
# if K.backend() == 'tensorflow':
# warnings.warn('You are using the TensorFlow backend, yet you '
# 'are using the Theano '
# 'image data format convention '
# '(`image_data_format="channels_first"`). '
# 'For best performance, set '
# '`image_data_format="channels_last"` in '
# 'your Keras config '
# 'at ~/.keras/keras.json.')
return model | [
"tensorflow.keras.layers.AveragePooling2D",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.backend.image_data_format",
"tensorflow.keras.models.Model",
"tensorflow.keras.backend.is_keras_tensor",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.GlobalMaxPooling2D",
"tensorflow.keras.layers.add",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.layers.Input"
] | Resnet_models/res50.py | [(35, 'tensorflow.keras.layers.add', 'add', (['[x, input_tensor]'], {}), False, 'from tensorflow.keras.layers import add, Flatten\n'), (66, 'tensorflow.keras.layers.add', 'add', (['[x, shortcut]'], {}), False, 'from tensorflow.keras.layers import add, Flatten\n'), (147, 'tensorflow.keras.models.Model', 'Model', (['inputs', 'x'], {'name': '"""resnet50"""'}), False, 'from tensorflow.keras.models import Model\n'), (16, 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), True, 'import tensorflow.keras.backend as K\n'), (23, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters1', '(1, 1)'], {'name': "(conv_name_base + '2a')"}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (24, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'bn_axis', 'name': "(bn_name_base + '2a')"}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (25, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (27, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters2', 'kernel_size'], {'padding': '"""same"""', 'name': "(conv_name_base + '2b')"}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (29, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'bn_axis', 'name': "(bn_name_base + '2b')"}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (30, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (32, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters3', '(1, 1)'], {'name': "(conv_name_base + '2c')"}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (33, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'bn_axis', 'name': "(bn_name_base + '2c')"}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (36, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (42, 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), True, 'import tensorflow.keras.backend as K\n'), (49, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters1', '(1, 1)'], {'strides': 'strides', 'name': "(conv_name_base + '2a')"}), False, 'from 
tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (51, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'bn_axis', 'name': "(bn_name_base + '2a')"}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (52, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (54, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters2', 'kernel_size'], {'padding': '"""same"""', 'name': "(conv_name_base + '2b')"}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (56, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'bn_axis', 'name': "(bn_name_base + '2b')"}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (57, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (59, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters3', '(1, 1)'], {'name': "(conv_name_base + '2c')"}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (60, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'bn_axis', 'name': "(bn_name_base + '2c')"}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (62, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters3', '(1, 1)'], {'strides': 'strides', 'name': "(conv_name_base + '1')"}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (64, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'bn_axis', 'name': "(bn_name_base + '1')"}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (67, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (93, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (99, 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), True, 'import 
tensorflow.keras.backend as K\n'), (105, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(7, 7)'], {'strides': '(2, 2)', 'name': '"""conv1"""'}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (106, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': 'bn_axis', 'name': '"""bn_conv1"""'}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (107, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (130, 'tensorflow.keras.layers.AveragePooling2D', 'AveragePooling2D', ([], {'name': '"""avg_pool"""'}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (143, 'keras.engine.topology.get_source_inputs', 'get_source_inputs', (['input_tensor'], {}), False, 'from keras.engine.topology import get_source_inputs\n'), (89, 'tensorflow.keras.backend.image_data_format', 'K.image_data_format', ([], {}), True, 'import tensorflow.keras.backend as K\n'), (95, 'tensorflow.keras.backend.is_keras_tensor', 'K.is_keras_tensor', (['input_tensor'], {}), True, 'import tensorflow.keras.backend as K\n'), (96, 'tensorflow.keras.layers.Input', 'Input', ([], {'tensor': 'input_tensor', 'shape': 'input_shape'}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (133, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import add, Flatten\n'), (134, 'tensorflow.keras.layers.Dense', 'Dense', (['classes'], {'activation': '"""softmax"""', 'name': '"""fc1000"""'}), False, 'from tensorflow.keras.layers import Dense\n'), (137, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n'), (138, 'tensorflow.keras.layers.Dense', 'Dense', (['classes'], {'activation': '"""softmax"""', 'name': '"""resnet50"""'}), False, 'from tensorflow.keras.layers import Dense\n'), (140, 'tensorflow.keras.layers.GlobalMaxPooling2D', 'GlobalMaxPooling2D', ([], {}), False, 'from tensorflow.keras.layers import Input, Conv2D, BatchNormalization, Activation, ZeroPadding2D, AveragePooling2D, MaxPooling2D, GlobalAveragePooling2D, GlobalMaxPooling2D\n')] |
blueprintparadise/Embedding_Facenet | 9b4004243047a82f95739ad1cb508019d762e83b | from Face_Recog.basemodels import VGGFace
import os
from pathlib import Path
import gdown
import numpy as np
import tensorflow as tf
tf_version = int(tf.__version__.split(".")[0])
if tf_version == 1:
import keras
from keras.models import Model, Sequential
from keras.layers import Convolution2D, Flatten, Activation
elif tf_version == 2:
from tensorflow import keras
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Convolution2D, Flatten, Activation
#url = 'https://drive.google.com/uc?id=1YCox_4kJ-BYeXq27uUbasu--yz28zUMV'
def loadModel(url = 'https://github.com/serengil/deepface_models/releases/download/v1.0/age_model_weights.h5'):
model = VGGFace.baseModel()
#--------------------------
classes = 101
	# attach a 101-way age-classification head on top of the VGGFace backbone
	base_model_output = Convolution2D(classes, (1, 1), name='predictions')(model.layers[-4].output)
base_model_output = Flatten()(base_model_output)
base_model_output = Activation('softmax')(base_model_output)
#--------------------------
age_model = Model(inputs=model.input, outputs=base_model_output)
#--------------------------
#load weights
home = str(Path.home())
	if not os.path.isfile(home+'/.deepface/weights/age_model_weights.h5'):
		print("age_model_weights.h5 will be downloaded...")
		# make sure the weights folder exists before downloading (deepface
		# normally creates it during package initialization)
		os.makedirs(home+'/.deepface/weights', exist_ok=True)
		output = home+'/.deepface/weights/age_model_weights.h5'
		gdown.download(url, output, quiet=False)
age_model.load_weights(home+'/.deepface/weights/age_model_weights.h5')
return age_model
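# --- Illustrative usage (not part of the original file); 'img_batch' is a
# hypothetical array of preprocessed face crops shaped (N, 224, 224, 3):
#
# age_model = loadModel()                      # downloads the weights on first use
# predictions = age_model.predict(img_batch)   # one 101-way softmax per face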
#--------------------------
def findApparentAge(age_predictions):
	# expected value over the 101 age bins (0..100)
	output_indexes = np.array([i for i in range(0, 101)])
apparent_age = np.sum(age_predictions * output_indexes)
return apparent_age
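# --- Illustrative sketch (not part of the original file): findApparentAge is
# the expected value over the 101 age bins. A prediction putting 70% on age 30
# and 30% on age 40 yields 0.7*30 + 0.3*40 = 33.0:
#
# probs = np.zeros(101)
# probs[30], probs[40] = 0.7, 0.3
# print(findApparentAge(probs))  # -> 33.0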
| [
"tensorflow.__version__.split",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Convolution2D",
"tensorflow.keras.models.Sequential",
"numpy.sum",
"tensorflow.keras.layers.Flatten"
] | Face_Recog/extendedmodels/Age.py | [(23, 'Face_Recog.basemodels.VGGFace.baseModel', 'VGGFace.baseModel', ([], {}), False, 'from Face_Recog.basemodels import VGGFace\n'), (28, 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras.models import Model, Sequential\n'), (35, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'model.input', 'outputs': 'base_model_output'}), False, 'from tensorflow.keras.models import Model, Sequential\n'), (57, 'numpy.sum', 'np.sum', (['(age_predictions * output_indexes)'], {}), True, 'import numpy as np\n'), (8, 'tensorflow.__version__.split', 'tf.__version__.split', (['"""."""'], {}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.keras.layers.Convolution2D', 'Convolution2D', (['classes', '(1, 1)'], {'name': '"""predictions"""'}), False, 'from tensorflow.keras.layers import Convolution2D, Flatten, Activation\n'), (30, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import Convolution2D, Flatten, Activation\n'), (31, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import Convolution2D, Flatten, Activation\n'), (41, 'pathlib.Path.home', 'Path.home', ([], {}), False, 'from pathlib import Path\n'), (43, 'os.path.isfile', 'os.path.isfile', (["(home + '/.deepface/weights/age_model_weights.h5')"], {}), False, 'import os\n'), (47, 'gdown.download', 'gdown.download', (['url', 'output'], {'quiet': '(False)'}), False, 'import gdown\n')] |
MLPA-DKU/Gait-Analysis | 2c288561be65e76bebd894df8293d856c4078e2c | import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.models import Model
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import Bidirectional
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import concatenate
import numpy as np
from Code.preprocessing import to_categorical_unit
class binary_blocks:
def __init__(self):
self.stack = None
self.state = False
def stack_block(self, binary):
if self.state is False:
self.stack = binary
self.state = True
else:
            # accumulate onto the previously stored block; tf.stack expects a list
            self.stack = tf.stack([self.stack, binary])
class cropnet:
def __init__(self):
self.batch_size = 256
self.row_point = 128
self.in_col = dict()
self.channels = 1
self.input_shape = list()
self.nb_class = None
self.batch_dataset = None
self.generator = None
self.discriminator = None
def network_init(self, train, nb_class):
self.nb_class = nb_class
for i in range(3):
data = train[f"data_{i}"]
row, col = data.shape
self.in_col[f"{i}"] = col
self.input_shape.append((self.row_point, col, self.channels))
batch_per_epoch = row // self.batch_size
batch = dict()
for i in range(3):
batch[f"data_{i}"] = list()
batch["label"] = list()
for i in range(3):
target = train[f"data_{i}"]
for batch_time in range(batch_per_epoch):
temp_batch = np.zeros((batch_per_epoch, self.batch_size, self.in_col[f"{i}"]))
temp_label = np.zeros((batch_per_epoch, self.batch_size, nb_class))
pre = batch_time * self.batch_size
aft = (batch_time + 1) * self.batch_size
for n, batch_item in enumerate(target[pre:aft]):
                    # index the tag array with the batch offset; the loop-local n restarts at 0 each batch
                    onehot_label = to_categorical_unit(train["tag"][pre + n], nb_class)
temp_batch[batch_time, n, :] = batch_item
temp_label[batch_time, n, :] = onehot_label
batch[f"data_{i}"].append(temp_batch)
batch["label"].append(temp_label)
self.batch_dataset = batch
# self.generator = self.build_generator()
def build_generator(self):
input1 = Input(self.input_shape[0])
input2 = Input(self.input_shape[1])
input3 = Input(self.input_shape[2])
cnn_block = gen_cnn_block(input1)
attention_block1 = gen_lstm_block(1, input2)
attention_block2 = gen_lstm_block(2, input3)
        # keras.backend.sum returns a tensor, not a callable layer, so it cannot be
        # applied to another tensor; fuse the branches additively instead (one
        # reasonable reading of the original "summation" intent)
        cnn_block = keras.layers.add([cnn_block, attention_block1])
        summation = keras.layers.add([cnn_block, attention_block2])
        bc = Dense(units=1, activation='tanh')(summation)
        model = Model(inputs=[input1, input2, input3], outputs=bc)
return model
def train(self, dataset, epochs=20, batch_size=128):
train_x, train_y, test_x, test_y = dataset
bb = binary_blocks()
batch_data1 = np.zeros((batch_size, self.row_point, 16))
batch_data2 = np.zeros((batch_size, self.row_point, 6))
batch_data3 = np.zeros((batch_size, self.row_point, 6))
batch_label = np.zeros((batch_size, 2))
def gen_lstm_block(idx, input_layer):
hidden_cells = 64
lstm1 = keras.layers.LSTM(hidden_cells, return_sequences=True, recurrent_dropout=0.2, name=f'{idx}_gen_lstm_layer1')(input_layer)
lstm2 = keras.layers.LSTM(hidden_cells, return_sequences=True, recurrent_dropout=0.2, name=f'{idx}_gen_lstm_layer2')(lstm1)
    # Flatten is a layer: instantiate it first, then apply it to the tensor
    lstm_flatten = Flatten(name=f'{idx}_gen_lstm_flatten')(lstm2)
lstm_dense1 = Dense(units=256, name=f'{idx}_gen_lstm_dense1')(lstm_flatten)
lstm_dense2 = Dense(units=64, name=f'{idx}_gen_lstm_dense2')(lstm_dense1)
return lstm_dense2
def gen_cnn_block(input_layer):
nb_filter = 32
nb_strides = 1
input1_cnn1 = keras.layers.Conv1D(filters=nb_filter, kernel_size=20,
strides=nb_strides, activation='relu')(input_layer)
input1_batchnorm1 = keras.layers.BatchNormalization()(input1_cnn1)
input1_cnn2 = keras.layers.Conv1D(filters=nb_filter * 2, kernel_size=20,
strides=nb_strides, activation='relu')(input1_batchnorm1)
input1_batchnorm2 = keras.layers.BatchNormalization()(input1_cnn2)
input1_cnn3 = keras.layers.Conv1D(filters=nb_filter * 4, kernel_size=20,
strides=nb_strides, activation='relu')(input1_batchnorm2)
input1_batchnorm3 = keras.layers.BatchNormalization()(input1_cnn3)
input1_flatten = keras.layers.Flatten()(input1_batchnorm3)
dense_layer1 = keras.layers.Dense(units=256, activation='relu')(input1_flatten)
dense_layer2 = keras.layers.Dense(units=64, activation='relu')(dense_layer1)
return dense_layer2
def generative_block(shape_list, train):
input1 = Input(shape_list[0])
input2 = Input(shape_list[1])
input3 = Input(shape_list[2])
cnn_block = gen_cnn_block(input1)
attention_block1 = gen_lstm_block(1, input2)
attention_block2 = gen_lstm_block(2, input3)
    # same fix as in build_generator: keras.backend.sum is not callable on a tensor
    cnn_block = keras.layers.add([cnn_block, attention_block1])
    summation = keras.layers.add([cnn_block, attention_block2])
    bc = Dense(units=1, activation='tanh')(summation)
    model = Model(inputs=[input1, input2, input3], outputs=bc)
return model(train)
def validate_block():
    raise NotImplementedError
def cropping_network(dataset, nb_class):
bb = binary_blocks()
framework = cropnet()
train, test = dataset
framework.network_init(train, nb_class)
# train_x = train[:, :-2]
# train_y = train[:, -1]
# test_x = test[:, :-2]
# test_y = test[:, -1]
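# Minimal smoke-test sketch (illustrative): synthetic inputs shaped like the three
# feature blocks this module expects ("data_0".."data_2" with 16/6/6 columns, as
# hard-coded in cropnet.train, plus a per-row "tag"). Assumes the project's
# to_categorical_unit behaves like a standard one-hot encoder.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    train = {f"data_{i}": rng.random((512, cols)) for i, cols in enumerate((16, 6, 6))}
    train["tag"] = rng.integers(0, 2, size=512)
    test = {f"data_{i}": rng.random((64, cols)) for i, cols in enumerate((16, 6, 6))}
    test["tag"] = rng.integers(0, 2, size=64)
    cropping_network((train, test), nb_class=2)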
| [
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv1D",
"tensorflow.keras.backend.sum",
"tensorflow.stack",
"tensorflow.keras.layers.LSTM",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Flatten",
"numpy.zeros",
"tensorflow.keras.layers.Input"
] | Code/Model/cropNet_model.py | [(112, 'tensorflow.keras.layers.Flatten', 'Flatten', (['lstm2'], {'name': 'f"""{idx}_gen_lstm_flatten"""'}), False, 'from tensorflow.keras.layers import Flatten\n'), (141, 'tensorflow.keras.layers.Input', 'Input', (['shape_list[0]'], {}), False, 'from tensorflow.keras.layers import Input\n'), (142, 'tensorflow.keras.layers.Input', 'Input', (['shape_list[1]'], {}), False, 'from tensorflow.keras.layers import Input\n'), (143, 'tensorflow.keras.layers.Input', 'Input', (['shape_list[2]'], {}), False, 'from tensorflow.keras.layers import Input\n'), (152, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[input1, input2, input3]', 'output': 'bc'}), False, 'from tensorflow.keras.models import Model\n'), (82, 'tensorflow.keras.layers.Input', 'Input', (['self.input_shape[0]'], {}), False, 'from tensorflow.keras.layers import Input\n'), (83, 'tensorflow.keras.layers.Input', 'Input', (['self.input_shape[1]'], {}), False, 'from tensorflow.keras.layers import Input\n'), (84, 'tensorflow.keras.layers.Input', 'Input', (['self.input_shape[2]'], {}), False, 'from tensorflow.keras.layers import Input\n'), (93, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': '[input1, input2, input3]', 'output': 'bc'}), False, 'from tensorflow.keras.models import Model\n'), (102, 'numpy.zeros', 'np.zeros', (['(batch_size, self.row_point, 16)'], {}), True, 'import numpy as np\n'), (103, 'numpy.zeros', 'np.zeros', (['(batch_size, self.row_point, 6)'], {}), True, 'import numpy as np\n'), (104, 'numpy.zeros', 'np.zeros', (['(batch_size, self.row_point, 6)'], {}), True, 'import numpy as np\n'), (105, 'numpy.zeros', 'np.zeros', (['(batch_size, 2)'], {}), True, 'import numpy as np\n'), (110, 'tensorflow.keras.layers.LSTM', 'keras.layers.LSTM', (['hidden_cells'], {'return_sequences': '(True)', 'recurrent_dropout': '(0.2)', 'name': 'f"""{idx}_gen_lstm_layer1"""'}), True, 'import tensorflow.keras as keras\n'), (111, 'tensorflow.keras.layers.LSTM', 'keras.layers.LSTM', (['hidden_cells'], {'return_sequences': '(True)', 'recurrent_dropout': '(0.2)', 'name': 'f"""{idx}_gen_lstm_layer2"""'}), True, 'import tensorflow.keras as keras\n'), (113, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(256)', 'name': 'f"""{idx}_gen_lstm_dense1"""'}), False, 'from tensorflow.keras.layers import Dense\n'), (114, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(64)', 'name': 'f"""{idx}_gen_lstm_dense2"""'}), False, 'from tensorflow.keras.layers import Dense\n'), (122, 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': 'nb_filter', 'kernel_size': '(20)', 'strides': 'nb_strides', 'activation': '"""relu"""'}), True, 'import tensorflow.keras as keras\n'), (124, 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), True, 'import tensorflow.keras as keras\n'), (126, 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(nb_filter * 2)', 'kernel_size': '(20)', 'strides': 'nb_strides', 'activation': '"""relu"""'}), True, 'import tensorflow.keras as keras\n'), (128, 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), True, 'import tensorflow.keras as keras\n'), (130, 'tensorflow.keras.layers.Conv1D', 'keras.layers.Conv1D', ([], {'filters': '(nb_filter * 4)', 'kernel_size': '(20)', 'strides': 'nb_strides', 'activation': '"""relu"""'}), True, 'import tensorflow.keras as keras\n'), (132, 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), 
True, 'import tensorflow.keras as keras\n'), (133, 'tensorflow.keras.layers.Flatten', 'keras.layers.Flatten', ([], {}), True, 'import tensorflow.keras as keras\n'), (135, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(256)', 'activation': '"""relu"""'}), True, 'import tensorflow.keras as keras\n'), (136, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', ([], {'units': '(64)', 'activation': '"""relu"""'}), True, 'import tensorflow.keras as keras\n'), (148, 'tensorflow.keras.backend.sum', 'keras.backend.sum', (['attention_block1'], {}), True, 'import tensorflow.keras as keras\n'), (149, 'tensorflow.keras.backend.sum', 'keras.backend.sum', (['attention_block2'], {}), True, 'import tensorflow.keras as keras\n'), (150, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(1)', 'activation': '"""tanh"""'}), False, 'from tensorflow.keras.layers import Dense\n'), (30, 'tensorflow.stack', 'tf.stack', (['binary'], {}), True, 'import tensorflow as tf\n'), (89, 'tensorflow.keras.backend.sum', 'keras.backend.sum', (['attention_block1'], {}), True, 'import tensorflow.keras as keras\n'), (90, 'tensorflow.keras.backend.sum', 'keras.backend.sum', (['attention_block2'], {}), True, 'import tensorflow.keras as keras\n'), (91, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(1)', 'activation': '"""tanh"""'}), False, 'from tensorflow.keras.layers import Dense\n'), (65, 'numpy.zeros', 'np.zeros', (["(batch_per_epoch, self.batch_size, self.in_col[f'{i}'])"], {}), True, 'import numpy as np\n'), (66, 'numpy.zeros', 'np.zeros', (['(batch_per_epoch, self.batch_size, nb_class)'], {}), True, 'import numpy as np\n'), (71, 'Code.preprocessing.to_categorical_unit', 'to_categorical_unit', (["train['tag'][n]", 'nb_class'], {}), False, 'from Code.preprocessing import to_categorical_unit\n')] |
AshivDhondea/DFTS_compat_v1 | 976e6087ff629c45f7bbc79a3de25718ed143db5 | """
Download and save Keras models as h5 files, including the top,
in order to do inference later on.
@Hans Dhondea
4 August 2020
"""
import tensorflow as tf
print('TensorFlow version')
print(tf.__version__)
print('VGG16')
vgg16_model = tf.keras.applications.VGG16(weights='imagenet',include_top=True)
vgg16_model.summary()
tf.keras.utils.plot_model(vgg16_model,to_file='main_save_keras_models_vgg16_model_architecture.png',show_shapes=True)
tf.keras.utils.plot_model(vgg16_model,to_file='main_save_keras_models_vgg16_model_architecture.pdf',show_shapes=True)
vgg16_model.save("vgg16_model.h5")
print('VGG19')
vgg19_model = tf.keras.applications.VGG19(weights='imagenet',include_top=True)
vgg19_model.summary()
tf.keras.utils.plot_model(vgg19_model,to_file='main_save_keras_models_vgg19_model_architecture.png',show_shapes=True)
tf.keras.utils.plot_model(vgg19_model,to_file='main_save_keras_models_vgg19_model_architecture.pdf',show_shapes=True)
vgg19_model.save("vgg19_model.h5")
print('Xception')
xception_model = tf.keras.applications.Xception(weights='imagenet',include_top=True)
xception_model.summary()
tf.keras.utils.plot_model(xception_model,to_file='main_save_keras_models_xception_model_architecture.png',show_shapes=True)
tf.keras.utils.plot_model(xception_model,to_file='main_save_keras_models_xception_model_architecture.pdf',show_shapes=True)
xception_model.save("xception_model.h5")
print('ResNet50')
resnet50_model = tf.keras.applications.ResNet50(weights='imagenet',include_top=True)
resnet50_model.summary()
tf.keras.utils.plot_model(resnet50_model,to_file='main_save_keras_models_resnet50_architecture.png',show_shapes=True)
tf.keras.utils.plot_model(resnet50_model,to_file='main_save_keras_models_resnet50_architecture.pdf',show_shapes=True)
resnet50_model.save("resnet50_model.h5")
print('ResNet50v2')
resnet50_v2_model = tf.keras.applications.ResNet50V2(weights='imagenet',include_top=True)
resnet50_v2_model.summary()
tf.keras.utils.plot_model(resnet50_v2_model,to_file='main_save_keras_models_resnet50_v2_model_architecture.png',show_shapes=True)
tf.keras.utils.plot_model(resnet50_v2_model,to_file='main_save_keras_models_resnet50_v2_model_architecture.pdf',show_shapes=True)
resnet50_v2_model.save("resnet50_v2_model.h5")
print('InceptionResNetV2')
inceptionresnet_v2_model = tf.keras.applications.InceptionResNetV2(weights='imagenet',include_top=True)
inceptionresnet_v2_model.summary()
tf.keras.utils.plot_model(inceptionresnet_v2_model,to_file='main_save_keras_models_inceptionresnet_v2_model_architecture.png',show_shapes=True)
tf.keras.utils.plot_model(inceptionresnet_v2_model,to_file='main_save_keras_models_inceptionresnet_v2_model_architecture.pdf',show_shapes=True)
inceptionresnet_v2_model.save("inceptionresnet_v2_model.h5")
print('Models saved. Architectures saved.')
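# Illustrative round-trip check: any of the saved h5 files can be reloaded for
# inference without re-downloading weights from the Keras application zoo.
reloaded_model = tf.keras.models.load_model('vgg16_model.h5')
reloaded_model.summary()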
| [
"tensorflow.keras.applications.ResNet50V2",
"tensorflow.keras.utils.plot_model",
"tensorflow.keras.applications.VGG19",
"tensorflow.keras.applications.Xception",
"tensorflow.keras.applications.ResNet50",
"tensorflow.keras.applications.VGG16",
"tensorflow.keras.applications.InceptionResNetV2"
] | keras_models/main_save_keras_models.py | [(16, 'tensorflow.keras.applications.VGG16', 'tf.keras.applications.VGG16', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), True, 'import tensorflow as tf\n'), (19, 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['vgg16_model'], {'to_file': '"""main_save_keras_models_vgg16_model_architecture.png"""', 'show_shapes': '(True)'}), True, 'import tensorflow as tf\n'), (20, 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['vgg16_model'], {'to_file': '"""main_save_keras_models_vgg16_model_architecture.pdf"""', 'show_shapes': '(True)'}), True, 'import tensorflow as tf\n'), (24, 'tensorflow.keras.applications.VGG19', 'tf.keras.applications.VGG19', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['vgg19_model'], {'to_file': '"""main_save_keras_models_vgg19_model_architecture.png"""', 'show_shapes': '(True)'}), True, 'import tensorflow as tf\n'), (28, 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['vgg19_model'], {'to_file': '"""main_save_keras_models_vgg19_model_architecture.pdf"""', 'show_shapes': '(True)'}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.keras.applications.Xception', 'tf.keras.applications.Xception', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), True, 'import tensorflow as tf\n'), (35, 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['xception_model'], {'to_file': '"""main_save_keras_models_xception_model_architecture.png"""', 'show_shapes': '(True)'}), True, 'import tensorflow as tf\n'), (36, 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['xception_model'], {'to_file': '"""main_save_keras_models_xception_model_architecture.pdf"""', 'show_shapes': '(True)'}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.keras.applications.ResNet50', 'tf.keras.applications.ResNet50', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['resnet50_model'], {'to_file': '"""main_save_keras_models_resnet50_architecture.png"""', 'show_shapes': '(True)'}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['resnet50_model'], {'to_file': '"""main_save_keras_models_resnet50_architecture.pdf"""', 'show_shapes': '(True)'}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.keras.applications.ResNet50V2', 'tf.keras.applications.ResNet50V2', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['resnet50_v2_model'], {'to_file': '"""main_save_keras_models_resnet50_v2_model_architecture.png"""', 'show_shapes': '(True)'}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['resnet50_v2_model'], {'to_file': '"""main_save_keras_models_resnet50_v2_model_architecture.pdf"""', 'show_shapes': '(True)'}), True, 'import tensorflow as tf\n'), (56, 'tensorflow.keras.applications.InceptionResNetV2', 'tf.keras.applications.InceptionResNetV2', ([], {'weights': '"""imagenet"""', 'include_top': '(True)'}), True, 'import tensorflow as tf\n'), (59, 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['inceptionresnet_v2_model'], {'to_file': 
'"""main_save_keras_models_inceptionresnet_v2_model_architecture.png"""', 'show_shapes': '(True)'}), True, 'import tensorflow as tf\n'), (60, 'tensorflow.keras.utils.plot_model', 'tf.keras.utils.plot_model', (['inceptionresnet_v2_model'], {'to_file': '"""main_save_keras_models_inceptionresnet_v2_model_architecture.pdf"""', 'show_shapes': '(True)'}), True, 'import tensorflow as tf\n')] |
aarkwright/arkml | a2f3d9bea5298233187d9c82457ed9e83cd37ceb | import glob
import matplotlib.pyplot as plt
from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.optimizers import SGD  # stay inside tf.keras; mixing standalone keras objects into a tf.keras model breaks compile
from tensorflow.keras.models import Model
from tensorflow.keras.layers import Dense, GlobalAveragePooling2D
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
# Count of files in this path and it's subfolders
def get_num_files(path):
if not os.path.exists(path):
return 0
return sum([len(files) for r, d, files in os.walk(path)])
# Count of subfolders directly below the path (aka our categories)
def get_num_subfolders(path):
if not os.path.exists(path):
return 0
    # os.walk yields a directory list at every level; keep only the immediate one
    return len(next(os.walk(path))[1])
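# Example (hypothetical layout): with ./data/train/cats and ./data/train/dogs,
# get_num_subfolders('./data/train') returns 2 (our categories) and
# get_num_files('./data/train') counts every image under both class folders.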
# Image generator function
def create_img_generator():
return ImageDataGenerator(
preprocessing_function=preprocess_input,
rotation_range=30,
width_shift_range=0.2,
height_shift_range=0.2,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True
)
# Main code
Image_width, Image_height = 299, 299
Training_Epochs = 2
Batch_Size = 32
Number_FC_Neurons = 1024
train_dir = './data/train'
validate_dir = './data/validate'
num_train_samples = get_num_files(train_dir)
num_classes = get_num_subfolders(train_dir)
num_validate_samples = get_num_files(validate_dir)
num_epoch = Training_Epochs
batch_size = Batch_Size
# define data pre-processing
train_image_gen = create_img_generator()
test_image_gen = create_img_generator()
# Connect the image generator to a folder which contains the source image that the image generator alters
# Training image generator:
train_generator = train_image_gen.flow_from_directory(
train_dir,
target_size=(Image_width, Image_height),
batch_size=batch_size,
seed=420
)
# Validation image generator (uses the otherwise-unused test_image_gen):
validation_generator = test_image_gen.flow_from_directory(
validate_dir,
target_size=(Image_width, Image_height),
batch_size=batch_size,
seed=420
)
# Fully connected layer
InceptionV3_base_model = InceptionV3(
weights='imagenet',
include_top=False, # excludes the final FC layer
)
print('[+] Inception v3 base model without last FC loaded.')
# Define the layers
L0 = InceptionV3_base_model.output
L1 = GlobalAveragePooling2D()(L0)
L2 = Dense(Number_FC_Neurons, activation='relu')(L1)
predictions = Dense(num_classes, activation='softmax')(L2)
# New model
model = Model(inputs=InceptionV3_base_model.input, outputs=predictions)
print(model.summary())
print('[+] Performing basic transfer Learning')
# Freeze all layers in the Inception V3 base model
for layer in InceptionV3_base_model.layers:
layer.trainable = False
# Define the model compile step for basic transfer learning
model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])
# By using generators we can continue to request sample images, and the generators will pull images from
# the training or validation folders and alter them slightly
history_transfer_learning = model.fit_generator(
train_generator,
epochs=num_epoch,
steps_per_epoch=num_train_samples // batch_size,
validation_data=validation_generator,
validation_steps=num_validate_samples // batch_size,
class_weight='auto'
)
# Save the model
model.save('inceptionv3-transfer-learning.model')
# Option 2 specific to Inception
print('\n[+] Fine tuning existing model')
Layers_To_Freeze = 172
for layer in model.layers[:Layers_To_Freeze]:
layer.trainable = False
for layer in model.layers[Layers_To_Freeze:]:
layer.trainable = True
model.compile(
optimizer=SGD(lr=0.0001, momentum=0.9),
loss='categorical_crossentropy',
metrics=['accuracy']
)
history_transfer_learning = model.fit_generator(
train_generator,
epochs=num_epoch,
steps_per_epoch=num_train_samples // batch_size,
validation_data=validation_generator,
validation_steps=num_validate_samples // batch_size,
class_weight='auto'
)
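# Illustrative single-image inference sketch ('sample.jpg' is a placeholder path):
# class order follows train_generator.class_indices.
import numpy as np
from tensorflow.keras.preprocessing import image as keras_image
sample = keras_image.load_img('sample.jpg', target_size=(Image_width, Image_height))
sample_batch = preprocess_input(np.expand_dims(keras_image.img_to_array(sample), axis=0))
print(model.predict(sample_batch))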
model.save('inceptionv3-fine-tune.model') | [
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.applications.inception_v3.InceptionV3"
] | transfer_learn.py | [(79, 'tensorflow.keras.applications.inception_v3.InceptionV3', 'InceptionV3', ([], {'weights': '"""imagenet"""', 'include_top': '(False)'}), False, 'from tensorflow.keras.applications.inception_v3 import InceptionV3, preprocess_input\n'), (93, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'InceptionV3_base_model.input', 'outputs': 'predictions'}), False, 'from tensorflow.keras.models import Model\n'), (27, 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {'preprocessing_function': 'preprocess_input', 'rotation_range': '(30)', 'width_shift_range': '(0.2)', 'height_shift_range': '(0.2)', 'shear_range': '(0.2)', 'zoom_range': '(0.2)', 'horizontal_flip': '(True)'}), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), (87, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), False, 'from tensorflow.keras.layers import Dense, GlobalAveragePooling2D\n'), (88, 'tensorflow.keras.layers.Dense', 'Dense', (['Number_FC_Neurons'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Dense, GlobalAveragePooling2D\n'), (89, 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {'activation': '"""softmax"""'}), False, 'from tensorflow.keras.layers import Dense, GlobalAveragePooling2D\n'), (15, 'os.path.exists', 'os.path.exists', (['path'], {}), False, 'import os\n'), (21, 'os.path.exists', 'os.path.exists', (['path'], {}), False, 'import os\n'), (131, 'keras.optimizers.SGD', 'SGD', ([], {'lr': '(0.0001)', 'momentum': '(0.9)'}), False, 'from keras.optimizers import SGD\n'), (17, 'os.walk', 'os.walk', (['path'], {}), False, 'import os\n'), (23, 'os.walk', 'os.walk', (['path'], {}), False, 'import os\n')] |
kimbfs/Proyecto | 3065b2d721365d3e92d93bc449c1cea92bbf4ed8 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""YOLO_v3 VGG16 Model Defined in Keras."""
from tensorflow.keras.layers import Conv2D, UpSampling2D, Concatenate, MaxPooling2D
from tensorflow.keras.models import Model
from tensorflow.keras.applications.vgg16 import VGG16
from common.backbones.layers import YoloConv2D
from yolo3.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, make_last_layers
def yolo3_vgg16_body(inputs, num_anchors, num_classes):
"""Create YOLO_V3 model CNN body in Keras."""
'''
Layer Name input_1 Output: Tensor("input_1:0", shape=(?, 416, 416, 3), dtype=float32)
Layer Name block1_conv1 Output: Tensor("block1_conv1/Relu:0", shape=(?, 416, 416, 64), dtype=float32)
Layer Name block1_conv2 Output: Tensor("block1_conv2/Relu:0", shape=(?, 416, 416, 64), dtype=float32)
Layer Name block1_pool Output: Tensor("block1_pool/MaxPool:0", shape=(?, 208, 208, 64), dtype=float32)
Layer Name block2_conv1 Output: Tensor("block2_conv1/Relu:0", shape=(?, 208, 208, 128), dtype=float32)
Layer Name block2_conv2 Output: Tensor("block2_conv2/Relu:0", shape=(?, 208, 208, 128), dtype=float32)
Layer Name block2_pool Output: Tensor("block2_pool/MaxPool:0", shape=(?, 104, 104, 128), dtype=float32)
Layer Name block3_conv1 Output: Tensor("block3_conv1/Relu:0", shape=(?, 104, 104, 256), dtype=float32)
Layer Name block3_conv2 Output: Tensor("block3_conv2/Relu:0", shape=(?, 104, 104, 256), dtype=float32)
Layer Name block3_conv3 Output: Tensor("block3_conv3/Relu:0", shape=(?, 104, 104, 256), dtype=float32)
Layer Name block3_pool Output: Tensor("block3_pool/MaxPool:0", shape=(?, 52, 52, 256), dtype=float32)
Layer Name block4_conv1 Output: Tensor("block4_conv1/Relu:0", shape=(?, 52, 52, 512), dtype=float32)
Layer Name block4_conv2 Output: Tensor("block4_conv2/Relu:0", shape=(?, 52, 52, 512), dtype=float32)
Layer Name block4_conv3 Output: Tensor("block4_conv3/Relu:0", shape=(?, 52, 52, 512), dtype=float32)
Layer Name block4_pool Output: Tensor("block4_pool/MaxPool:0", shape=(?, 26, 26, 512), dtype=float32)
Layer Name block5_conv1 Output: Tensor("block5_conv1/Relu:0", shape=(?, 26, 26, 512), dtype=float32)
Layer Name block5_conv2 Output: Tensor("block5_conv2/Relu:0", shape=(?, 26, 26, 512), dtype=float32)
Layer Name block5_conv3 Output: Tensor("block5_conv3/Relu:0", shape=(?, 26, 26, 512), dtype=float32)
Layer Name block5_pool Output: Tensor("block5_pool/MaxPool:0", shape=(?, 13, 13, 512), dtype=float32)
'''
#net, endpoint = inception_v2.inception_v2(inputs)
vgg16 = VGG16(input_tensor=inputs,weights='imagenet',include_top=False)
x = vgg16.get_layer('block5_pool').output
x = YoloConv2D(512, (3, 3), activation='relu', padding='same', name='block6_conv1')(x)
x = YoloConv2D(512, (3, 3), activation='relu', padding='same', name='block6_conv2')(x)
x = YoloConv2D(512, (3, 3), activation='relu', padding='same', name='block6_conv3')(x)
x = YoloConv2D(512, (3, 3), activation='relu', padding='same', name='block6_conv4')(x)
# input: 416 x 416 x 3
# block6_conv3 :13 x 13 x 512
# block5_conv3 :26 x 26 x 512
# block4_conv3 : 52 x 52 x 512
    # f1 : 13 x 13 x 512
x, y1 = make_last_layers(x, 512, num_anchors * (num_classes + 5), predict_id='1')
x = compose(
DarknetConv2D_BN_Leaky(256, (1,1)),
UpSampling2D(2))(x)
f2 = vgg16.get_layer('block5_conv3').output
# f2: 26 x 26 x 512
x = Concatenate()([x,f2])
x, y2 = make_last_layers(x, 256, num_anchors*(num_classes+5), predict_id='2')
x = compose(
DarknetConv2D_BN_Leaky(128, (1,1)),
UpSampling2D(2))(x)
f3 = vgg16.get_layer('block4_conv3').output
# f3 : 52 x 52 x 256
x = Concatenate()([x, f3])
x, y3 = make_last_layers(x, 128, num_anchors*(num_classes+5), predict_id='3')
return Model(inputs = inputs, outputs=[y1,y2,y3])
def tiny_yolo3_vgg16_body(inputs, num_anchors, num_classes):
'''Create Tiny YOLO_v3 VGG16 model CNN body in keras.'''
vgg16 = VGG16(input_tensor=inputs,weights='imagenet',include_top=False)
x = vgg16.get_layer('block5_pool').output
x = YoloConv2D(512, (3, 3), activation='relu', padding='same', name='block6_conv1')(x)
x = YoloConv2D(512, (3, 3), activation='relu', padding='same', name='block6_conv2')(x)
x = YoloConv2D(512, (3, 3), activation='relu', padding='same', name='block6_conv3')(x)
#x = YoloConv2D(512, (3, 3), activation='relu', padding='same', name='block6_conv4')(x)
# input: 416 x 416 x 3
# block6_conv3 :13 x 13 x 512
# block5_conv3 :26 x 26 x 512
# block4_conv3 : 52 x 52 x 512
x1 = vgg16.get_layer('block5_conv3').output
x2 = x
x2 = DarknetConv2D_BN_Leaky(512, (1,1))(x2)
y1 = compose(
DarknetConv2D_BN_Leaky(1024, (3,3)),
#Depthwise_Separable_Conv2D_BN_Leaky(filters=1024, kernel_size=(3, 3), block_id_str='14'),
DarknetConv2D(num_anchors*(num_classes+5), (1,1)))(x2)
x2 = compose(
DarknetConv2D_BN_Leaky(256, (1,1)),
UpSampling2D(2))(x2)
y2 = compose(
Concatenate(),
DarknetConv2D_BN_Leaky(512, (3,3)),
#Depthwise_Separable_Conv2D_BN_Leaky(filters=512, kernel_size=(3, 3), block_id_str='15'),
DarknetConv2D(num_anchors*(num_classes+5), (1,1)))([x2,x1])
return Model(inputs, [y1,y2])
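if __name__ == '__main__':
    # Illustrative shape check (anchor/class counts are placeholders): build the
    # full body on a 416x416 input and list the three YOLO prediction tensors.
    # Note this downloads the ImageNet VGG16 weights on first run.
    from tensorflow.keras.layers import Input
    body = yolo3_vgg16_body(Input(shape=(416, 416, 3)), num_anchors=3, num_classes=20)
    print([tuple(y.shape) for y in body.outputs])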
| [
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.applications.vgg16.VGG16",
"tensorflow.keras.models.Model"
] | yolo3/models/yolo3_vgg16.py | [(38, 'tensorflow.keras.applications.vgg16.VGG16', 'VGG16', ([], {'input_tensor': 'inputs', 'weights': '"""imagenet"""', 'include_top': '(False)'}), False, 'from tensorflow.keras.applications.vgg16 import VGG16\n'), (52, 'yolo3.models.layers.make_last_layers', 'make_last_layers', (['x', '(512)', '(num_anchors * (num_classes + 5))'], {'predict_id': '"""1"""'}), False, 'from yolo3.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, make_last_layers\n'), (62, 'yolo3.models.layers.make_last_layers', 'make_last_layers', (['x', '(256)', '(num_anchors * (num_classes + 5))'], {'predict_id': '"""2"""'}), False, 'from yolo3.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, make_last_layers\n'), (71, 'yolo3.models.layers.make_last_layers', 'make_last_layers', (['x', '(128)', '(num_anchors * (num_classes + 5))'], {'predict_id': '"""3"""'}), False, 'from yolo3.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, make_last_layers\n'), (73, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'inputs', 'outputs': '[y1, y2, y3]'}), False, 'from tensorflow.keras.models import Model\n'), (77, 'tensorflow.keras.applications.vgg16.VGG16', 'VGG16', ([], {'input_tensor': 'inputs', 'weights': '"""imagenet"""', 'include_top': '(False)'}), False, 'from tensorflow.keras.applications.vgg16 import VGG16\n'), (108, 'tensorflow.keras.models.Model', 'Model', (['inputs', '[y1, y2]'], {}), False, 'from tensorflow.keras.models import Model\n'), (40, 'common.backbones.layers.YoloConv2D', 'YoloConv2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block6_conv1"""'}), False, 'from common.backbones.layers import YoloConv2D\n'), (41, 'common.backbones.layers.YoloConv2D', 'YoloConv2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block6_conv2"""'}), False, 'from common.backbones.layers import YoloConv2D\n'), (42, 'common.backbones.layers.YoloConv2D', 'YoloConv2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block6_conv3"""'}), False, 'from common.backbones.layers import YoloConv2D\n'), (43, 'common.backbones.layers.YoloConv2D', 'YoloConv2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block6_conv4"""'}), False, 'from common.backbones.layers import YoloConv2D\n'), (60, 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), False, 'from tensorflow.keras.layers import Conv2D, UpSampling2D, Concatenate, MaxPooling2D\n'), (70, 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), False, 'from tensorflow.keras.layers import Conv2D, UpSampling2D, Concatenate, MaxPooling2D\n'), (79, 'common.backbones.layers.YoloConv2D', 'YoloConv2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block6_conv1"""'}), False, 'from common.backbones.layers import YoloConv2D\n'), (80, 'common.backbones.layers.YoloConv2D', 'YoloConv2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block6_conv2"""'}), False, 'from common.backbones.layers import YoloConv2D\n'), (81, 'common.backbones.layers.YoloConv2D', 'YoloConv2D', (['(512)', '(3, 3)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""block6_conv3"""'}), False, 'from common.backbones.layers import YoloConv2D\n'), (92, 'yolo3.models.layers.DarknetConv2D_BN_Leaky', 'DarknetConv2D_BN_Leaky', (['(512)', '(1, 1)'], {}), False, 'from 
yolo3.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, make_last_layers\n'), (55, 'yolo3.models.layers.DarknetConv2D_BN_Leaky', 'DarknetConv2D_BN_Leaky', (['(256)', '(1, 1)'], {}), False, 'from yolo3.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, make_last_layers\n'), (56, 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', (['(2)'], {}), False, 'from tensorflow.keras.layers import Conv2D, UpSampling2D, Concatenate, MaxPooling2D\n'), (65, 'yolo3.models.layers.DarknetConv2D_BN_Leaky', 'DarknetConv2D_BN_Leaky', (['(128)', '(1, 1)'], {}), False, 'from yolo3.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, make_last_layers\n'), (66, 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', (['(2)'], {}), False, 'from tensorflow.keras.layers import Conv2D, UpSampling2D, Concatenate, MaxPooling2D\n'), (95, 'yolo3.models.layers.DarknetConv2D_BN_Leaky', 'DarknetConv2D_BN_Leaky', (['(1024)', '(3, 3)'], {}), False, 'from yolo3.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, make_last_layers\n'), (97, 'yolo3.models.layers.DarknetConv2D', 'DarknetConv2D', (['(num_anchors * (num_classes + 5))', '(1, 1)'], {}), False, 'from yolo3.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, make_last_layers\n'), (100, 'yolo3.models.layers.DarknetConv2D_BN_Leaky', 'DarknetConv2D_BN_Leaky', (['(256)', '(1, 1)'], {}), False, 'from yolo3.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, make_last_layers\n'), (101, 'tensorflow.keras.layers.UpSampling2D', 'UpSampling2D', (['(2)'], {}), False, 'from tensorflow.keras.layers import Conv2D, UpSampling2D, Concatenate, MaxPooling2D\n'), (103, 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), False, 'from tensorflow.keras.layers import Conv2D, UpSampling2D, Concatenate, MaxPooling2D\n'), (104, 'yolo3.models.layers.DarknetConv2D_BN_Leaky', 'DarknetConv2D_BN_Leaky', (['(512)', '(3, 3)'], {}), False, 'from yolo3.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, make_last_layers\n'), (106, 'yolo3.models.layers.DarknetConv2D', 'DarknetConv2D', (['(num_anchors * (num_classes + 5))', '(1, 1)'], {}), False, 'from yolo3.models.layers import compose, DarknetConv2D, DarknetConv2D_BN_Leaky, make_last_layers\n')] |
Neronjust2017/keras-project | 919e67e10b0bf518eb9cc63df68c79fe2bb71b36 | # -*- coding: utf-8 -*-
import os
import numpy as np
import tensorflow as tf
import tensorflow.keras as keras
from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, \
BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape
from tensorflow.keras.models import Model, load_model
from tensorflow.keras.initializers import glorot_uniform  # use tf.keras throughout instead of standalone keras
import tensorflow.keras.backend as K
from tensorflow.keras.callbacks import TensorBoard
from utils.AFClassication.data import loaddata
from utils.uts_classification.tools import AdvancedLearnignRateScheduler
from utils.uts_classification.metric import f1,recall,precision
def res_conv(X, filters, base, s):
name_base = base + '/branch'
F1, F2, F3 = filters
##### Branch1 is the main path and Branch2 is the shortcut path #####
X_shortcut = X
##### Branch1 #####
# First component of Branch1
X = BatchNormalization(name=name_base + '1/bn_1')(X)
X = Activation('relu', name=name_base + '1/relu_1')(X)
X = Conv1D(filters=F1, kernel_size=16, strides=1, padding='same', name=name_base + '1/conv_1',
kernel_initializer=glorot_uniform(seed=0))(X)
# Second component of Branch1
X = BatchNormalization(name=name_base + '1/bn_2')(X)
X = Activation('relu', name=name_base + '1/relu_2')(X)
X = Conv1D(filters=F2, kernel_size=48, strides=s, padding='same', name=name_base + '1/conv_2',
kernel_initializer=glorot_uniform(seed=0))(X)
# Third component of Branch1
X = BatchNormalization(name=name_base + '1/bn_3')(X)
X = Activation('relu', name=name_base + '1/relu_3')(X)
X = Conv1D(filters=F3, kernel_size=16, strides=1, padding='same', name=name_base + '1/conv_3',
kernel_initializer=glorot_uniform(seed=0))(X)
##### Branch2 ####
X_shortcut = BatchNormalization(name=name_base + '2/bn_1')(X_shortcut)
X_shortcut = Activation('relu', name=name_base + '2/relu_1')(X_shortcut)
X_shortcut = Conv1D(filters=F3, kernel_size=16, strides=s, padding='same', name=name_base + '2/conv_1',
kernel_initializer=glorot_uniform(seed=0))(X_shortcut)
# Final step: Add Branch1 and Branch2
X = Add(name=base + '/Add')([X, X_shortcut])
return X
def res_identity(X, filters, base):
name_base = base + '/branch'
F1, F2, F3 = filters
##### Branch1 is the main path and Branch2 is the shortcut path #####
X_shortcut = X
##### Branch1 #####
# First component of Branch1
X = BatchNormalization(name=name_base + '1/bn_1')(X)
Shortcut = Activation('relu', name=name_base + '1/relu_1')(X)
X = Conv1D(filters=F1, kernel_size=16, strides=1, padding='same', name=name_base + '1/conv_1',
kernel_initializer=glorot_uniform(seed=0))(Shortcut)
# Second component of Branch1
X = BatchNormalization(name=name_base + '1/bn_2')(X)
X = Activation('relu', name=name_base + '1/relu_2')(X)
X = Conv1D(filters=F2, kernel_size=48, strides=1, padding='same', name=name_base + '1/conv_2',
kernel_initializer=glorot_uniform(seed=0))(X)
# Third component of Branch1
X = BatchNormalization(name=name_base + '1/bn_3')(X)
X = Activation('relu', name=name_base + '1/relu_3')(X)
X = Conv1D(filters=F3, kernel_size=16, strides=1, padding='same', name=name_base + '1/conv_3',
kernel_initializer=glorot_uniform(seed=0))(X)
# Final step: Add Branch1 and the original Input itself
X = Add(name=base + '/Add')([X, X_shortcut])
return X
def Trunk_block(X, F, base):
name_base = base
X = res_identity(X, F, name_base + '/Residual_id_1')
X = res_identity(X, F, name_base + '/Residual_id_2')
return X
def interpolation(input_tensor, ref_tensor, name): # resizes input_tensor wrt. ref_tensor
# resize_nearest_neighbor
# L = ref_tensor.get_shape()[1]
# print(input_tensor.shape)
# x = Reshape((input_tensor.shape[1],1,input_tensor.shape[2]))(input_tensor)
# print(x.shape)
# x = tf.compat.v1.image.resize_nearest_neighbor(x, [L, 1], name=name)
# out = Reshape((x.shape[1],x.shape[3]))(x)
# print(out.shape)
# print(input_tensor.shape)
out = UpSampling1D(size=ref_tensor.shape[1]//input_tensor.shape[1])(input_tensor)
# print(out.shape)
return out
def Attention_1(X, filters, base):
F1, F2, F3 = filters
name_base = base
X = res_identity(X, filters, name_base + '/Pre_Residual_id')
X_Trunk = Trunk_block(X, filters, name_base + '/Trunk')
print("X_Trunk")
print(X_Trunk.shape)
X = MaxPooling1D(3, strides=2, padding='same', name=name_base + '/Mask/pool_3')(X)
print(X.shape)
X = res_identity(X, filters, name_base + '/Mask/Residual_id_3_Down')
Residual_id_3_Down_shortcut = X
Residual_id_3_Down_branched = res_identity(X, filters, name_base + '/Mask/Residual_id_3_Down_branched')
X = MaxPooling1D(3,strides=2, padding='same', name=name_base + '/Mask/pool_2')(X)
print(X.shape)
X = res_identity(X, filters, name_base + '/Mask/Residual_id_2_Down')
Residual_id_2_Down_shortcut = X
Residual_id_2_Down_branched = res_identity(X, filters, name_base + '/Mask/Residual_id_2_Down_branched')
X = MaxPooling1D(3, strides=2, padding='same', name=name_base + '/Mask/pool_1')(X)
print(X.shape)
X = res_identity(X, filters, name_base + '/Mask/Residual_id_1_Down')
X = res_identity(X, filters, name_base + '/Mask/Residual_id_1_Up')
temp_name1 = name_base + "/Mask/Interpool_1"
# X = Lambda(interpolation, arguments={'ref_tensor': Residual_id_2_Down_shortcut, 'name': temp_name1})(X)
X = UpSampling1D(size=Residual_id_2_Down_shortcut.shape[1] // X.shape[1], name=temp_name1)(X)
print(X.shape)
X = Add(name=base + '/Mask/Add_after_Interpool_1')([X, Residual_id_2_Down_branched])
X = res_identity(X, filters, name_base + '/Mask/Residual_id_2_Up')
temp_name2 = name_base + "/Mask/Interpool_2"
# X = Lambda(interpolation, arguments={'ref_tensor': Residual_id_3_Down_shortcut, 'name': temp_name2})(X)
X = UpSampling1D(size=Residual_id_3_Down_shortcut.shape[1] // X.shape[1], name=temp_name2)(X)
print(X.shape)
X = Add(name=base + '/Mask/Add_after_Interpool_2')([X, Residual_id_3_Down_branched])
X = res_identity(X, filters, name_base + '/Mask/Residual_id_3_Up')
temp_name3 = name_base + "/Mask/Interpool_3"
# X = Lambda(interpolation, arguments={'ref_tensor': X_Trunk, 'name': temp_name3})(X)
X = UpSampling1D(size=X_Trunk.shape[1] // X.shape[1], name=temp_name3)(X)
print(X.shape)
X = BatchNormalization(name=name_base + '/Mask/Interpool_3/bn_1')(X)
X = Activation('relu', name=name_base + '/Mask/Interpool_3/relu_1')(X)
X = Conv1D(F3, kernel_size=1, strides=1, padding='same', name=name_base + '/Mask/Interpool_3/conv_1',
kernel_initializer=glorot_uniform(seed=0))(X)
print(X.shape)
X = BatchNormalization(name=name_base + '/Mask/Interpool_3/bn_2')(X)
X = Activation('relu', name=name_base + '/Mask/Interpool_3/relu_2')(X)
X = Conv1D(F3, kernel_size=1, strides=1, padding='same', name=name_base + '/Mask/Interpool_3/conv_2',
kernel_initializer=glorot_uniform(seed=0))(X)
print(X.shape)
X = Activation('sigmoid', name=name_base + '/Mask/sigmoid')(X)
X = Multiply(name=name_base + '/Mutiply')([X_Trunk, X])
X = Add(name=name_base + '/Add')([X_Trunk, X])
X = res_identity(X, filters, name_base + '/Post_Residual_id')
return X
def Attention_2(X, filters, base):
F1, F2, F3 = filters
name_base = base
X = res_identity(X, filters, name_base + '/Pre_Residual_id')
X_Trunk = Trunk_block(X, filters, name_base + '/Trunk')
X = MaxPooling1D(3, strides=2, padding='same', name=name_base + '/Mask/pool_2')(X)
X = res_identity(X, filters, name_base + '/Mask/Residual_id_2_Down')
Residual_id_2_Down_shortcut = X
Residual_id_2_Down_branched = res_identity(X, filters, name_base + '/Mask/Residual_id_2_Down_branched')
X = MaxPooling1D(3, strides=2, padding='same', name=name_base + '/Mask/pool_1')(X)
X = res_identity(X, filters, name_base + '/Mask/Residual_id_1_Down')
X = res_identity(X, filters, name_base + '/Mask/Residual_id_1_Up')
temp_name1 = name_base + "/Mask/Interpool_1"
# X = Lambda(interpolation, arguments={'ref_tensor': Residual_id_2_Down_shortcut, 'name': temp_name1})(X)
X = UpSampling1D(size=Residual_id_2_Down_shortcut.shape[1] // X.shape[1], name=temp_name1)(X)
X = Add(name=base + '/Mask/Add_after_Interpool_1')([X, Residual_id_2_Down_branched])
X = res_identity(X, filters, name_base + '/Mask/Residual_id_2_Up')
temp_name2 = name_base + "/Mask/Interpool_2"
# X = Lambda(interpolation, arguments={'ref_tensor': X_Trunk, 'name': temp_name2})(X)
X = UpSampling1D(size=X_Trunk.shape[1] // X.shape[1], name=temp_name2)(X)
X = BatchNormalization(name=name_base + '/Mask/Interpool_2/bn_1')(X)
X = Activation('relu', name=name_base + '/Mask/Interpool_2/relu_1')(X)
X = Conv1D(F3, kernel_size=1, strides=1, padding='same', name=name_base + '/Mask/Interpool_2/conv_1',
kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(name=name_base + '/Mask/Interpool_2/bn_2')(X)
X = Activation('relu', name=name_base + '/Mask/Interpool_2/relu_2')(X)
X = Conv1D(F3, kernel_size=1, strides=1, padding='same', name=name_base + '/Mask/Interpool_2/conv_2',
kernel_initializer=glorot_uniform(seed=0))(X)
X = Activation('sigmoid', name=name_base + '/Mask/sigmoid')(X)
X = Multiply(name=name_base + '/Mutiply')([X_Trunk, X])
X = Add(name=name_base + '/Add')([X_Trunk, X])
X = res_identity(X, filters, name_base + '/Post_Residual_id')
return X
def Attention_3(X, filters, base):
F1, F2, F3 = filters
name_base = base
X = res_identity(X, filters, name_base + '/Pre_Residual_id')
X_Trunk = Trunk_block(X, filters, name_base + '/Trunk')
X = MaxPooling1D(3, strides=2, padding='same', name=name_base + '/Mask/pool_1')(X)
X = res_identity(X, filters, name_base + '/Mask/Residual_id_1_Down')
X = res_identity(X, filters, name_base + '/Mask/Residual_id_1_Up')
temp_name2 = name_base + "/Mask/Interpool_1"
# X = Lambda(interpolation, arguments={'ref_tensor': X_Trunk, 'name': temp_name2})(X)
X = UpSampling1D(size=X_Trunk.shape[1] // X.shape[1], name=temp_name2)(X)
X = BatchNormalization(name=name_base + '/Mask/Interpool_2/bn_1')(X)
X = Activation('relu', name=name_base + '/Mask/Interpool_2/relu_1')(X)
X = Conv1D(F3, kernel_size=1, strides=1, padding='same', name=name_base + '/Mask/Interpool_2/conv_1',
kernel_initializer=glorot_uniform(seed=0))(X)
X = BatchNormalization(name=name_base + '/Mask/Interpool_2/bn_2')(X)
X = Activation('relu', name=name_base + '/Mask/Interpool_2/relu_2')(X)
X = Conv1D(F3, kernel_size=1, strides=1, padding='same', name=name_base + '/Mask/Interpool_2/conv_2',
kernel_initializer=glorot_uniform(seed=0))(X)
X = Activation('sigmoid', name=name_base + '/Mask/sigmoid')(X)
X = Multiply(name=name_base + '/Mutiply')([X_Trunk, X])
X = Add(name=name_base + '/Add')([X_Trunk, X])
X = res_identity(X, filters, name_base + '/Post_Residual_id')
return X
# Load the UCI_HAR_Dataset
(X_train, y_train), (Xval, yval), (final_testset, final_testtarget), (R_train, Rval, Rtest), (
    P_train, Pval, Ptest), (Q_train, Qval, Qtest), (T_train, Tval, Ttest) = loaddata()
shape = X_train[0].shape
if len(shape) == 2:
X_train[0] = np.expand_dims(X_train[0], axis=2)
Xval[0] = np.expand_dims(Xval[0], axis=2)
final_testset[0] = np.expand_dims(final_testset[0], axis=2)
NUM_CLASSES = 3
x_train = np.concatenate((X_train[0],Xval[0]),axis=0)
y_train = np.concatenate((y_train,yval),axis=0)
train_number = (len(x_train) // 16) * 16  # largest multiple of the batch size (16)
print(x_train.shape)
print(y_train.shape)
print(final_testset[0].shape)
print(final_testtarget.shape)
print(y_train[:3])
input_shape = x_train.shape[1:]
nb_classes = 3
os.environ['CUDA_VISIBLE_DEVICES'] = '0,1'
X_input = Input(input_shape)
X = Conv1D(8, 7, strides=2, padding='same', name='conv_1', kernel_initializer=glorot_uniform(seed=0))(
X_input)
X = BatchNormalization(axis=-1, name='bn_1')(X)
X = Activation('relu', name='relu_1')(X)
X = MaxPooling1D(3, strides=2, padding='same', name='pool_1')(X)
X = res_conv(X, [8, 8, 32], 'Residual_conv_1', 1)
### Attention 1 Start
X = Attention_1(X, [8, 8, 32], 'Attention_1')
### Attention 1 End
X = res_conv(X, [16, 16, 64], 'Residual_conv_2', 2)
### Attention 2 Start
X = Attention_2(X, [16, 16, 64], 'Attention_2')
### Attention 2 End
X = res_conv(X, [32, 32, 128], 'Residual_conv_3', 2)
### Attention 3 Start
X = Attention_3(X, [32, 32, 128], 'Attention_3')
### Attention 3 End
X = res_conv(X, [64, 64, 256], 'Residual_conv_4', 2)
X = res_identity(X, [64, 64, 256], 'Residual_id_1')
X = res_identity(X, [64, 64, 256], 'Residual_id_2')
X = BatchNormalization(name='bn_2')(X)
X = Activation('relu', name='relu_2')(X)
print(X.shape)
X = GlobalAveragePooling1D()(X)
print(X.shape)
X = Dense(nb_classes,activation='sigmoid', name='Dense_1')(X)
model = Model(inputs=X_input, outputs=X, name='attention_56')
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam',
metrics=['accuracy',recall,precision,f1])
tbCallBack = TensorBoard(log_dir='tensorboard_log', # log directory
             histogram_freq=0, # how often (in epochs) to compute activation histograms; 0 disables them
             # batch_size=32, # how much data to use when computing the histograms
             write_graph=True, # whether to store the network graph
             write_images=True, # whether to visualize model weights as images
embeddings_freq=0,
embeddings_layer_names=None,
embeddings_metadata=None)
model.fit(x_train[0:train_number],y_train[0:train_number], epochs=100, validation_split=0.1, batch_size=16, callbacks=[keras.callbacks.EarlyStopping(patience=10),
AdvancedLearnignRateScheduler(monitor='val_loss', patience=6, verbose=1, mode='auto', decayRatio=0.1, warmup_batches=5, init_lr=0.001)],verbose=1)
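# Illustrative checkpoint ('attention_56.h5' is a placeholder path): persist the
# trained attention network before evaluating, so it can be restored via load_model.
model.save('attention_56.h5')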
loss, accuracy, recall, precision, f1 = model.evaluate(final_testset[0], final_testtarget)
print(loss)
print(accuracy)
print(recall)
print(precision)
print(f1) | [
"numpy.expand_dims",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.UpSampling1D",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.GlobalAveragePooling1D",
"tensorflow.keras.layers.MaxPooling1D",
"numpy.concatenate",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Multiply",
"tensorflow.keras.layers.Add",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.layers.Input"
] | models/classification/resnet_attention.py | [(316, 'utils.AFClassication.data.loaddata', 'loaddata', ([], {}), False, 'from utils.AFClassication.data import loaddata\n'), (327, 'numpy.concatenate', 'np.concatenate', (['(X_train[0], Xval[0])'], {'axis': '(0)'}), True, 'import numpy as np\n'), (328, 'numpy.concatenate', 'np.concatenate', (['(y_train, yval)'], {'axis': '(0)'}), True, 'import numpy as np\n'), (343, 'tensorflow.keras.layers.Input', 'Input', (['input_shape'], {}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (380, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'X_input', 'outputs': 'X', 'name': '"""attention_56"""'}), False, 'from tensorflow.keras.models import Model, load_model\n'), (385, 'keras.callbacks.TensorBoard', 'TensorBoard', ([], {'log_dir': '"""tensorboard_log"""', 'histogram_freq': '(0)', 'write_graph': '(True)', 'write_images': '(True)', 'embeddings_freq': '(0)', 'embeddings_layer_names': 'None', 'embeddings_metadata': 'None'}), False, 'from keras.callbacks import TensorBoard\n'), (321, 'numpy.expand_dims', 'np.expand_dims', (['X_train[0]'], {'axis': '(2)'}), True, 'import numpy as np\n'), (322, 'numpy.expand_dims', 'np.expand_dims', (['Xval[0]'], {'axis': '(2)'}), True, 'import numpy as np\n'), (323, 'numpy.expand_dims', 'np.expand_dims', (['final_testset[0]'], {'axis': '(2)'}), True, 'import numpy as np\n'), (347, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'axis': '(-1)', 'name': '"""bn_1"""'}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (348, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""relu_1"""'}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (349, 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['(3)'], {'strides': '(2)', 'padding': '"""same"""', 'name': '"""pool_1"""'}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (372, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""bn_2"""'}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (373, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': '"""relu_2"""'}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (376, 'tensorflow.keras.layers.GlobalAveragePooling1D', 'GlobalAveragePooling1D', ([], {}), False, 'from tensorflow.keras.layers import Input, Multiply, 
GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (379, 'tensorflow.keras.layers.Dense', 'Dense', (['nb_classes'], {'activation': '"""sigmoid"""', 'name': '"""Dense_1"""'}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (29, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': "(name_base + '1/bn_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (30, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': "(name_base + '1/relu_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (35, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': "(name_base + '1/bn_2')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (36, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': "(name_base + '1/relu_2')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (41, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': "(name_base + '1/bn_3')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (42, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': "(name_base + '1/relu_3')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (47, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': "(name_base + '2/bn_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (48, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': "(name_base + '2/relu_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (53, 'tensorflow.keras.layers.Add', 'Add', ([], {'name': "(base + '/Add')"}), False, 'from 
tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (69, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': "(name_base + '1/bn_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (70, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': "(name_base + '1/relu_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (75, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': "(name_base + '1/bn_2')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (76, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': "(name_base + '1/relu_2')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (81, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': "(name_base + '1/bn_3')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (82, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': "(name_base + '1/relu_3')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (87, 'tensorflow.keras.layers.Add', 'Add', ([], {'name': "(base + '/Add')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (114, 'tensorflow.keras.layers.UpSampling1D', 'UpSampling1D', ([], {'size': '(ref_tensor.shape[1] // input_tensor.shape[1])'}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (130, 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['(3)'], {'strides': '(2)', 'padding': '"""same"""', 'name': "(name_base + '/Mask/pool_3')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (139, 
'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['(3)'], {'strides': '(2)', 'padding': '"""same"""', 'name': "(name_base + '/Mask/pool_2')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (148, 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['(3)'], {'strides': '(2)', 'padding': '"""same"""', 'name': "(name_base + '/Mask/pool_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (158, 'tensorflow.keras.layers.UpSampling1D', 'UpSampling1D', ([], {'size': '(Residual_id_2_Down_shortcut.shape[1] // X.shape[1])', 'name': 'temp_name1'}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (161, 'tensorflow.keras.layers.Add', 'Add', ([], {'name': "(base + '/Mask/Add_after_Interpool_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (168, 'tensorflow.keras.layers.UpSampling1D', 'UpSampling1D', ([], {'size': '(Residual_id_3_Down_shortcut.shape[1] // X.shape[1])', 'name': 'temp_name2'}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (171, 'tensorflow.keras.layers.Add', 'Add', ([], {'name': "(base + '/Mask/Add_after_Interpool_2')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (178, 'tensorflow.keras.layers.UpSampling1D', 'UpSampling1D', ([], {'size': '(X_Trunk.shape[1] // X.shape[1])', 'name': 'temp_name3'}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (181, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': "(name_base + '/Mask/Interpool_3/bn_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (183, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': "(name_base + '/Mask/Interpool_3/relu_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (188, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': 
"(name_base + '/Mask/Interpool_3/bn_2')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (190, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': "(name_base + '/Mask/Interpool_3/relu_2')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (195, 'tensorflow.keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {'name': "(name_base + '/Mask/sigmoid')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (197, 'tensorflow.keras.layers.Multiply', 'Multiply', ([], {'name': "(name_base + '/Mutiply')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (199, 'tensorflow.keras.layers.Add', 'Add', ([], {'name': "(name_base + '/Add')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (215, 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['(3)'], {'strides': '(2)', 'padding': '"""same"""', 'name': "(name_base + '/Mask/pool_2')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (223, 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['(3)'], {'strides': '(2)', 'padding': '"""same"""', 'name': "(name_base + '/Mask/pool_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (232, 'tensorflow.keras.layers.UpSampling1D', 'UpSampling1D', ([], {'size': '(Residual_id_2_Down_shortcut.shape[1] // X.shape[1])', 'name': 'temp_name1'}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (234, 'tensorflow.keras.layers.Add', 'Add', ([], {'name': "(base + '/Mask/Add_after_Interpool_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (241, 'tensorflow.keras.layers.UpSampling1D', 'UpSampling1D', ([], {'size': '(X_Trunk.shape[1] // X.shape[1])', 'name': 'temp_name2'}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, 
BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (243, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': "(name_base + '/Mask/Interpool_2/bn_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (245, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': "(name_base + '/Mask/Interpool_2/relu_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (250, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': "(name_base + '/Mask/Interpool_2/bn_2')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (252, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': "(name_base + '/Mask/Interpool_2/relu_2')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (257, 'tensorflow.keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {'name': "(name_base + '/Mask/sigmoid')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (259, 'tensorflow.keras.layers.Multiply', 'Multiply', ([], {'name': "(name_base + '/Mutiply')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (261, 'tensorflow.keras.layers.Add', 'Add', ([], {'name': "(name_base + '/Add')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (277, 'tensorflow.keras.layers.MaxPooling1D', 'MaxPooling1D', (['(3)'], {'strides': '(2)', 'padding': '"""same"""', 'name': "(name_base + '/Mask/pool_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (286, 'tensorflow.keras.layers.UpSampling1D', 'UpSampling1D', ([], {'size': '(X_Trunk.shape[1] // X.shape[1])', 'name': 'temp_name2'}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (288, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', 
([], {'name': "(name_base + '/Mask/Interpool_2/bn_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (290, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': "(name_base + '/Mask/Interpool_2/relu_1')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (295, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': "(name_base + '/Mask/Interpool_2/bn_2')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (297, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {'name': "(name_base + '/Mask/Interpool_2/relu_2')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (302, 'tensorflow.keras.layers.Activation', 'Activation', (['"""sigmoid"""'], {'name': "(name_base + '/Mask/sigmoid')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (304, 'tensorflow.keras.layers.Multiply', 'Multiply', ([], {'name': "(name_base + '/Mutiply')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (306, 'tensorflow.keras.layers.Add', 'Add', ([], {'name': "(name_base + '/Add')"}), False, 'from tensorflow.keras.layers import Input, Multiply, GlobalAveragePooling1D, Add, Dense, Activation, ZeroPadding1D, BatchNormalization, Flatten, Conv1D, AveragePooling1D, MaxPooling1D, GlobalMaxPooling1D, Lambda, UpSampling1D, Reshape\n'), (345, 'keras.initializers.glorot_uniform', 'glorot_uniform', ([], {'seed': '(0)'}), False, 'from keras.initializers import glorot_uniform\n'), (394, 'tensorflow.keras.callbacks.EarlyStopping', 'keras.callbacks.EarlyStopping', ([], {'patience': '(10)'}), True, 'import tensorflow.keras as keras\n'), (395, 'utils.uts_classification.tools.AdvancedLearnignRateScheduler', 'AdvancedLearnignRateScheduler', ([], {'monitor': '"""val_loss"""', 'patience': '(6)', 'verbose': '(1)', 'mode': '"""auto"""', 'decayRatio': '(0.1)', 'warmup_batches': '(5)', 'init_lr': '(0.001)'}), False, 'from utils.uts_classification.tools import AdvancedLearnignRateScheduler\n'), (32, 'keras.initializers.glorot_uniform', 'glorot_uniform', ([], {'seed': '(0)'}), False, 'from keras.initializers import glorot_uniform\n'), (38, 'keras.initializers.glorot_uniform', 'glorot_uniform', ([], {'seed': '(0)'}), False, 'from keras.initializers import glorot_uniform\n'), (44, 'keras.initializers.glorot_uniform', 'glorot_uniform', ([], {'seed': '(0)'}), False, 'from keras.initializers import 
glorot_uniform\n'), (50, 'keras.initializers.glorot_uniform', 'glorot_uniform', ([], {'seed': '(0)'}), False, 'from keras.initializers import glorot_uniform\n'), (72, 'keras.initializers.glorot_uniform', 'glorot_uniform', ([], {'seed': '(0)'}), False, 'from keras.initializers import glorot_uniform\n'), (78, 'keras.initializers.glorot_uniform', 'glorot_uniform', ([], {'seed': '(0)'}), False, 'from keras.initializers import glorot_uniform\n'), (84, 'keras.initializers.glorot_uniform', 'glorot_uniform', ([], {'seed': '(0)'}), False, 'from keras.initializers import glorot_uniform\n'), (186, 'keras.initializers.glorot_uniform', 'glorot_uniform', ([], {'seed': '(0)'}), False, 'from keras.initializers import glorot_uniform\n'), (193, 'keras.initializers.glorot_uniform', 'glorot_uniform', ([], {'seed': '(0)'}), False, 'from keras.initializers import glorot_uniform\n'), (248, 'keras.initializers.glorot_uniform', 'glorot_uniform', ([], {'seed': '(0)'}), False, 'from keras.initializers import glorot_uniform\n'), (255, 'keras.initializers.glorot_uniform', 'glorot_uniform', ([], {'seed': '(0)'}), False, 'from keras.initializers import glorot_uniform\n'), (293, 'keras.initializers.glorot_uniform', 'glorot_uniform', ([], {'seed': '(0)'}), False, 'from keras.initializers import glorot_uniform\n'), (300, 'keras.initializers.glorot_uniform', 'glorot_uniform', ([], {'seed': '(0)'}), False, 'from keras.initializers import glorot_uniform\n')] |
zeeshanbasar/devnagri-classifier | ccd0ba76e0b9c015c5c18e6d8d3d5e5c86900ee8 | # basic imports
import numpy as np
#import matplotlib.pyplot as plt
#import os
#import cv2
import pickle
import tensorflow as tf
from tensorflow.keras.models import Sequential
import tensorflow.keras.layers as tfl
#from tensorflow.keras.callbacks import TensorBoard, LearningRateScheduler, EarlyStopping
#import time
#model name
#NAME="devnagri-{}".format(int(time.time()))
# TensorBoard
#tensorboard=TensorBoard(log_dir='logs/{}'.format(NAME))
# GPU
gpu_options=tf.compat.v1.GPUOptions(per_process_gpu_memory_fraction=0.333)
sess=tf.compat.v1.Session(config=tf.compat.v1.ConfigProto(gpu_options=gpu_options))
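# Note (illustrative, not from the original script): the tf.compat.v1 session
# above is the TF1-era way to cap GPU memory and has no effect on TF2 eager
# Keras code. A hedged TF2 alternative would be:
#
#   for gpu in tf.config.experimental.list_physical_devices('GPU'):
#       tf.config.experimental.set_memory_growth(gpu, True)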
# load the directory where image(or dataset) folders are
DATADIR='C:/IDK/ML/Devnagri/DevanagariHandwrittenCharacterDataset/Train'
CATEGORIES=["0","1","2","3","4","5","6","7","8","9",
"adna","ba","bha","cha","chha","chhya","da","daa","dha","dhaa","ga",
"gha","gya","ha","ja","jha","ka","kha","kna","ksha","la","ma","na",
"pa","pha","ra","sa","sh","t","ta","tha","thaa","tra","waw","yaw","yna"]
with open("X.pickle", "rb") as fx:
    X = pickle.load(fx)
with open("y.pickle", "rb") as fy:
    y = pickle.load(fy)
# X is assumed to be pickled already shaped as (N, 32, 32, 1); if it is a raw
# list, uncomment the reshape below before fitting.
#X = np.array(X).reshape((-1, 32, 32, 1))
y=np.array(y)
y=tf.keras.utils.to_categorical(y, num_classes=46, dtype='float32')
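# Sanity check (illustrative): assuming X was pickled already reshaped to
# (N, 32, 32, 1) -- see the commented reshape above -- the shapes should be:
# print(X.shape, y.shape)   # (N, 32, 32, 1) and (N, 46)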
#building the model
model=Sequential()
# layer 1
model.add(tfl.Conv2D(64, (3, 3), input_shape=(32, 32, 1)))  # 32x32 grayscale inputs
model.add(tfl.Activation('relu'))
model.add(tfl.MaxPooling2D(pool_size=(2,2)))
#layer 2
model.add(tfl.Conv2D(64,(3,3)))
model.add(tfl.Activation('relu'))
model.add(tfl.MaxPooling2D(pool_size=(2,2)))
#layer 3
model.add(tfl.Conv2D(64,(3,3)))
model.add(tfl.Activation('relu'))
model.add(tfl.MaxPooling2D(pool_size=(2,2)))
# Dense 1
model.add(tfl.Flatten())
model.add(tfl.Dense(64))
model.add(tfl.Activation('relu'))
# Dense 2
# Note: a second Flatten here would be a no-op -- the output of the previous
# Dense layer is already 1-D per sample.
model.add(tfl.Dense(64))
model.add(tfl.Activation('relu'))
# o/p layer
model.add(tfl.Dense(46)) # one unit per class (46 Devanagari characters/digits)
model.add(tfl.Activation('softmax'))
# compilation, loss, accuracy, optimizer
# The model ends in a softmax, so the loss must treat the outputs as
# probabilities; from_logits=True would wrongly re-apply softmax internally.
model.compile(loss=tf.keras.losses.CategoricalCrossentropy(from_logits=False),
              optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy', 'AUC'])
# early=tf.keras.callbacks.EarlyStopping(monitor='val_loss', min_delta=0.0000001, patience=5)
# fitting
model.fit(X, y, validation_split=0.1, batch_size=200, epochs=10)  # optionally: callbacks=[tensorboard, early]
model.save('devnagri-script-detection.model')
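# Hedged usage sketch (not part of the original script): reload the saved
# model and classify one image, where `sample` is assumed to be a
# (1, 32, 32, 1) float array.
#
#   reloaded = tf.keras.models.load_model('devnagri-script-detection.model')
#   probs = reloaded.predict(sample)
#   print(CATEGORIES[int(np.argmax(probs[0]))])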
| [
"tensorflow.compat.v1.ConfigProto",
"tensorflow.keras.layers.Activation",
"tensorflow.compat.v1.GPUOptions",
"tensorflow.keras.losses.CategoricalCrossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.optimizers.Adam",
"numpy.array",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.utils.to_categorical",
"tensorflow.keras.layers.Flatten"
] | devnagri-script-detection_test2.py | [(21, 'tensorflow.compat.v1.GPUOptions', 'tf.compat.v1.GPUOptions', ([], {'per_process_gpu_memory_fraction': '(0.333)'}), True, 'import tensorflow as tf\n'), (38, 'numpy.array', 'np.array', (['y'], {}), True, 'import numpy as np\n'), (39, 'tensorflow.keras.utils.to_categorical', 'tf.keras.utils.to_categorical', (['y'], {'num_classes': '(46)', 'dtype': '"""float32"""'}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras.models import Sequential\n'), (46, 'tensorflow.keras.layers.Conv2D', 'tfl.Conv2D', (['(64)', '(3, 3)'], {}), True, 'import tensorflow.keras.layers as tfl\n'), (47, 'tensorflow.keras.layers.Activation', 'tfl.Activation', (['"""relu"""'], {}), True, 'import tensorflow.keras.layers as tfl\n'), (48, 'tensorflow.keras.layers.MaxPooling2D', 'tfl.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), True, 'import tensorflow.keras.layers as tfl\n'), (50, 'tensorflow.keras.layers.Conv2D', 'tfl.Conv2D', (['(64)', '(3, 3)'], {}), True, 'import tensorflow.keras.layers as tfl\n'), (51, 'tensorflow.keras.layers.Activation', 'tfl.Activation', (['"""relu"""'], {}), True, 'import tensorflow.keras.layers as tfl\n'), (52, 'tensorflow.keras.layers.MaxPooling2D', 'tfl.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), True, 'import tensorflow.keras.layers as tfl\n'), (54, 'tensorflow.keras.layers.Conv2D', 'tfl.Conv2D', (['(64)', '(3, 3)'], {}), True, 'import tensorflow.keras.layers as tfl\n'), (55, 'tensorflow.keras.layers.Activation', 'tfl.Activation', (['"""relu"""'], {}), True, 'import tensorflow.keras.layers as tfl\n'), (56, 'tensorflow.keras.layers.MaxPooling2D', 'tfl.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), True, 'import tensorflow.keras.layers as tfl\n'), (58, 'tensorflow.keras.layers.Flatten', 'tfl.Flatten', ([], {}), True, 'import tensorflow.keras.layers as tfl\n'), (59, 'tensorflow.keras.layers.Dense', 'tfl.Dense', (['(64)'], {}), True, 'import tensorflow.keras.layers as tfl\n'), (60, 'tensorflow.keras.layers.Activation', 'tfl.Activation', (['"""relu"""'], {}), True, 'import tensorflow.keras.layers as tfl\n'), (62, 'tensorflow.keras.layers.Flatten', 'tfl.Flatten', ([], {}), True, 'import tensorflow.keras.layers as tfl\n'), (63, 'tensorflow.keras.layers.Dense', 'tfl.Dense', (['(64)'], {}), True, 'import tensorflow.keras.layers as tfl\n'), (64, 'tensorflow.keras.layers.Activation', 'tfl.Activation', (['"""relu"""'], {}), True, 'import tensorflow.keras.layers as tfl\n'), (66, 'tensorflow.keras.layers.Dense', 'tfl.Dense', (['(46)'], {}), True, 'import tensorflow.keras.layers as tfl\n'), (67, 'tensorflow.keras.layers.Activation', 'tfl.Activation', (['"""softmax"""'], {}), True, 'import tensorflow.keras.layers as tfl\n'), (22, 'tensorflow.compat.v1.ConfigProto', 'tf.compat.v1.ConfigProto', ([], {'gpu_options': 'gpu_options'}), True, 'import tensorflow as tf\n'), (70, 'tensorflow.keras.losses.CategoricalCrossentropy', 'tf.keras.losses.CategoricalCrossentropy', ([], {'from_logits': '(True)'}), True, 'import tensorflow as tf\n'), (71, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {}), True, 'import tensorflow as tf\n')] |
yubozhao/BentoML | 0d56b35e7a6969947c77a8cea685190f2196440f | # pylint: disable=redefined-outer-name
import contextlib
import json
import numpy as np
import pytest
import tensorflow as tf
import bentoml
from tests.bento_service_examples.tensorflow_classifier import Tensorflow2Classifier
from tests.integration.api_server.conftest import (
build_api_server_docker_image,
export_service_bundle,
run_api_server_docker_container,
)
test_data = [[1, 2, 3, 4, 5]]
test_tensor = tf.constant(np.asfarray(test_data))
@pytest.fixture(scope="session")
def clean_context():
with contextlib.ExitStack() as stack:
yield stack
class TfKerasModel(tf.keras.Model):
def __init__(self):
super().__init__()
# Simple linear layer which sums the inputs
self.dense = tf.keras.layers.Dense(
units=1,
input_shape=(5,),
use_bias=False,
kernel_initializer=tf.keras.initializers.Ones(),
)
def call(self, inputs):
return self.dense(inputs)
class TfNativeModel(tf.Module):
def __init__(self):
        super().__init__()
        # All-ones weights: the model simply sums its five inputs.
        self.weights = np.asfarray([[1.0], [1.0], [1.0], [1.0], [1.0]])
        self.dense = lambda inputs: tf.matmul(inputs, self.weights)
@tf.function(
input_signature=[tf.TensorSpec(shape=None, dtype=tf.float64, name='inputs')]
)
def __call__(self, inputs):
return self.dense(inputs)
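# Illustrative check (assumption, mirroring the tests below): both model
# classes implement y = sum(x) via all-ones weights, so [1, 2, 3, 4, 5]
# should map to 15.0:
#
#   assert float(TfNativeModel()(test_tensor)) == 15.0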
@pytest.fixture(params=[TfKerasModel, TfNativeModel], scope="session")
def model_class(request):
return request.param
@pytest.fixture(scope="session")
def tf2_svc(model_class):
"""Return a TensorFlow2 BentoService."""
    # When the ExampleBentoService gets saved and loaded again in the test, the
    # two class attributes below are set to the loaded BentoService class.
    # Reset them here so they do not affect other tests.
Tensorflow2Classifier._bento_service_bundle_path = None
Tensorflow2Classifier._bento_service_bundle_version = None
svc = Tensorflow2Classifier()
model = model_class()
model(test_tensor)
svc.pack('model', model)
return svc
@pytest.fixture(params=[False, True], scope="module")
def enable_microbatch(request):
return request.param
@pytest.fixture(scope="module")
def tf2_host(tf2_svc, enable_microbatch, clean_context):
with export_service_bundle(tf2_svc) as saved_path:
server_image = clean_context.enter_context(
build_api_server_docker_image(saved_path)
)
with run_api_server_docker_container(
server_image, enable_microbatch=enable_microbatch, timeout=500
) as host:
yield host
def test_tensorflow_2_artifact(tf2_svc):
assert (
tf2_svc.predict(test_tensor) == 15.0
), 'Inference on unsaved TF2 artifact does not match expected'
def test_tensorflow_2_artifact_loaded(tf2_svc):
with export_service_bundle(tf2_svc) as saved_path:
tf2_svc_loaded = bentoml.load(saved_path)
assert (
tf2_svc.predict(test_tensor) == tf2_svc_loaded.predict(test_tensor) == 15.0
), 'Inference on saved and loaded TF2 artifact does not match expected'
@pytest.mark.asyncio
async def test_tensorflow_2_artifact_with_docker(tf2_host):
await pytest.assert_request(
"POST",
f"http://{tf2_host}/predict",
headers=(("Content-Type", "application/json"),),
data=json.dumps({"instances": test_data}),
assert_status=200,
assert_data=b'[[15.0]]',
)
| [
"numpy.asfarray",
"tensorflow.TensorSpec",
"tensorflow.matmul",
"tensorflow.keras.initializers.Ones"
] | tests/integration/test_tensorflow_v2_2_savedmodel_artifact.py | [(22, 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), False, 'import pytest\n'), (57, 'pytest.fixture', 'pytest.fixture', ([], {'params': '[TfKerasModel, TfNativeModel]', 'scope': '"""session"""'}), False, 'import pytest\n'), (62, 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""session"""'}), False, 'import pytest\n'), (78, 'pytest.fixture', 'pytest.fixture', ([], {'params': '[False, True]', 'scope': '"""module"""'}), False, 'import pytest\n'), (83, 'pytest.fixture', 'pytest.fixture', ([], {'scope': '"""module"""'}), False, 'import pytest\n'), (17, 'numpy.asfarray', 'np.asfarray', (['test_data'], {}), True, 'import numpy as np\n'), (71, 'tests.bento_service_examples.tensorflow_classifier.Tensorflow2Classifier', 'Tensorflow2Classifier', ([], {}), False, 'from tests.bento_service_examples.tensorflow_classifier import Tensorflow2Classifier\n'), (24, 'contextlib.ExitStack', 'contextlib.ExitStack', ([], {}), False, 'import contextlib\n'), (46, 'numpy.asfarray', 'np.asfarray', (['[[1.0], [1.0], [1.0], [1.0], [1.0]]'], {}), True, 'import numpy as np\n'), (85, 'tests.integration.api_server.conftest.export_service_bundle', 'export_service_bundle', (['tf2_svc'], {}), False, 'from tests.integration.api_server.conftest import build_api_server_docker_image, export_service_bundle, run_api_server_docker_container\n'), (102, 'tests.integration.api_server.conftest.export_service_bundle', 'export_service_bundle', (['tf2_svc'], {}), False, 'from tests.integration.api_server.conftest import build_api_server_docker_image, export_service_bundle, run_api_server_docker_container\n'), (103, 'bentoml.load', 'bentoml.load', (['saved_path'], {}), False, 'import bentoml\n'), (48, 'tensorflow.matmul', 'tf.matmul', (['inputs', 'self.weights'], {}), True, 'import tensorflow as tf\n'), (87, 'tests.integration.api_server.conftest.build_api_server_docker_image', 'build_api_server_docker_image', (['saved_path'], {}), False, 'from tests.integration.api_server.conftest import build_api_server_docker_image, export_service_bundle, run_api_server_docker_container\n'), (89, 'tests.integration.api_server.conftest.run_api_server_docker_container', 'run_api_server_docker_container', (['server_image'], {'enable_microbatch': 'enable_microbatch', 'timeout': '(500)'}), False, 'from tests.integration.api_server.conftest import build_api_server_docker_image, export_service_bundle, run_api_server_docker_container\n'), (36, 'tensorflow.keras.initializers.Ones', 'tf.keras.initializers.Ones', ([], {}), True, 'import tensorflow as tf\n'), (51, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': 'None', 'dtype': 'tf.float64', 'name': '"""inputs"""'}), True, 'import tensorflow as tf\n'), (115, 'json.dumps', 'json.dumps', (["{'instances': test_data}"], {}), False, 'import json\n')] |
julesmuhizi/qkeras | eec5a4a9f1930d0ee51319ab7363dd038a6e68c5 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import pytest
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import DepthwiseConv2D
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from qkeras.estimate import print_qstats
from qkeras.utils import model_quantize
from qkeras import QConv2D
from qkeras.quantizers import *
def create_network():
xi = Input((28, 28, 1))
x = Conv2D(32, (3, 3))(xi)
x = Activation("relu")(x)
x = Conv2D(32, (3, 3), activation="relu")(x)
x = Activation("softmax")(x)
return Model(inputs=xi, outputs=x)
def create_mix_network():
xi = Input((28, 28, 1))
x = QConv2D(32, (3, 3), kernel_quantizer=binary())(xi)
x = Activation("relu")(x)
x = Conv2D(32, (3, 3))(x)
x = Activation("softmax")(x)
return Model(inputs=xi, outputs=x)
def create_network_with_bn():
"""Creates a network contains both QConv2D and QDepthwiseConv2D layers."""
xi = Input((28, 28, 1))
x = Conv2D(32, (3, 3))(xi)
x = BatchNormalization()(x)
x = Activation("relu")(x)
x = DepthwiseConv2D((3, 3), activation="relu")(x)
x = BatchNormalization()(x)
x = Activation("softmax")(x)
return Model(inputs=xi, outputs=x)
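# Minimal illustrative variant (not in the original tests): the smallest graph
# the conversion utilities handle is a single quantizable layer, e.g.
#
#   def create_tiny_network():
#     xi = Input((8, 8, 1))
#     x = Conv2D(4, (3, 3), activation="relu")(xi)
#     return Model(inputs=xi, outputs=x)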
def test_conversion_print_qstats():
  # This test checks that the TensorFlow references resolve properly.
m = create_network()
d = {
"QConv2D": {
"kernel_quantizer": "binary",
"bias_quantizer": "binary"
},
"QActivation": {
"relu": "ternary"
}
}
qq = model_quantize(m, d, 4)
qq.summary()
print_qstats(qq)
# test if print_qstats works with unquantized layers
print_qstats(m)
# test if print_qstats works with mixture of quantized and unquantized layers
m1 = create_mix_network()
print_qstats(m1)
m2 = create_network_with_bn()
d2 = {
"QConv2D": {
"kernel_quantizer": "binary",
"bias_quantizer": "binary"
},
"QActivation": {
"relu": "ternary"
},
"QConv2DBatchnorm": {
"kernel_quantizer": "ternary",
"bias_quantizer": "ternary",
},
"QDepthwiseConv2DBatchnorm": {
"depthwise_quantizer": "ternary",
"bias_quantizer": "ternary",
},
}
m2 = model_quantize(m2, d2, 4, enable_bn_folding=True)
m2.summary()
print_qstats(m2)
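  # Hedged inspection sketch (assumption about the converted model): after
  # model_quantize, the converted layers are Q* classes, which can be
  # verified with plain Keras introspection, e.g.
  #
  #   for layer in m2.layers:
  #     print(layer.__class__.__name__, layer.name)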
if __name__ == "__main__":
pytest.main([__file__])
| [
"tensorflow.keras.layers.DepthwiseConv2D",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Input"
] | tests/print_qstats_test.py | [(34, 'tensorflow.keras.layers.Input', 'Input', (['(28, 28, 1)'], {}), False, 'from tensorflow.keras.layers import Input\n'), (39, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'xi', 'outputs': 'x'}), False, 'from tensorflow.keras.models import Model\n'), (44, 'tensorflow.keras.layers.Input', 'Input', (['(28, 28, 1)'], {}), False, 'from tensorflow.keras.layers import Input\n'), (49, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'xi', 'outputs': 'x'}), False, 'from tensorflow.keras.models import Model\n'), (55, 'tensorflow.keras.layers.Input', 'Input', (['(28, 28, 1)'], {}), False, 'from tensorflow.keras.layers import Input\n'), (62, 'tensorflow.keras.models.Model', 'Model', ([], {'inputs': 'xi', 'outputs': 'x'}), False, 'from tensorflow.keras.models import Model\n'), (77, 'qkeras.utils.model_quantize', 'model_quantize', (['m', 'd', '(4)'], {}), False, 'from qkeras.utils import model_quantize\n'), (79, 'qkeras.estimate.print_qstats', 'print_qstats', (['qq'], {}), False, 'from qkeras.estimate import print_qstats\n'), (82, 'qkeras.estimate.print_qstats', 'print_qstats', (['m'], {}), False, 'from qkeras.estimate import print_qstats\n'), (86, 'qkeras.estimate.print_qstats', 'print_qstats', (['m1'], {}), False, 'from qkeras.estimate import print_qstats\n'), (106, 'qkeras.utils.model_quantize', 'model_quantize', (['m2', 'd2', '(4)'], {'enable_bn_folding': '(True)'}), False, 'from qkeras.utils import model_quantize\n'), (108, 'qkeras.estimate.print_qstats', 'print_qstats', (['m2'], {}), False, 'from qkeras.estimate import print_qstats\n'), (112, 'pytest.main', 'pytest.main', (['[__file__]'], {}), False, 'import pytest\n'), (35, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), False, 'from tensorflow.keras.layers import Conv2D\n'), (36, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (37, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Conv2D\n'), (38, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (46, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (47, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), False, 'from tensorflow.keras.layers import Conv2D\n'), (48, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (56, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), False, 'from tensorflow.keras.layers import Conv2D\n'), (57, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (58, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (59, 'tensorflow.keras.layers.DepthwiseConv2D', 'DepthwiseConv2D', (['(3, 3)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import DepthwiseConv2D\n'), (60, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (61, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import 
Activation\n')] |
micheleFraccaroli/autokeras | 4c0e36dc0a5418355952dd74f74b2b6e7e87ebf1 | # Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from typing import Optional
from typing import Tuple
from typing import Union
from tensorflow.keras.layers.experimental import preprocessing
from tensorflow.python.util import nest
from autokeras import analysers
from autokeras import keras_layers
from autokeras.engine import block as block_module
class Normalization(block_module.Block):
"""Perform feature-wise normalization on data.
Refer to Normalization layer in keras preprocessing layers for more information.
# Arguments
axis: Integer or tuple of integers, the axis or axes that should be
normalized (typically the features axis). We will normalize each element
in the specified axis. The default is '-1' (the innermost axis); 0 (the
batch axis) is not allowed.
"""
def __init__(self, axis: int = -1, **kwargs):
super().__init__(**kwargs)
self.axis = axis
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
return preprocessing.Normalization(axis=self.axis)(input_node)
def get_config(self):
config = super().get_config()
config.update({"axis": self.axis})
return config
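# Illustrative functional-API usage (an assumption about the public API, not
# part of this module):
#
#   import autokeras as ak
#   input_node = ak.ImageInput()
#   output_node = ak.Normalization()(input_node)
#   output_node = ak.ClassificationHead()(output_node)
#   clf = ak.AutoModel(inputs=input_node, outputs=output_node, max_trials=1)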
class TextToIntSequence(block_module.Block):
"""Convert raw texts to sequences of word indices.
# Arguments
output_sequence_length: Int. The maximum length of a sentence. If
unspecified, it would be tuned automatically.
max_tokens: Int. The maximum size of the vocabulary. Defaults to 20000.
"""
def __init__(
self,
output_sequence_length: Optional[int] = None,
max_tokens: int = 20000,
**kwargs
):
super().__init__(**kwargs)
self.output_sequence_length = output_sequence_length
self.max_tokens = max_tokens
def get_config(self):
config = super().get_config()
config.update(
{
"output_sequence_length": self.output_sequence_length,
"max_tokens": self.max_tokens,
}
)
return config
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
if self.output_sequence_length is not None:
output_sequence_length = self.output_sequence_length
else:
output_sequence_length = hp.Choice(
"output_sequence_length", [64, 128, 256, 512], default=64
)
output_node = preprocessing.TextVectorization(
max_tokens=self.max_tokens,
output_mode="int",
output_sequence_length=output_sequence_length,
)(input_node)
return output_node
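# Informally, the underlying TextVectorization layer maps each token to an
# integer index and pads/truncates rows to `output_sequence_length`. A hedged
# standalone sketch (assumes `tf` is imported and the layer is adapted on a
# corpus first):
#
#   layer = preprocessing.TextVectorization(output_sequence_length=4)
#   layer.adapt(tf.constant(["the cat sat on the mat"]))
#   layer(tf.constant(["the cat"]))   # -> int tensor of shape (1, 4)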
class TextToNgramVector(block_module.Block):
"""Convert raw texts to n-gram vectors.
# Arguments
max_tokens: Int. The maximum size of the vocabulary. Defaults to 20000.
ngrams: Int or tuple of ints. Passing an integer will create ngrams up to
that integer, and passing a tuple of integers will create ngrams for the
specified values in the tuple. If left unspecified, it will be tuned
automatically.
"""
def __init__(
self,
max_tokens: int = 20000,
ngrams: Union[int, Tuple[int], None] = None,
**kwargs
):
super().__init__(**kwargs)
self.max_tokens = max_tokens
self.ngrams = ngrams
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
if self.ngrams is not None:
ngrams = self.ngrams
else:
ngrams = hp.Int("ngrams", min_value=1, max_value=2, default=2)
return preprocessing.TextVectorization(
max_tokens=self.max_tokens,
ngrams=ngrams,
output_mode="tf-idf",
pad_to_max_tokens=True,
)(input_node)
def get_config(self):
config = super().get_config()
config.update({"max_tokens": self.max_tokens, "ngrams": self.ngrams})
return config
class ImageAugmentation(block_module.Block):
"""Collection of various image augmentation methods.
# Arguments
        translation_factor: A positive float represented as a fraction value,
            or a tuple of 2 floats giving the fractions for vertical and
            horizontal translation. For instance, `translation_factor=0.2`
            results in random translations within 20% of the width and height.
If left unspecified, it will be tuned automatically.
vertical_flip: Boolean. Whether to flip the image vertically.
If left unspecified, it will be tuned automatically.
horizontal_flip: Boolean. Whether to flip the image horizontally.
If left unspecified, it will be tuned automatically.
rotation_factor: Float. A positive float represented as fraction of 2pi
upper bound for rotating clockwise and counter-clockwise. When
represented as a single float, lower = upper.
If left unspecified, it will be tuned automatically.
        zoom_factor: A positive float represented as a fraction value, or a
            tuple of 2 floats giving the fractions for vertical and horizontal
            zoom. For instance, `zoom_factor=0.2` results in a random zoom
            factor from 80% to 120%. If left unspecified, it will be tuned
            automatically.
        contrast_factor: A positive float represented as a fraction of value,
            or a tuple of size 2 giving the lower and upper bounds. When given
            as a single float, lower = upper. The contrast factor will be
            randomly picked between [1.0 - lower, 1.0 + upper]. If left
            unspecified, it will be tuned automatically.
"""
def __init__(
self,
translation_factor: Optional[Union[float, Tuple[float, float]]] = None,
vertical_flip: Optional[bool] = None,
horizontal_flip: Optional[bool] = None,
rotation_factor: Optional[float] = None,
zoom_factor: Optional[Union[float, Tuple[float, float]]] = None,
contrast_factor: Optional[Union[float, Tuple[float, float]]] = None,
**kwargs
):
super().__init__(**kwargs)
self.translation_factor = translation_factor
self.horizontal_flip = horizontal_flip
self.vertical_flip = vertical_flip
self.rotation_factor = rotation_factor
self.zoom_factor = zoom_factor
self.contrast_factor = contrast_factor
@staticmethod
def _get_fraction_value(value):
if isinstance(value, tuple):
return value
return value, value
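    # For example (illustrative): _get_fraction_value(0.2) -> (0.2, 0.2),
    # while a tuple such as (0.1, 0.3) is returned unchanged, letting a single
    # float stand for both the height and width factors.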
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
output_node = input_node
# Translate
translation_factor = self.translation_factor
if translation_factor is None:
translation_factor = hp.Choice("translation_factor", [0.0, 0.1])
if translation_factor not in [0, (0, 0)]:
height_factor, width_factor = self._get_fraction_value(
translation_factor
)
output_node = preprocessing.RandomTranslation(
height_factor, width_factor
)(output_node)
# Flip
horizontal_flip = self.horizontal_flip
if horizontal_flip is None:
horizontal_flip = hp.Boolean("horizontal_flip", default=True)
vertical_flip = self.vertical_flip
if self.vertical_flip is None:
vertical_flip = hp.Boolean("vertical_flip", default=True)
if not horizontal_flip and not vertical_flip:
flip_mode = ""
elif horizontal_flip and vertical_flip:
flip_mode = "horizontal_and_vertical"
elif horizontal_flip and not vertical_flip:
flip_mode = "horizontal"
elif not horizontal_flip and vertical_flip:
flip_mode = "vertical"
if flip_mode != "":
output_node = preprocessing.RandomFlip(mode=flip_mode)(output_node)
# Rotate
rotation_factor = self.rotation_factor
if rotation_factor is None:
rotation_factor = hp.Choice("rotation_factor", [0.0, 0.1])
if rotation_factor != 0:
output_node = preprocessing.RandomRotation(rotation_factor)(output_node)
# Zoom
zoom_factor = self.zoom_factor
if zoom_factor is None:
zoom_factor = hp.Choice("zoom_factor", [0.0, 0.1])
        if zoom_factor not in [0, (0, 0)]:
            height_factor, width_factor = self._get_fraction_value(zoom_factor)
            # TODO: Add back RandomZoom when it is ready; until then the zoom
            # factors are computed but intentionally unused.
            # output_node = preprocessing.RandomZoom(
            #     height_factor, width_factor)(output_node)
# Contrast
contrast_factor = self.contrast_factor
if contrast_factor is None:
contrast_factor = hp.Choice("contrast_factor", [0.0, 0.1])
if contrast_factor not in [0, (0, 0)]:
output_node = preprocessing.RandomContrast(contrast_factor)(output_node)
return output_node
def get_config(self):
config = super().get_config()
config.update(
{
"translation_factor": self.translation_factor,
"horizontal_flip": self.horizontal_flip,
"vertical_flip": self.vertical_flip,
"rotation_factor": self.rotation_factor,
"zoom_factor": self.zoom_factor,
"contrast_factor": self.contrast_factor,
}
)
return config
class CategoricalToNumerical(block_module.Block):
"""Encode the categorical features to numerical features."""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.column_types = None
self.column_names = None
def build(self, hp, inputs=None):
input_node = nest.flatten(inputs)[0]
encoding = []
for column_name in self.column_names:
column_type = self.column_types[column_name]
if column_type == analysers.CATEGORICAL:
# TODO: Search to use one-hot or int.
encoding.append(keras_layers.INT)
else:
encoding.append(keras_layers.NONE)
return keras_layers.MultiCategoryEncoding(encoding)(input_node)
@classmethod
def from_config(cls, config):
column_types = config.pop("column_types")
column_names = config.pop("column_names")
instance = cls(**config)
instance.column_types = column_types
instance.column_names = column_names
return instance
def get_config(self):
config = super().get_config()
config.update(
{"column_types": self.column_types, "column_names": self.column_names}
)
return config
| [
"tensorflow.keras.layers.experimental.preprocessing.RandomRotation",
"tensorflow.keras.layers.experimental.preprocessing.RandomTranslation",
"tensorflow.keras.layers.experimental.preprocessing.TextVectorization",
"tensorflow.keras.layers.experimental.preprocessing.RandomContrast",
"tensorflow.keras.layers.experimental.preprocessing.RandomFlip",
"tensorflow.keras.layers.experimental.preprocessing.Normalization",
"tensorflow.python.util.nest.flatten"
] | autokeras/blocks/preprocessing.py | [(44, 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['inputs'], {}), False, 'from tensorflow.python.util import nest\n'), (45, 'tensorflow.keras.layers.experimental.preprocessing.Normalization', 'preprocessing.Normalization', ([], {'axis': 'self.axis'}), False, 'from tensorflow.keras.layers.experimental import preprocessing\n'), (83, 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['inputs'], {}), False, 'from tensorflow.python.util import nest\n'), (90, 'tensorflow.keras.layers.experimental.preprocessing.TextVectorization', 'preprocessing.TextVectorization', ([], {'max_tokens': 'self.max_tokens', 'output_mode': '"""int"""', 'output_sequence_length': 'output_sequence_length'}), False, 'from tensorflow.keras.layers.experimental import preprocessing\n'), (120, 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['inputs'], {}), False, 'from tensorflow.python.util import nest\n'), (125, 'tensorflow.keras.layers.experimental.preprocessing.TextVectorization', 'preprocessing.TextVectorization', ([], {'max_tokens': 'self.max_tokens', 'ngrams': 'ngrams', 'output_mode': '"""tf-idf"""', 'pad_to_max_tokens': '(True)'}), False, 'from tensorflow.keras.layers.experimental import preprocessing\n'), (191, 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['inputs'], {}), False, 'from tensorflow.python.util import nest\n'), (274, 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['inputs'], {}), False, 'from tensorflow.python.util import nest\n'), (283, 'autokeras.keras_layers.MultiCategoryEncoding', 'keras_layers.MultiCategoryEncoding', (['encoding'], {}), False, 'from autokeras import keras_layers\n'), (202, 'tensorflow.keras.layers.experimental.preprocessing.RandomTranslation', 'preprocessing.RandomTranslation', (['height_factor', 'width_factor'], {}), False, 'from tensorflow.keras.layers.experimental import preprocessing\n'), (222, 'tensorflow.keras.layers.experimental.preprocessing.RandomFlip', 'preprocessing.RandomFlip', ([], {'mode': 'flip_mode'}), False, 'from tensorflow.keras.layers.experimental import preprocessing\n'), (229, 'tensorflow.keras.layers.experimental.preprocessing.RandomRotation', 'preprocessing.RandomRotation', (['rotation_factor'], {}), False, 'from tensorflow.keras.layers.experimental import preprocessing\n'), (246, 'tensorflow.keras.layers.experimental.preprocessing.RandomContrast', 'preprocessing.RandomContrast', (['contrast_factor'], {}), False, 'from tensorflow.keras.layers.experimental import preprocessing\n')] |
micheleFraccaroli/autokeras | 4c0e36dc0a5418355952dd74f74b2b6e7e87ebf1 | # Copyright 2020 The AutoKeras Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import re
import os
from datetime import datetime
import keras_tuner
import tensorflow as tf
from packaging.version import parse
from tensorflow.python.util import nest
# from autokeras.utils.history import History
from tensorflow.keras.callbacks import History
from tqdm import tqdm
from flops_calculator import flop_calculator
import flops_losses
from colors import colors
def validate_num_inputs(inputs, num):
inputs = nest.flatten(inputs)
if not len(inputs) == num:
raise ValueError(
"Expected {num} elements in the inputs list "
"but received {len} inputs.".format(num=num, len=len(inputs))
)
def to_snake_case(name):
intermediate = re.sub("(.)([A-Z][a-z0-9]+)", r"\1_\2", name)
insecure = re.sub("([a-z])([A-Z])", r"\1_\2", intermediate).lower()
return insecure
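# For example (illustrative): to_snake_case("ImageBlock") returns
# "image_block" and to_snake_case("ResNetV2") returns "res_net_v2".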
def check_tf_version() -> None:
if parse(tf.__version__) < parse("2.3.0"):
raise ImportError(
"The Tensorflow package version needs to be at least 2.3.0 \n"
"for AutoKeras to run. Currently, your TensorFlow version is \n"
"{version}. Please upgrade with \n"
"`$ pip install --upgrade tensorflow`. \n"
"You can use `pip freeze` to check afterwards that everything is "
"ok.".format(version=tf.__version__)
)
def check_kt_version() -> None:
if parse(keras_tuner.__version__) < parse("1.0.3"):
raise ImportError(
"The Keras Tuner package version needs to be at least 1.0.3 \n"
"for AutoKeras to run. Currently, your Keras Tuner version is \n"
"{version}. Please upgrade with \n"
"`$ pip install --upgrade keras-tuner`. \n"
"You can use `pip freeze` to check afterwards that everything is "
"ok.".format(version=keras_tuner.__version__)
)
def contain_instance(instance_list, instance_type):
return any([isinstance(instance, instance_type) for instance in instance_list])
def evaluate_with_adaptive_batch_size(model, batch_size, verbose=1, **fit_kwargs):
return run_with_adaptive_batch_size(
batch_size,
lambda x, validation_data, **kwargs: model.evaluate(
x, verbose=verbose, **kwargs
),
**fit_kwargs
)
def predict_with_adaptive_batch_size(model, batch_size, verbose=1, **fit_kwargs):
return run_with_adaptive_batch_size(
batch_size,
lambda x, validation_data, **kwargs: model.predict(
x, verbose=verbose, **kwargs
),
**fit_kwargs
)
def fit_with_adaptive_batch_size(model, batch_size, **fit_kwargs):
history = run_with_adaptive_batch_size(
batch_size, lambda **kwargs: model.fit(**kwargs), **fit_kwargs
)
return model, history
def run_with_adaptive_batch_size(batch_size, func, **fit_kwargs):
x = fit_kwargs.pop("x")
validation_data = None
if "validation_data" in fit_kwargs:
validation_data = fit_kwargs.pop("validation_data")
while batch_size > 0:
try:
history = func(x=x, validation_data=validation_data, **fit_kwargs)
break
except tf.errors.ResourceExhaustedError as e:
if batch_size == 1:
raise e
batch_size //= 2
print(
"Not enough memory, reduce batch size to {batch_size}.".format(
batch_size=batch_size
)
)
x = x.unbatch().batch(batch_size)
if validation_data is not None:
validation_data = validation_data.unbatch().batch(batch_size)
return history
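# Example of the fallback behaviour (illustrative): starting from
# batch_size=32, an out-of-memory error at 32 and again at 16 re-batches the
# datasets and retries the call at batch size 8.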
'''
Custom training loop here###############################################################################################
'''
def custom_training_loop(model, batch_size, max_flops, **fit_kwargs):
@tf.function
def _train_step(x, y, model, loss_fn, optimizer, train_epoch_accuracy, train_epoch_loss_avg, max_flops, actual_flops):
with tf.GradientTape() as tape:
logits = model(x, training=True)
loss_value = loss_fn(y, logits)
loss_value += abs(max_flops - actual_flops)
grads = tape.gradient(loss_value, model.trainable_weights)
optimizer.apply_gradients(zip(grads, model.trainable_weights))
train_epoch_accuracy.update_state(y, logits)
train_epoch_loss_avg.update_state(loss_value)
return loss_value
@tf.function
def _validation_step(x, y, loss_fn, model, val_epoch_accuracy, val_epoch_loss_avg):
val_logits = model(x, training=False)
loss = loss_fn(y, val_logits)
val_epoch_accuracy.update_state(y, val_logits)
val_epoch_loss_avg.update_state(loss)
    # Keep the log directory consistent with the summary writer below
    # ("logs/<date>"); makedirs tolerates a pre-existing directory.
    folder = datetime.now().strftime("%b-%d-%Y")
    os.makedirs("logs/{}".format(folder), exist_ok=True)
history = History()
history.model = model
print(model.summary())
logs = {'loss': None, 'accuracy': None, 'val_loss': None, 'val_accuracy': None}
writer = tf.summary.create_file_writer("logs/{}".format(folder))
fc = flop_calculator()
actual_flops = fc.get_flops(history.model)
loss_fn = model.loss['classification_head_1']
optimizer = model.optimizer
history.on_train_begin()
pbar = tqdm(range(fit_kwargs['epochs']))
for epoch in pbar:
train_epoch_loss_avg = tf.keras.metrics.Mean()
train_epoch_accuracy = tf.keras.metrics.CategoricalAccuracy()
val_epoch_loss_avg = tf.keras.metrics.Mean()
val_epoch_accuracy = tf.keras.metrics.CategoricalAccuracy()
        # history.model is the same object as model (assigned above), so one
        # extend is enough; mutating model.metrics is a workaround to let the
        # History callback see these tracked metrics.
        model.metrics.extend((train_epoch_loss_avg, train_epoch_accuracy))
        model.metrics_names.extend(('loss', 'accuracy'))
# Training loop -
for x, y in fit_kwargs['x']:
pbar.set_description("TRAINING")
loss_value = _train_step(x, y, model, loss_fn, optimizer, train_epoch_accuracy, train_epoch_loss_avg, max_flops, actual_flops)
# Display metrics at the end of each epoch.
train_acc = train_epoch_accuracy.result()
train_loss = train_epoch_loss_avg.result()
# print("Training acc over epoch: %.4f" % (float(train_acc),))
# print("Training loss over epoch: %.4f" % (float(train_loss),))
# pbar.set_postfix({'Training acc': float(train_acc), 'Training loss': float(train_loss)})
# Reset training metrics at the end of each epoch
train_epoch_accuracy.reset_states()
train_epoch_loss_avg.reset_states()
# Run a validation loop at the end of each epoch.
for xv, yv in fit_kwargs['validation_data']:
# pbar.set_description("VALIDATION")
_validation_step(xv, yv, loss_fn, model, val_epoch_accuracy, val_epoch_loss_avg)
val_acc = val_epoch_accuracy.result()
val_loss = val_epoch_loss_avg.result()
val_epoch_accuracy.reset_states()
val_epoch_loss_avg.reset_states()
# print("Validation acc: %.4f" % (float(val_acc),))
# print("Validation loss: %.4f" % (float(val_loss),))
pbar.set_postfix({'Training acc': float(train_acc), 'Training loss': float(train_loss), 'Valid acc': float(val_acc), 'Valid loss': float(val_loss)})
with writer.as_default():
tf.summary.scalar("Train Loss", train_epoch_loss_avg.result(), step=epoch)
tf.summary.scalar("Train Acc", train_epoch_accuracy.result(), step=epoch)
tf.summary.scalar("Flops", actual_flops, step=epoch, description="Flops of the model")
tf.summary.scalar("|max_flops - actual_flops|", abs(max_flops - actual_flops), step=epoch)
with writer.as_default():
tf.summary.scalar("Validation Loss", val_epoch_loss_avg.result(), step=epoch)
tf.summary.scalar("Val Acc", val_epoch_accuracy.result(), step=epoch)
writer.flush()
# if epoch % 10 == 0:
# model.save('tf_ckpts/model_{}@{}epoch'.format(model.name, epoch))
# print("Saved checkpoint for step {} in {}".format(epoch, 'tf_ckpts'))
logs['loss'] = train_loss.numpy()
logs['accuracy'] = train_acc.numpy()
logs['val_loss'] = val_loss.numpy()
logs['val_accuracy'] = val_acc.numpy()
history.on_epoch_end(epoch, logs=logs)
# stopEarly = Callback_EarlyStopping(val_loss_results, min_delta=0.5, patience=20)
# if stopEarly:
# print("Callback_EarlyStopping signal received at epoch= %d/%d" % (epoch, num_epochs))
# print("Terminating training ")
# model.save('tf_ckpts/model_{}@{}epoch'.format(model_name, epoch))
# print("Saved checkpoint for step {} in {}".format(epoch, 'tf_ckpts'))
# break
writer.close()
return model, history
'''
########################################################################################################################
'''
def get_hyperparameter(value, hp, dtype):
    # Fall back to the tunable hyperparameter only when no fixed value was
    # given; `dtype` is accepted for interface compatibility and unused here.
    if value is None:
        return hp
    return value
def add_to_hp(hp, hps, name=None):
"""Add the HyperParameter (self) to the HyperParameters.
# Arguments
hp: keras_tuner.HyperParameters.
name: String. If left unspecified, the hp name is used.
"""
if not isinstance(hp, keras_tuner.engine.hyperparameters.HyperParameter):
return hp
kwargs = hp.get_config()
if name is None:
name = hp.name
kwargs.pop("conditions")
kwargs.pop("name")
class_name = hp.__class__.__name__
func = getattr(hps, class_name)
return func(name=name, **kwargs)
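# Hedged usage sketch (assumes keras_tuner's hyperparameter classes; names
# here are illustrative):
#
#   from keras_tuner.engine import hyperparameters as hp_module
#   source_hp = hp_module.Int("units", min_value=16, max_value=64)
#   hps = keras_tuner.HyperParameters()
#   value = add_to_hp(source_hp, hps)   # registers "units" on hps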
| [
"tensorflow.python.util.nest.flatten",
"tensorflow.keras.callbacks.History",
"tensorflow.keras.metrics.CategoricalAccuracy",
"tensorflow.summary.scalar",
"tensorflow.keras.metrics.Mean",
"tensorflow.GradientTape"
] | autokeras/utils/utils.py | [(31, 'tensorflow.python.util.nest.flatten', 'nest.flatten', (['inputs'], {}), False, 'from tensorflow.python.util import nest\n'), (40, 're.sub', 're.sub', (['"""(.)([A-Z][a-z0-9]+)"""', '"""\\\\1_\\\\2"""', 'name'], {}), False, 'import re\n'), (154, 'tensorflow.keras.callbacks.History', 'History', ([], {}), False, 'from tensorflow.keras.callbacks import History\n'), (159, 'flops_calculator.flop_calculator', 'flop_calculator', ([], {}), False, 'from flops_calculator import flop_calculator\n'), (46, 'packaging.version.parse', 'parse', (['tf.__version__'], {}), False, 'from packaging.version import parse\n'), (46, 'packaging.version.parse', 'parse', (['"""2.3.0"""'], {}), False, 'from packaging.version import parse\n'), (58, 'packaging.version.parse', 'parse', (['keras_tuner.__version__'], {}), False, 'from packaging.version import parse\n'), (58, 'packaging.version.parse', 'parse', (['"""1.0.3"""'], {}), False, 'from packaging.version import parse\n'), (168, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), True, 'import tensorflow as tf\n'), (169, 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {}), True, 'import tensorflow as tf\n'), (170, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), True, 'import tensorflow as tf\n'), (171, 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {}), True, 'import tensorflow as tf\n'), (41, 're.sub', 're.sub', (['"""([a-z])([A-Z])"""', '"""\\\\1_\\\\2"""', 'intermediate'], {}), False, 'import re\n'), (132, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (212, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""Flops"""', 'actual_flops'], {'step': 'epoch', 'description': '"""Flops of the model"""'}), True, 'import tensorflow as tf\n'), (150, 'datetime.datetime.now', 'datetime.now', ([], {}), False, 'from datetime import datetime\n')] |
exajobs/machine-learning-collection | 84444f0bfe351efea6e3b2813e47723bd8d769cc |
# coding: utf-8
# # Script for testing the TensorFlow 2.0 setup
#
# This script is for testing the TensorFlow
# (https://www.tensorflow.org/) setup using the Keras API
# (https://keras.io/). Below is a set of required imports.
#
# No error messages should appear. In particular, **TensorFlow 2 is
# required**.
#
# Some warnings may appear; this should be fine.
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras.layers import SimpleRNN, LSTM, GRU
from tensorflow.keras.utils import to_categorical
from distutils.version import LooseVersion as LV
from tensorflow.keras.datasets import mnist, fashion_mnist, imdb
from sklearn.model_selection import train_test_split
import numpy as np
print('Using Tensorflow version: {}, '
'and Keras version: {}.'.format(tf.__version__,
tf.keras.__version__))
assert LV(tf.__version__) >= LV("2.0.0")
# Let's check if we have GPU available.
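# (tf.test.is_gpu_available() is deprecated in recent TF 2.x releases;
# tf.config.list_physical_devices('GPU') is the recommended replacement.)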
if tf.test.is_gpu_available():
from tensorflow.python.client import device_lib
for d in device_lib.list_local_devices():
if d.device_type == 'GPU':
print('GPU', d.physical_device_desc)
else:
print('No GPU, using CPU instead.')
# ## Getting started: 30 seconds to Keras
#
# (This section is adapted from https://keras.io/)
#
# The core data structure of Keras is a **model**, a way to organize
# layers. The main type of model is the Sequential model, a linear
# stack of layers.
#
# A model is initialized by calling Sequential():
model = Sequential()
# Stacking layers is as easy as .add():
model.add(Dense(units=64, input_dim=100))
model.add(Activation("relu"))
model.add(Dense(units=10))
model.add(Activation("softmax"))
# A summary of the model:
print(model.summary())
# Once your model looks good, configure its learning process with
# .compile():
model.compile(loss='categorical_crossentropy',
optimizer='sgd',
metrics=['accuracy'])
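# A hedged alternative (not in the original script): the optimizer can also be
# passed as an object when its hyperparameters need to be set explicitly:
#   model.compile(loss='categorical_crossentropy',
#                 optimizer=tf.keras.optimizers.SGD(learning_rate=0.01),
#                 metrics=['accuracy'])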
# You can now begin training your model with .fit(). Let's generate
# some random data and use it to train the model:
X_train = np.random.rand(128, 100)
Y_train = to_categorical(np.random.randint(10, size=128))
model.fit(X_train, Y_train, epochs=5, batch_size=32, verbose=2)
# Evaluate your performance on test data with .evaluate():
X_test = np.random.rand(64, 100)
Y_test = to_categorical(np.random.randint(10, size=64))
loss, acc = model.evaluate(X_test, Y_test, batch_size=32)
print()
print('loss:', loss, 'acc:', acc)
| [
"tensorflow.keras.layers.Activation",
"tensorflow.python.client.device_lib.list_local_devices",
"tensorflow.keras.layers.Dense",
"numpy.random.rand",
"tensorflow.test.is_gpu_available",
"tensorflow.keras.models.Sequential",
"numpy.random.randint"
] | machine-learning-scripts/examples/tf2-test.py | [(37, 'tensorflow.test.is_gpu_available', 'tf.test.is_gpu_available', ([], {}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras.models import Sequential\n'), (78, 'numpy.random.rand', 'np.random.rand', (['(128)', '(100)'], {}), True, 'import numpy as np\n'), (85, 'numpy.random.rand', 'np.random.rand', (['(64)', '(100)'], {}), True, 'import numpy as np\n'), (33, 'distutils.version.LooseVersion', 'LV', (['tf.__version__'], {}), True, 'from distutils.version import LooseVersion as LV\n'), (33, 'distutils.version.LooseVersion', 'LV', (['"""2.0.0"""'], {}), True, 'from distutils.version import LooseVersion as LV\n'), (39, 'tensorflow.python.client.device_lib.list_local_devices', 'device_lib.list_local_devices', ([], {}), False, 'from tensorflow.python.client import device_lib\n'), (59, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(64)', 'input_dim': '(100)'}), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten\n'), (60, 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten\n'), (61, 'tensorflow.keras.layers.Dense', 'Dense', ([], {'units': '(10)'}), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten\n'), (62, 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), False, 'from tensorflow.keras.layers import Dense, Activation, Dropout, Flatten\n'), (79, 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(128)'}), True, 'import numpy as np\n'), (86, 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(64)'}), True, 'import numpy as np\n')] |
tansyab1/PhD-project | 15f170c1976e58697454cd992687d808d1b2284a | #!/usr/bin/env python
# coding: utf-8
# In[ ]:
#Importing all required libraries
# In[1]:
from __future__ import absolute_import, division, print_function, unicode_literals
# In[2]:
import tensorflow as tf
import pathlib
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Conv2D, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
import numpy as np
import matplotlib.pyplot as plt
# In[3]:
AUTOTUNE = tf.data.experimental.AUTOTUNE
# In[4]:
import IPython.display as display
from PIL import Image
import numpy as np
import matplotlib.pyplot as plt
import os
# In[5]:
physical_devices = tf.config.list_physical_devices('GPU')
if physical_devices:  # guard against machines with no visible GPU
    tf.config.experimental.set_memory_growth(physical_devices[0], True)
# In[6]:
tf.__version__
# In[7]:
#Train and test data folder
train_data_dir = "/home/nguyentansy/PhD-work/Datasets/Image - Split 0-1/1"
test_data_dir = "/home/nguyentansy/PhD-work/Datasets/Image - Split 0-1/0"
# In[8]:
train_data_dir = pathlib.Path(train_data_dir)
test_data_dir = pathlib.Path(test_data_dir)
# In[9]:
#count how many images are there
image_count = len(list(train_data_dir.glob('*/*.jpg')))
image_count
# In[10]:
total_train = len(list(train_data_dir.glob('*/*.jpg')))
total_val = len(list(test_data_dir.glob('*/*.jpg')))
# In[11]:
#get the class names
CLASS_NAMES = np.array([item.name for item in train_data_dir.glob('*') if item.name != "LICENSE.txt"])
CLASS_NAMES
# In[12]:
#Define parameter for training
batch_size = 32
IMG_HEIGHT = 224
IMG_WIDTH = 224
STEPS_PER_EPOCH = np.ceil(image_count/batch_size)
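# (STEPS_PER_EPOCH is computed here but not used below; the fit() calls derive
# their steps from total_train // batch_size instead.)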
epochs = 8
num_classes = len(CLASS_NAMES) #23
# In[13]:
# We use image data generators to load the images and prepare them for training
train_image_generator = ImageDataGenerator() # Generator for our training data
validation_image_generator = ImageDataGenerator() # Generator for our validation data
train_data_gen = train_image_generator.flow_from_directory(directory=str(train_data_dir),
batch_size=batch_size,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
classes = list(CLASS_NAMES),
class_mode='categorical'
)
val_data_gen = validation_image_generator.flow_from_directory(directory=str(test_data_dir),
batch_size=batch_size,
shuffle=True,
target_size=(IMG_HEIGHT, IMG_WIDTH),
class_mode='categorical',
classes = list(CLASS_NAMES)
)
#get class order from directories
print(train_data_gen.class_indices.keys())
print(val_data_gen.class_indices.keys())
# In[14]:
IMG_SIZE = 224
IMG_SHAPE = (IMG_SIZE, IMG_SIZE, 3)
# Base model from the pre-trained network, ResNet-50 in this case
base_model = tf.keras.applications.ResNet50(input_shape=IMG_SHAPE,
include_top=False,
weights='imagenet')
base_model.trainable = False
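# With the convolutional base frozen, only the new classification head is
# trained in this first phase; part of the base is unfrozen for fine-tuning
# further below.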
# In[15]:
#add new classification layer
x = base_model.output
x = tf.keras.layers.GlobalAveragePooling2D()(x)
x = tf.keras.layers.Dense(num_classes,activation='softmax')(x)
model = tf.keras.models.Model(inputs=base_model.input, outputs=x)
base_learning_rate = 0.001
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=base_learning_rate),
loss='categorical_crossentropy',
metrics=['accuracy'])
# In[16]:
#fit the model
history = model.fit(
train_data_gen,
steps_per_epoch=total_train // batch_size,
epochs=epochs,
validation_data=val_data_gen,
validation_steps=total_val // batch_size
)
# In[23]:
#create training plots
history
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs_range = range(epochs)
plt.figure(figsize=(30, 8))
plt.subplot(1, 2, 1)
plt.plot(epochs_range, acc, label='Training Accuracy')
plt.plot(epochs_range, val_acc, label='Validation Accuracy')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(1, 2, 2)
plt.plot(epochs_range, loss, label='Training Loss')
plt.plot(epochs_range, val_loss, label='Validation Loss')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.show()
# In[24]:
base_model.trainable = True #now we want to train the base model
# In[25]:
# How many layers are in the base model
print("Layers base model: ", len(base_model.layers))
# Fine tune from layer x
fine_tune_at = 100
# Freeze all the layers before the fine tune starting layer
for layer in base_model.layers[:fine_tune_at]:
layer.trainable = False
# In[26]:
model.compile(loss='categorical_crossentropy',
optimizer=tf.keras.optimizers.RMSprop(learning_rate=base_learning_rate / 10),
metrics=['accuracy'])
# In[27]:
model.summary()
# In[28]:
#Fine tune step
initial_epochs = 7
fine_tune_epochs = 3
total_epochs = initial_epochs + fine_tune_epochs
train_batches = total_train // batch_size
print(total_val // batch_size)
validation_batches = total_val // batch_size
history_fine = model.fit(
train_data_gen,
steps_per_epoch=total_train // batch_size,
epochs=total_epochs,
initial_epoch = history.epoch[-1],
validation_data=val_data_gen,
validation_steps=total_val // batch_size
)
# In[29]:
acc += history_fine.history['accuracy']
val_acc += history_fine.history['val_accuracy']
loss += history_fine.history['loss']
val_loss += history_fine.history['val_loss']
# In[30]:
#Plot fine tuning
plt.figure(figsize=(8, 8))
plt.subplot(2, 1, 1)
plt.plot(acc, label='Training Accuracy')
plt.plot(val_acc, label='Validation Accuracy')
plt.ylim([0.8, 1])
plt.plot([initial_epochs-1,initial_epochs-1],
plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='lower right')
plt.title('Training and Validation Accuracy')
plt.subplot(2, 1, 2)
plt.plot(loss, label='Training Loss')
plt.plot(val_loss, label='Validation Loss')
plt.ylim([0, 1.0])
plt.plot([initial_epochs-1,initial_epochs-1],
plt.ylim(), label='Start Fine Tuning')
plt.legend(loc='upper right')
plt.title('Training and Validation Loss')
plt.xlabel('epoch')
plt.show()
# In[31]:
#model save and load
import os
# In[32]:
#some time stamp
from datetime import datetime
# current date and time.
now = datetime.now()
timestamp = datetime.timestamp(now)
print("timestamp =", timestamp)
# In[35]:
model_filename = str(timestamp)+'mymodel.h5'
model.save(model_filename)
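# Note: the '.h5' suffix makes tf.keras save in the legacy HDF5 format;
# without an extension, the model would be saved in the SavedModel format.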
# In[36]:
#To apply the model on new data
new_model = tf.keras.models.load_model(model_filename)
# Show the model architecture
new_model.summary()
# In[38]:
from tensorflow.keras.preprocessing import image
#image directory containing images to test
img_dir="/home/nguyentansy/PhD-work/Datasets/all/0/polyps"
for i,img in enumerate(os.listdir(img_dir)):
tmpimage = image.load_img(os.path.join(img_dir,img), target_size=(IMG_SIZE,IMG_SIZE))
tmpimage = np.expand_dims(tmpimage, axis=0).astype('float32')
result_class=new_model.predict(tmpimage)
print(img,";",CLASS_NAMES[result_class.argmax(axis=-1)])
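# Note: the ImageDataGenerator instances above were created without rescaling,
# so the model was trained on raw RGB values in [0, 255]; inference here feeds
# the same raw values. Any preprocessing added at training time (e.g. a
# rescale) would also have to be applied before predict().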
# In[ ]:
| [
"matplotlib.pyplot.legend",
"tensorflow.keras.models.load_model",
"numpy.expand_dims",
"matplotlib.pyplot.plot",
"tensorflow.config.list_physical_devices",
"tensorflow.keras.preprocessing.image.ImageDataGenerator",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.keras.optimizers.RMSprop",
"numpy.ceil",
"tensorflow.keras.applications.ResNet50",
"matplotlib.pyplot.subplot",
"matplotlib.pyplot.figure",
"matplotlib.pyplot.title",
"tensorflow.keras.models.Model",
"matplotlib.pyplot.ylim",
"tensorflow.keras.layers.Dense",
"matplotlib.pyplot.show",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.optimizers.Adam",
"matplotlib.pyplot.xlabel"
] | 2021/src/Classification/Fine-Tuned-ResNet-50/Fine-Tuned-ResNet-50.py | [(48, 'tensorflow.config.list_physical_devices', 'tf.config.list_physical_devices', (['"""GPU"""'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['physical_devices[0]', '(True)'], {}), True, 'import tensorflow as tf\n'), (70, 'pathlib.Path', 'pathlib.Path', (['train_data_dir'], {}), False, 'import pathlib\n'), (71, 'pathlib.Path', 'pathlib.Path', (['test_data_dir'], {}), False, 'import pathlib\n'), (104, 'numpy.ceil', 'np.ceil', (['(image_count / batch_size)'], {}), True, 'import numpy as np\n'), (114, 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), (115, 'tensorflow.keras.preprocessing.image.ImageDataGenerator', 'ImageDataGenerator', ([], {}), False, 'from tensorflow.keras.preprocessing.image import ImageDataGenerator\n'), (145, 'tensorflow.keras.applications.ResNet50', 'tf.keras.applications.ResNet50', ([], {'input_shape': 'IMG_SHAPE', 'include_top': '(False)', 'weights': '"""imagenet"""'}), True, 'import tensorflow as tf\n'), (160, 'tensorflow.keras.models.Model', 'tf.keras.models.Model', ([], {'inputs': 'base_model.input', 'outputs': 'x'}), True, 'import tensorflow as tf\n'), (194, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(30, 8)'}), True, 'import matplotlib.pyplot as plt\n'), (195, 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(1)'], {}), True, 'import matplotlib.pyplot as plt\n'), (196, 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'acc'], {'label': '"""Training Accuracy"""'}), True, 'import matplotlib.pyplot as plt\n'), (197, 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_acc'], {'label': '"""Validation Accuracy"""'}), True, 'import matplotlib.pyplot as plt\n'), (198, 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), True, 'import matplotlib.pyplot as plt\n'), (199, 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Accuracy"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (201, 'matplotlib.pyplot.subplot', 'plt.subplot', (['(1)', '(2)', '(2)'], {}), True, 'import matplotlib.pyplot as plt\n'), (202, 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'loss'], {'label': '"""Training Loss"""'}), True, 'import matplotlib.pyplot as plt\n'), (203, 'matplotlib.pyplot.plot', 'plt.plot', (['epochs_range', 'val_loss'], {'label': '"""Validation Loss"""'}), True, 'import matplotlib.pyplot as plt\n'), (204, 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), True, 'import matplotlib.pyplot as plt\n'), (205, 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Loss"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (206, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (278, 'matplotlib.pyplot.figure', 'plt.figure', ([], {'figsize': '(8, 8)'}), True, 'import matplotlib.pyplot as plt\n'), (279, 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(1)'], {}), True, 'import matplotlib.pyplot as plt\n'), (280, 'matplotlib.pyplot.plot', 'plt.plot', (['acc'], {'label': '"""Training Accuracy"""'}), True, 'import matplotlib.pyplot as plt\n'), (281, 'matplotlib.pyplot.plot', 'plt.plot', (['val_acc'], {'label': '"""Validation Accuracy"""'}), True, 'import matplotlib.pyplot as plt\n'), (282, 
'matplotlib.pyplot.ylim', 'plt.ylim', (['[0.8, 1]'], {}), True, 'import matplotlib.pyplot as plt\n'), (285, 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""lower right"""'}), True, 'import matplotlib.pyplot as plt\n'), (286, 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Accuracy"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (288, 'matplotlib.pyplot.subplot', 'plt.subplot', (['(2)', '(1)', '(2)'], {}), True, 'import matplotlib.pyplot as plt\n'), (289, 'matplotlib.pyplot.plot', 'plt.plot', (['loss'], {'label': '"""Training Loss"""'}), True, 'import matplotlib.pyplot as plt\n'), (290, 'matplotlib.pyplot.plot', 'plt.plot', (['val_loss'], {'label': '"""Validation Loss"""'}), True, 'import matplotlib.pyplot as plt\n'), (291, 'matplotlib.pyplot.ylim', 'plt.ylim', (['[0, 1.0]'], {}), True, 'import matplotlib.pyplot as plt\n'), (294, 'matplotlib.pyplot.legend', 'plt.legend', ([], {'loc': '"""upper right"""'}), True, 'import matplotlib.pyplot as plt\n'), (295, 'matplotlib.pyplot.title', 'plt.title', (['"""Training and Validation Loss"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (296, 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""epoch"""'], {}), True, 'import matplotlib.pyplot as plt\n'), (297, 'matplotlib.pyplot.show', 'plt.show', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (313, 'datetime.datetime.now', 'datetime.now', ([], {}), False, 'from datetime import datetime\n'), (314, 'datetime.datetime.timestamp', 'datetime.timestamp', (['now'], {}), False, 'from datetime import datetime\n'), (329, 'tensorflow.keras.models.load_model', 'tf.keras.models.load_model', (['model_filename'], {}), True, 'import tensorflow as tf\n'), (157, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'tf.keras.layers.GlobalAveragePooling2D', ([], {}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['num_classes'], {'activation': '"""softmax"""'}), True, 'import tensorflow as tf\n'), (284, 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (293, 'matplotlib.pyplot.ylim', 'plt.ylim', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (343, 'os.listdir', 'os.listdir', (['img_dir'], {}), False, 'import os\n'), (163, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': 'base_learning_rate'}), True, 'import tensorflow as tf\n'), (233, 'tensorflow.keras.optimizers.RMSprop', 'tf.keras.optimizers.RMSprop', ([], {'lr': '(base_learning_rate / 10)'}), True, 'import tensorflow as tf\n'), (344, 'os.path.join', 'os.path.join', (['img_dir', 'img'], {}), False, 'import os\n'), (345, 'numpy.expand_dims', 'np.expand_dims', (['tmpimage'], {'axis': '(0)'}), True, 'import numpy as np\n')] |
pyoung2778/models | 45fd9249893b07b73447cf849a770891734c7e3a | # Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Video classification task definition."""
from typing import Any, Optional, List, Tuple
from absl import logging
import tensorflow as tf
from official.core import base_task
from official.core import task_factory
from official.modeling import tf_utils
from official.vision.beta.configs import video_classification as exp_cfg
from official.vision.beta.dataloaders import input_reader_factory
from official.vision.beta.dataloaders import video_input
from official.vision.beta.modeling import factory_3d
@task_factory.register_task_cls(exp_cfg.VideoClassificationTask)
class VideoClassificationTask(base_task.Task):
"""A task for video classification."""
def _get_num_classes(self):
"""Gets the number of classes."""
return self.task_config.train_data.num_classes
def _get_feature_shape(self):
"""Get the common feature shape for train and eval."""
return [
d1 if d1 == d2 else None
for d1, d2 in zip(self.task_config.train_data.feature_shape,
self.task_config.validation_data.feature_shape)
]
def _get_num_test_views(self):
"""Gets number of views for test."""
num_test_clips = self.task_config.validation_data.num_test_clips
num_test_crops = self.task_config.validation_data.num_test_crops
num_test_views = num_test_clips * num_test_crops
return num_test_views
def _is_multilabel(self):
"""If the label is multi-labels."""
return self.task_config.train_data.is_multilabel
def build_model(self):
"""Builds video classification model."""
common_input_shape = self._get_feature_shape()
input_specs = tf.keras.layers.InputSpec(shape=[None] + common_input_shape)
logging.info('Build model input %r', common_input_shape)
l2_weight_decay = self.task_config.losses.l2_weight_decay
# Divide weight decay by 2.0 to match the implementation of tf.nn.l2_loss.
# (https://www.tensorflow.org/api_docs/python/tf/keras/regularizers/l2)
# (https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss)
l2_regularizer = (tf.keras.regularizers.l2(
l2_weight_decay / 2.0) if l2_weight_decay else None)
model = factory_3d.build_model(
self.task_config.model.model_type,
input_specs=input_specs,
model_config=self.task_config.model,
num_classes=self._get_num_classes(),
l2_regularizer=l2_regularizer)
return model
def initialize(self, model: tf.keras.Model):
"""Loads pretrained checkpoint."""
if not self.task_config.init_checkpoint:
return
ckpt_dir_or_file = self.task_config.init_checkpoint
if tf.io.gfile.isdir(ckpt_dir_or_file):
ckpt_dir_or_file = tf.train.latest_checkpoint(ckpt_dir_or_file)
# Restoring checkpoint.
if self.task_config.init_checkpoint_modules == 'all':
ckpt = tf.train.Checkpoint(**model.checkpoint_items)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
elif self.task_config.init_checkpoint_modules == 'backbone':
ckpt = tf.train.Checkpoint(backbone=model.backbone)
status = ckpt.read(ckpt_dir_or_file)
status.expect_partial().assert_existing_objects_matched()
else:
raise ValueError(
"Only 'all' or 'backbone' can be used to initialize the model.")
logging.info('Finished loading pretrained checkpoint from %s',
ckpt_dir_or_file)
def _get_dataset_fn(self, params):
if params.file_type == 'tfrecord':
return tf.data.TFRecordDataset
else:
raise ValueError('Unknown input file type {!r}'.format(params.file_type))
def _get_decoder_fn(self, params):
if params.tfds_name:
decoder = video_input.VideoTfdsDecoder(
image_key=params.image_field_key, label_key=params.label_field_key)
else:
decoder = video_input.Decoder(
image_key=params.image_field_key, label_key=params.label_field_key)
if self.task_config.train_data.output_audio:
assert self.task_config.train_data.audio_feature, 'audio feature is empty'
decoder.add_feature(self.task_config.train_data.audio_feature,
tf.io.VarLenFeature(dtype=tf.float32))
return decoder.decode
def build_inputs(self,
params: exp_cfg.DataConfig,
input_context: Optional[tf.distribute.InputContext] = None):
"""Builds classification input."""
parser = video_input.Parser(
input_params=params,
image_key=params.image_field_key,
label_key=params.label_field_key)
postprocess_fn = video_input.PostBatchProcessor(params)
reader = input_reader_factory.input_reader_generator(
params,
dataset_fn=self._get_dataset_fn(params),
decoder_fn=self._get_decoder_fn(params),
parser_fn=parser.parse_fn(params.is_training),
postprocess_fn=postprocess_fn)
dataset = reader.read(input_context=input_context)
return dataset
def build_losses(self,
labels: Any,
model_outputs: Any,
aux_losses: Optional[Any] = None):
"""Sparse categorical cross entropy loss.
Args:
labels: labels.
model_outputs: Output logits of the classifier.
aux_losses: auxiliarly loss tensors, i.e. `losses` in keras.Model.
Returns:
The total loss tensor.
"""
all_losses = {}
losses_config = self.task_config.losses
total_loss = None
if self._is_multilabel():
entropy = -tf.reduce_mean(
tf.reduce_sum(model_outputs * tf.math.log(model_outputs + 1e-8), -1))
total_loss = tf.keras.losses.binary_crossentropy(
labels, model_outputs, from_logits=False)
all_losses.update({
'class_loss': total_loss,
'entropy': entropy,
})
else:
if losses_config.one_hot:
total_loss = tf.keras.losses.categorical_crossentropy(
labels,
model_outputs,
from_logits=False,
label_smoothing=losses_config.label_smoothing)
else:
total_loss = tf.keras.losses.sparse_categorical_crossentropy(
labels, model_outputs, from_logits=False)
total_loss = tf_utils.safe_mean(total_loss)
all_losses.update({
'class_loss': total_loss,
})
if aux_losses:
all_losses.update({
'reg_loss': aux_losses,
})
total_loss += tf.add_n(aux_losses)
all_losses[self.loss] = total_loss
return all_losses
def build_metrics(self, training: bool = True):
"""Gets streaming metrics for training/validation."""
if self.task_config.losses.one_hot:
metrics = [
tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
tf.keras.metrics.TopKCategoricalAccuracy(k=1, name='top_1_accuracy'),
tf.keras.metrics.TopKCategoricalAccuracy(k=5, name='top_5_accuracy')
]
if self._is_multilabel():
metrics.append(
tf.keras.metrics.AUC(
curve='ROC', multi_label=self._is_multilabel(), name='ROC-AUC'))
metrics.append(
tf.keras.metrics.RecallAtPrecision(
0.95, name='RecallAtPrecision95'))
metrics.append(
tf.keras.metrics.AUC(
curve='PR', multi_label=self._is_multilabel(), name='PR-AUC'))
if self.task_config.metrics.use_per_class_recall:
for i in range(self._get_num_classes()):
metrics.append(
tf.keras.metrics.Recall(class_id=i, name=f'recall-{i}'))
else:
metrics = [
tf.keras.metrics.SparseCategoricalAccuracy(name='accuracy'),
tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=1, name='top_1_accuracy'),
tf.keras.metrics.SparseTopKCategoricalAccuracy(
k=5, name='top_5_accuracy')
]
return metrics
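# The Categorical/TopK metrics above expect one-hot labels; the Sparse
# variants are used when labels are integer class indices.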
def process_metrics(self, metrics: List[Any], labels: Any,
model_outputs: Any):
"""Process and update metrics.
Called when using custom training loop API.
Args:
metrics: a nested structure of metrics objects. The return of function
self.build_metrics.
labels: a tensor or a nested structure of tensors.
model_outputs: a tensor or a nested structure of tensors. For example,
output of the keras model built by self.build_model.
"""
for metric in metrics:
metric.update_state(labels, model_outputs)
def train_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
optimizer: tf.keras.optimizers.Optimizer,
metrics: Optional[List[Any]] = None):
"""Does forward and backward.
Args:
inputs: a (features, labels) tuple of input tensors.
model: the model, forward pass definition.
optimizer: the optimizer for this training step.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
num_replicas = tf.distribute.get_strategy().num_replicas_in_sync
with tf.GradientTape() as tape:
outputs = model(features, training=True)
# Casting output layer as float32 is necessary when mixed_precision is
# mixed_float16 or mixed_bfloat16 to ensure the output is cast as float32.
outputs = tf.nest.map_structure(
lambda x: tf.cast(x, tf.float32), outputs)
# Computes per-replica loss.
if self._is_multilabel():
outputs = tf.math.sigmoid(outputs)
else:
outputs = tf.math.softmax(outputs)
all_losses = self.build_losses(
model_outputs=outputs, labels=labels, aux_losses=model.losses)
loss = all_losses[self.loss]
# Scales loss as the default gradients allreduce performs sum inside the
# optimizer.
scaled_loss = loss / num_replicas
# For mixed_precision policy, when LossScaleOptimizer is used, loss is
# scaled for numerical stability.
if isinstance(
optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
scaled_loss = optimizer.get_scaled_loss(scaled_loss)
tvars = model.trainable_variables
grads = tape.gradient(scaled_loss, tvars)
# Scales back gradient before apply_gradients when LossScaleOptimizer is
# used.
if isinstance(optimizer, tf.keras.mixed_precision.LossScaleOptimizer):
grads = optimizer.get_unscaled_gradients(grads)
optimizer.apply_gradients(list(zip(grads, tvars)))
logs = all_losses
if metrics:
self.process_metrics(metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics})
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
def validation_step(self,
inputs: Tuple[Any, Any],
model: tf.keras.Model,
metrics: Optional[List[Any]] = None):
"""Validatation step.
Args:
inputs: a (features, labels) tuple of input tensors.
model: the keras.Model.
metrics: a nested structure of metrics objects.
Returns:
A dictionary of logs.
"""
features, labels = inputs
outputs = self.inference_step(features, model)
outputs = tf.nest.map_structure(lambda x: tf.cast(x, tf.float32), outputs)
logs = self.build_losses(model_outputs=outputs, labels=labels,
aux_losses=model.losses)
if metrics:
self.process_metrics(metrics, labels, outputs)
logs.update({m.name: m.result() for m in metrics})
elif model.compiled_metrics:
self.process_compiled_metrics(model.compiled_metrics, labels, outputs)
logs.update({m.name: m.result() for m in model.metrics})
return logs
def inference_step(self, features: tf.Tensor, model: tf.keras.Model):
"""Performs the forward step."""
outputs = model(features, training=False)
if self._is_multilabel():
outputs = tf.math.sigmoid(outputs)
else:
outputs = tf.math.softmax(outputs)
num_test_views = self._get_num_test_views()
if num_test_views > 1:
# Averaging output probabilities across multiple views.
outputs = tf.reshape(outputs, [-1, num_test_views, outputs.shape[-1]])
outputs = tf.reduce_mean(outputs, axis=1)
return outputs
| [
"tensorflow.keras.losses.categorical_crossentropy",
"tensorflow.keras.metrics.RecallAtPrecision",
"tensorflow.cast",
"tensorflow.keras.metrics.CategoricalAccuracy",
"tensorflow.add_n",
"tensorflow.keras.regularizers.l2",
"tensorflow.io.VarLenFeature",
"tensorflow.keras.layers.InputSpec",
"tensorflow.keras.metrics.SparseTopKCategoricalAccuracy",
"tensorflow.train.Checkpoint",
"tensorflow.keras.losses.sparse_categorical_crossentropy",
"tensorflow.math.sigmoid",
"tensorflow.keras.losses.binary_crossentropy",
"tensorflow.distribute.get_strategy",
"tensorflow.keras.metrics.Recall",
"tensorflow.GradientTape",
"tensorflow.io.gfile.isdir",
"tensorflow.keras.metrics.TopKCategoricalAccuracy",
"tensorflow.train.latest_checkpoint",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.math.log",
"tensorflow.math.softmax",
"tensorflow.keras.metrics.SparseCategoricalAccuracy"
] | official/vision/beta/tasks/video_classification.py | [(29, 'official.core.task_factory.register_task_cls', 'task_factory.register_task_cls', (['exp_cfg.VideoClassificationTask'], {}), False, 'from official.core import task_factory\n'), (59, 'tensorflow.keras.layers.InputSpec', 'tf.keras.layers.InputSpec', ([], {'shape': '([None] + common_input_shape)'}), True, 'import tensorflow as tf\n'), (60, 'absl.logging.info', 'logging.info', (['"""Build model input %r"""', 'common_input_shape'], {}), False, 'from absl import logging\n'), (83, 'tensorflow.io.gfile.isdir', 'tf.io.gfile.isdir', (['ckpt_dir_or_file'], {}), True, 'import tensorflow as tf\n'), (99, 'absl.logging.info', 'logging.info', (['"""Finished loading pretrained checkpoint from %s"""', 'ckpt_dir_or_file'], {}), False, 'from absl import logging\n'), (126, 'official.vision.beta.dataloaders.video_input.Parser', 'video_input.Parser', ([], {'input_params': 'params', 'image_key': 'params.image_field_key', 'label_key': 'params.label_field_key'}), False, 'from official.vision.beta.dataloaders import video_input\n'), (130, 'official.vision.beta.dataloaders.video_input.PostBatchProcessor', 'video_input.PostBatchProcessor', (['params'], {}), False, 'from official.vision.beta.dataloaders import video_input\n'), (66, 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['(l2_weight_decay / 2.0)'], {}), True, 'import tensorflow as tf\n'), (84, 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['ckpt_dir_or_file'], {}), True, 'import tensorflow as tf\n'), (88, 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {}), True, 'import tensorflow as tf\n'), (110, 'official.vision.beta.dataloaders.video_input.VideoTfdsDecoder', 'video_input.VideoTfdsDecoder', ([], {'image_key': 'params.image_field_key', 'label_key': 'params.label_field_key'}), False, 'from official.vision.beta.dataloaders import video_input\n'), (113, 'official.vision.beta.dataloaders.video_input.Decoder', 'video_input.Decoder', ([], {'image_key': 'params.image_field_key', 'label_key': 'params.label_field_key'}), False, 'from official.vision.beta.dataloaders import video_input\n'), (163, 'tensorflow.keras.losses.binary_crossentropy', 'tf.keras.losses.binary_crossentropy', (['labels', 'model_outputs'], {'from_logits': '(False)'}), True, 'import tensorflow as tf\n'), (180, 'official.modeling.tf_utils.safe_mean', 'tf_utils.safe_mean', (['total_loss'], {}), False, 'from official.modeling import tf_utils\n'), (188, 'tensorflow.add_n', 'tf.add_n', (['aux_losses'], {}), True, 'import tensorflow as tf\n'), (259, 'tensorflow.distribute.get_strategy', 'tf.distribute.get_strategy', ([], {}), True, 'import tensorflow as tf\n'), (260, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (335, 'tensorflow.math.sigmoid', 'tf.math.sigmoid', (['outputs'], {}), True, 'import tensorflow as tf\n'), (337, 'tensorflow.math.softmax', 'tf.math.softmax', (['outputs'], {}), True, 'import tensorflow as tf\n'), (341, 'tensorflow.reshape', 'tf.reshape', (['outputs', '[-1, num_test_views, outputs.shape[-1]]'], {}), True, 'import tensorflow as tf\n'), (342, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['outputs'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (92, 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'backbone': 'model.backbone'}), True, 'import tensorflow as tf\n'), (118, 'tensorflow.io.VarLenFeature', 'tf.io.VarLenFeature', ([], {'dtype': 'tf.float32'}), True, 'import tensorflow as tf\n'), (171, 
'tensorflow.keras.losses.categorical_crossentropy', 'tf.keras.losses.categorical_crossentropy', (['labels', 'model_outputs'], {'from_logits': '(False)', 'label_smoothing': 'losses_config.label_smoothing'}), True, 'import tensorflow as tf\n'), (177, 'tensorflow.keras.losses.sparse_categorical_crossentropy', 'tf.keras.losses.sparse_categorical_crossentropy', (['labels', 'model_outputs'], {'from_logits': '(False)'}), True, 'import tensorflow as tf\n'), (197, 'tensorflow.keras.metrics.CategoricalAccuracy', 'tf.keras.metrics.CategoricalAccuracy', ([], {'name': '"""accuracy"""'}), True, 'import tensorflow as tf\n'), (198, 'tensorflow.keras.metrics.TopKCategoricalAccuracy', 'tf.keras.metrics.TopKCategoricalAccuracy', ([], {'k': '(1)', 'name': '"""top_1_accuracy"""'}), True, 'import tensorflow as tf\n'), (199, 'tensorflow.keras.metrics.TopKCategoricalAccuracy', 'tf.keras.metrics.TopKCategoricalAccuracy', ([], {'k': '(5)', 'name': '"""top_5_accuracy"""'}), True, 'import tensorflow as tf\n'), (217, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {'name': '"""accuracy"""'}), True, 'import tensorflow as tf\n'), (218, 'tensorflow.keras.metrics.SparseTopKCategoricalAccuracy', 'tf.keras.metrics.SparseTopKCategoricalAccuracy', ([], {'k': '(1)', 'name': '"""top_1_accuracy"""'}), True, 'import tensorflow as tf\n'), (220, 'tensorflow.keras.metrics.SparseTopKCategoricalAccuracy', 'tf.keras.metrics.SparseTopKCategoricalAccuracy', ([], {'k': '(5)', 'name': '"""top_5_accuracy"""'}), True, 'import tensorflow as tf\n'), (269, 'tensorflow.math.sigmoid', 'tf.math.sigmoid', (['outputs'], {}), True, 'import tensorflow as tf\n'), (271, 'tensorflow.math.softmax', 'tf.math.softmax', (['outputs'], {}), True, 'import tensorflow as tf\n'), (319, 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (206, 'tensorflow.keras.metrics.RecallAtPrecision', 'tf.keras.metrics.RecallAtPrecision', (['(0.95)'], {'name': '"""RecallAtPrecision95"""'}), True, 'import tensorflow as tf\n'), (265, 'tensorflow.cast', 'tf.cast', (['x', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (162, 'tensorflow.math.log', 'tf.math.log', (['(model_outputs + 1e-08)'], {}), True, 'import tensorflow as tf\n'), (214, 'tensorflow.keras.metrics.Recall', 'tf.keras.metrics.Recall', ([], {'class_id': 'i', 'name': 'f"""recall-{i}"""'}), True, 'import tensorflow as tf\n')] |
koreybea/tensorflow | e252fffb16f2706688604dc91c426bae367ae5e8 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Distribution tests for keras.layers.preprocessing.text_vectorization."""
import numpy as np
from tensorflow.python import keras
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.distribute import combinations as ds_combinations
from tensorflow.python.framework import config
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import test_combinations as combinations
from tensorflow.python.keras import keras_parameterized
from tensorflow.python.keras.distribute.strategy_combinations import all_strategies
from tensorflow.python.keras.layers.preprocessing import preprocessing_test_utils
from tensorflow.python.keras.layers.preprocessing import text_vectorization
from tensorflow.python.platform import test
@ds_combinations.generate(
combinations.combine(distribution=all_strategies, mode=["eager"]))
class TextVectorizationDistributionTest(
keras_parameterized.TestCase,
preprocessing_test_utils.PreprocessingLayerTest):
def test_distribution_strategy_output(self, distribution):
vocab_data = ["earth", "wind", "and", "fire"]
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_array).batch(
2, drop_remainder=True)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
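# With output_mode=INT, index 0 is reserved for padding and 1 for OOV, so the
# vocabulary maps to indices 2..5 and the unseen "michigan" maps to 1.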
config.set_soft_device_placement(True)
with distribution.scope():
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.set_vocabulary(vocab_data)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_dataset)
self.assertAllEqual(expected_output, output_dataset)
def test_distribution_strategy_output_with_adapt(self, distribution):
vocab_data = [[
"earth", "earth", "earth", "earth", "wind", "wind", "wind", "and",
"and", "fire"
]]
vocab_dataset = dataset_ops.Dataset.from_tensors(vocab_data)
input_array = np.array([["earth", "wind", "and", "fire"],
["fire", "and", "earth", "michigan"]])
input_dataset = dataset_ops.Dataset.from_tensor_slices(input_array).batch(
2, drop_remainder=True)
expected_output = [[2, 3, 4, 5], [5, 4, 2, 1]]
config.set_soft_device_placement(True)
with distribution.scope():
input_data = keras.Input(shape=(None,), dtype=dtypes.string)
layer = text_vectorization.TextVectorization(
max_tokens=None,
standardize=None,
split=None,
output_mode=text_vectorization.INT)
layer.adapt(vocab_dataset)
int_data = layer(input_data)
model = keras.Model(inputs=input_data, outputs=int_data)
output_dataset = model.predict(input_dataset)
self.assertAllEqual(expected_output, output_dataset)
if __name__ == "__main__":
test.main()
| [
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors",
"tensorflow.python.keras.Input",
"tensorflow.python.framework.config.set_soft_device_placement",
"tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices",
"tensorflow.python.keras.Model",
"tensorflow.python.keras.layers.preprocessing.text_vectorization.TextVectorization",
"tensorflow.python.platform.test.main",
"tensorflow.python.framework.test_combinations.combine",
"numpy.array"
] | tensorflow/python/keras/layers/preprocessing/text_vectorization_distribution_test.py | [(33, 'tensorflow.python.framework.test_combinations.combine', 'combinations.combine', ([], {'distribution': 'all_strategies', 'mode': "['eager']"}), True, 'from tensorflow.python.framework import test_combinations as combinations\n'), (92, 'tensorflow.python.platform.test.main', 'test.main', ([], {}), False, 'from tensorflow.python.platform import test\n'), (40, 'numpy.array', 'np.array', (["[['earth', 'wind', 'and', 'fire'], ['fire', 'and', 'earth', 'michigan']]"], {}), True, 'import numpy as np\n'), (47, 'tensorflow.python.framework.config.set_soft_device_placement', 'config.set_soft_device_placement', (['(True)'], {}), False, 'from tensorflow.python.framework import config\n'), (68, 'tensorflow.python.data.ops.dataset_ops.Dataset.from_tensors', 'dataset_ops.Dataset.from_tensors', (['vocab_data'], {}), False, 'from tensorflow.python.data.ops import dataset_ops\n'), (69, 'numpy.array', 'np.array', (["[['earth', 'wind', 'and', 'fire'], ['fire', 'and', 'earth', 'michigan']]"], {}), True, 'import numpy as np\n'), (76, 'tensorflow.python.framework.config.set_soft_device_placement', 'config.set_soft_device_placement', (['(True)'], {}), False, 'from tensorflow.python.framework import config\n'), (50, 'tensorflow.python.keras.Input', 'keras.Input', ([], {'shape': '(None,)', 'dtype': 'dtypes.string'}), False, 'from tensorflow.python import keras\n'), (51, 'tensorflow.python.keras.layers.preprocessing.text_vectorization.TextVectorization', 'text_vectorization.TextVectorization', ([], {'max_tokens': 'None', 'standardize': 'None', 'split': 'None', 'output_mode': 'text_vectorization.INT'}), False, 'from tensorflow.python.keras.layers.preprocessing import text_vectorization\n'), (58, 'tensorflow.python.keras.Model', 'keras.Model', ([], {'inputs': 'input_data', 'outputs': 'int_data'}), False, 'from tensorflow.python import keras\n'), (79, 'tensorflow.python.keras.Input', 'keras.Input', ([], {'shape': '(None,)', 'dtype': 'dtypes.string'}), False, 'from tensorflow.python import keras\n'), (80, 'tensorflow.python.keras.layers.preprocessing.text_vectorization.TextVectorization', 'text_vectorization.TextVectorization', ([], {'max_tokens': 'None', 'standardize': 'None', 'split': 'None', 'output_mode': 'text_vectorization.INT'}), False, 'from tensorflow.python.keras.layers.preprocessing import text_vectorization\n'), (87, 'tensorflow.python.keras.Model', 'keras.Model', ([], {'inputs': 'input_data', 'outputs': 'int_data'}), False, 'from tensorflow.python import keras\n'), (42, 'tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices', 'dataset_ops.Dataset.from_tensor_slices', (['input_array'], {}), False, 'from tensorflow.python.data.ops import dataset_ops\n'), (71, 'tensorflow.python.data.ops.dataset_ops.Dataset.from_tensor_slices', 'dataset_ops.Dataset.from_tensor_slices', (['input_array'], {}), False, 'from tensorflow.python.data.ops import dataset_ops\n')] |
qixuanf/uncertainty-baselines | e965d4e3129825f5710a26a8877d6d8703bbf023 | # coding=utf-8
# Copyright 2021 The Uncertainty Baselines Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Heteroscedastic ViT on JFT-300M."""
from functools import partial # pylint: disable=g-importing-member so standard
import multiprocessing
import numbers
import os
from absl import app
from absl import flags
from absl import logging
from clu import parameter_overview
from clu import periodic_actions
import flax
import flax.jax_utils as flax_utils
import jax
import jax.numpy as jnp
import ml_collections
import numpy as np
import robustness_metrics as rm
import tensorflow as tf
from tensorflow.io import gfile
import uncertainty_baselines as ub
import cifar10h_utils # local file import
# TODO(dusenberrymw): Open-source remaining imports.
fewshot = None
input_pipeline = None
resformer = None
u = None
pp_builder = None
xm = None
xm_api = None
ml_collections.config_flags.DEFINE_config_file(
'config', None, 'Training configuration.', lock_config=True)
flags.DEFINE_string('output_dir', default=None, help='Work unit directory.')
flags.DEFINE_integer(
'num_cores', default=None, help='Unused. How many devices being used.')
flags.DEFINE_boolean(
'use_gpu', default=None, help='Unused. Whether or not running on GPU.')
flags.DEFINE_string('tpu', None,
'Unused. Name of the TPU. Only used if use_gpu is False.')
flags.DEFINE_integer('seed', default=0, help='Random seed.')
FLAGS = flags.FLAGS
# Adds jax flags to the program.
jax.config.parse_flags_with_absl()
def main(argv):
del argv
config = FLAGS.config
output_dir = FLAGS.output_dir
if config.get('dataset_dir'):
logging.info('data_dir=%s', config.dataset_dir)
logging.info('Output dir: %s', output_dir)
save_checkpoint_path = None
if config.get('checkpoint_steps'):
gfile.makedirs(output_dir)
save_checkpoint_path = os.path.join(output_dir, 'checkpoint.npz')
# The pool is used to perform misc operations such as logging in async way.
pool = multiprocessing.pool.ThreadPool()
# This seed makes the Jax part of things (like model init) deterministic.
# However, full training still won't be deterministic, for example due to the
# tf.data pipeline not being deterministic even if we would set TF seed.
rng = jax.random.PRNGKey(config.get('seed', 0))
xm_xp = None
xm_wu = None
def write_note(note):
if jax.host_id() == 0:
logging.info('NOTE: %s', note)
write_note('Initializing...')
fillin = lambda *_: None
# Verify settings to make sure no checkpoints are accidentally missed.
if config.get('keep_checkpoint_steps'):
assert config.get('checkpoint_steps'), 'Specify `checkpoint_steps`.'
assert config.keep_checkpoint_steps % config.checkpoint_steps == 0, (
f'`keep_checkpoint_steps` ({config.checkpoint_steps}) should be'
f'divisible by `checkpoint_steps ({config.checkpoint_steps}).`')
batch_size = config.batch_size
batch_size_eval = config.get('batch_size_eval', batch_size)
if (batch_size % jax.device_count() != 0 or
batch_size_eval % jax.device_count() != 0):
raise ValueError(f'Batch sizes ({batch_size} and {batch_size_eval}) must '
f'be divisible by device number ({jax.device_count()})')
local_batch_size = batch_size // jax.host_count()
local_batch_size_eval = batch_size_eval // jax.host_count()
logging.info(
'Global batch size %d on %d hosts results in %d local batch size. '
'With %d devices per host (%d devices total), that\'s a %d per-device '
'batch size.',
batch_size, jax.host_count(), local_batch_size,
jax.local_device_count(), jax.device_count(),
local_batch_size // jax.local_device_count())
write_note('Initializing train dataset...')
train_ds = input_pipeline.get_data(
dataset=config.dataset,
split=config.train_split,
data_dir=fillin(config.get('dataset_dir')),
batch_size=local_batch_size,
preprocess_fn=pp_builder.get_preprocess_fn(config.pp_train),
shuffle_buffer_size=config.shuffle_buffer_size,
prefetch=config.get('prefetch_to_host', 2),
cache=False)
# Start prefetching already.
train_iter = u.start_input_pipeline(
train_ds, config.get('prefetch_to_device', 1), pad=local_batch_size)
# We always pad to local_batch_size_eval even when less would be enough in
# order to minimize memory fragmentation.
write_note('Initializing val dataset(s)...')
def _get_val_split(dataset, split, pp_eval, data_dir=None):
# We do ceil rounding such that we include the last incomplete batch.
nval_img = input_pipeline.get_num_examples(
dataset, split, data_dir=fillin(data_dir))
val_steps = int(np.ceil(nval_img / batch_size_eval))
logging.info('Running validation for %d steps for %s, %s', val_steps,
dataset, split)
val_it = input_pipeline.get_data(
dataset=dataset,
split=split,
data_dir=fillin(data_dir),
batch_size=local_batch_size_eval,
preprocess_fn=pp_builder.get_preprocess_fn(pp_eval),
cache=config.get('val_cache', 'batched'),
repeat_after_batching=True,
prefetch=0, # Save memory since we cache.
drop_remainder=False,
shuffle_files=False)
val_it = u.start_input_pipeline(
val_it, config.get('prefetch_to_device', 1), pad=local_batch_size_eval)
return (val_it, val_steps)
if isinstance(config.val_split, str):
val_ds = {'val': _get_val_split(config.dataset, config.val_split,
config.pp_eval, config.get('dataset_dir'))}
else:
val_ds = {t[0]: _get_val_split(*t[1:]) for t in config.val_split}
if config.get('eval_on_cifar_10h'):
val_steps = int(np.ceil(10000 / batch_size_eval))
cifar10h_dataset = cifar10h_utils.load_ds()
val_it = input_pipeline.make_pipeline(
data=cifar10h_dataset,
batch_size=local_batch_size_eval,
preprocess_fn=pp_builder.get_preprocess_fn(config.pp_eval_cifar_10h),
cache=config.get('val_cache', 'batched'),
repeats=None,
repeat_after_batching=True,
prefetch=0,
drop_remainder=False,
shuffle_buffer_size=None,
ignore_errors=False,
filter_fn=None)
val_it = u.start_input_pipeline(
val_it, config.get('prefetch_to_device', 1), pad=local_batch_size_eval)
val_ds['cifar_10h'] = (val_it, val_steps)
ood_ds = None
if config.get('ood_dataset'):
logging.info('loading OOD dataset = %s', config.get('ood_dataset'))
if isinstance(config.ood_split, str):
ood_ds = {
'ind':
_get_val_split(config.dataset, config.ood_split, config.pp_eval,
config.get('data_dir')),
'ood':
_get_val_split(config.ood_dataset, config.ood_split,
config.pp_eval, config.get('data_dir')),
}
else:
raise NotImplementedError(
'Only string type of val_split is supported! Got val_split=%s!' %
str(config.ood_split))
ntrain_img = input_pipeline.get_num_examples(
config.dataset, config.train_split,
data_dir=fillin(config.get('dataset_dir')))
steps_per_epoch = ntrain_img / batch_size
if config.get('num_epochs'):
total_steps = int(config.num_epochs * steps_per_epoch)
assert not config.get('total_steps'), 'Set either num_epochs or total_steps'
else:
total_steps = config.total_steps
logging.info(
'Running for %d steps, that means %f epochs and %f steps per epoch',
total_steps, total_steps * batch_size / ntrain_img, steps_per_epoch)
mw = u.BigVisionMetricWriter(xm_xp.id, xm_wu.id, steps_per_epoch)
write_note('Initializing model...')
logging.info('config.model = %s', config.get('model'))
model = ub.models.het_vision_transformer(
num_classes=config.num_classes, **config.get('model', {}))
# We want all parameters to be created in host RAM, not on any device; they
# will be sent to devices later as needed. Otherwise they can end up being
# allocated twice, which we have already run into in two situations.
@partial(jax.jit, backend='cpu')
def init(rng):
image_size = tuple(train_ds.element_spec['image'].shape[1:])
dummy_input = jnp.zeros((local_batch_size,) + image_size, jnp.float32)
init_rngs = {'params': rng, 'diag_noise_samples': (rng + 1) * 7,
'standard_norm_noise_samples': (rng + 3) * 13}
params = flax.core.unfreeze(model.init(init_rngs, dummy_input,
train=False))['params']
# Set bias in the head to a low value, such that loss is small initially.
if 'head' in params:
params['head']['loc_layer']['bias'] = jnp.full_like(
params['head']['loc_layer']['bias'], config.get('init_head_bias', 0))
return params
rng, rng_init = jax.random.split(rng)
params_cpu = init(rng_init)
if jax.host_id() == 0:
num_params = sum(p.size for p in jax.tree_flatten(params_cpu)[0])
parameter_overview.log_parameter_overview(params_cpu)
mw.measure('num_params', num_params)
@partial(jax.pmap, axis_name='batch')
def evaluation_fn(params, images, labels, mask):
# Ignore the entries with all zero labels for evaluation.
mask *= labels.max(axis=1)
logits, _ = model.apply({'params': flax.core.freeze(params)},
images,
train=False,
rngs={
'dropout': rng,
'diag_noise_samples': (rng + 1) * 7,
'standard_norm_noise_samples': (rng + 3) * 13})
losses = getattr(u, config.get('loss', 'sigmoid_xent'))(
logits=logits, labels=labels, reduction=False)
loss = jax.lax.psum(losses * mask, axis_name='batch')
top1_idx = jnp.argmax(logits, axis=1)
# Extracts the label at the highest logit index for each image.
top1_correct = jnp.take_along_axis(labels, top1_idx[:, None], axis=1)[:, 0]
ncorrect = jax.lax.psum(top1_correct * mask, axis_name='batch')
n = jax.lax.psum(mask, axis_name='batch')
metric_args = jax.lax.all_gather([logits, labels, mask], axis_name='batch')
return ncorrect, loss, n, metric_args
@partial(jax.pmap, axis_name='batch')
def cifar_10h_evaluation_fn(params, images, labels, mask):
logits, _ = model.apply({'params': flax.core.freeze(params)},
images,
train=False,
rngs={
'dropout': rng,
'diag_noise_samples': (rng + 1) * 7,
'standard_norm_noise_samples': (rng + 3) * 13})
losses = getattr(u, config.get('loss', 'softmax_xent'))(
logits=logits, labels=labels, reduction=False)
loss = jax.lax.psum(losses, axis_name='batch')
top1_idx = jnp.argmax(logits, axis=1)
# Extracts the label at the highest logit index for each image.
one_hot_labels = jnp.eye(10)[jnp.argmax(labels, axis=1)]
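# CIFAR-10H labels are soft distributions over human annotator votes; accuracy
# here is computed against the hardened (argmax) label.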
top1_correct = jnp.take_along_axis(
one_hot_labels, top1_idx[:, None], axis=1)[:, 0]
ncorrect = jax.lax.psum(top1_correct, axis_name='batch')
n = jax.lax.psum(one_hot_labels, axis_name='batch')
metric_args = jax.lax.all_gather([logits, labels, mask],
axis_name='batch')
return ncorrect, loss, n, metric_args
# Setup function for computing representation.
@partial(jax.pmap, axis_name='batch')
def representation_fn(params, images, labels, mask):
_, outputs = model.apply({'params': flax.core.freeze(params)},
images,
train=False,
rngs={
'dropout': rng,
'diag_noise_samples': (rng + 1) * 7,
'standard_norm_noise_samples': (rng + 3) * 13})
representation = outputs[config.fewshot.representation_layer]
representation = jax.lax.all_gather(representation, 'batch')
labels = jax.lax.all_gather(labels, 'batch')
mask = jax.lax.all_gather(mask, 'batch')
return representation, labels, mask
# Load the optimizer from flax.
opt_name = config.get('optim_name')
write_note(f'Initializing {opt_name} optimizer...')
opt_def = getattr(flax.optim, opt_name)(**config.get('optim', {}))
# We jit this, such that the arrays that are created are created on the same
# device as the input is, in this case the CPU. Else they'd be on device[0].
opt_cpu = jax.jit(opt_def.create)(params_cpu)
@partial(jax.pmap, axis_name='batch', donate_argnums=(0,))
def update_fn(opt, lr, images, labels, rng):
"""Update step."""
measurements = {}
if config.get('mixup') and config.mixup.p:
rng, (images, labels), _ = u.mixup(rng, images, labels, **config.mixup)
# Get device-specific loss rng.
rng, rng_model = jax.random.split(rng, 2)
rng_model_local = jax.random.fold_in(rng_model, jax.lax.axis_index('batch'))
def loss_fn(params, images, labels):
logits, _ = model.apply(
{'params': flax.core.freeze(params)}, images,
train=True, rngs={
'dropout': rng_model_local,
'diag_noise_samples': (rng_model_local + 1) * 7,
'standard_norm_noise_samples': (rng_model_local + 3) * 13})
return getattr(u, config.get('loss', 'sigmoid_xent'))(
logits=logits, labels=labels)
# Implementation considerations compared and summarized at
# https://docs.google.com/document/d/1g3kMEvqu1DOawaflKNyUsIoQ4yIVEoyE5ZlIPkIl4Lc/edit?hl=en#
l, g = u.accumulate_gradient(jax.value_and_grad(loss_fn), opt.target,
images, labels,
config.get('grad_accum_steps'))
l, g = jax.lax.pmean((l, g), axis_name='batch')
# Log the gradient norm only if we need to compute it anyways (clipping)
# or if we don't use grad_accum_steps, as they interact badly.
if config.get('grad_accum_steps', 1) == 1 or config.get('grad_clip_norm'):
grads, _ = jax.tree_flatten(g)
l2_g = jnp.sqrt(sum([jnp.vdot(p, p) for p in grads]))
measurements['l2_grads'] = l2_g
# Optionally resize the global gradient to a maximum norm. We found this
# useful in some cases across optimizers, hence it's in the main loop.
if config.get('grad_clip_norm'):
g_factor = jnp.minimum(1.0, config.grad_clip_norm / l2_g)
g = jax.tree_map(lambda p: g_factor * p, g)
opt = opt.apply_gradient(g, learning_rate=lr)
decay_rules = config.get('weight_decay', []) or []
if isinstance(decay_rules, numbers.Number):
decay_rules = [('.*kernel.*', decay_rules)]
sched_m = lr/config.lr.base if config.get('weight_decay_decouple') else lr
def decay_fn(v, wd):
return (1.0 - sched_m * wd) * v
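# Decoupled weight decay: each matched parameter is shrunk multiplicatively by
# (1 - schedule * wd), independently of the gradient (AdamW-style).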
opt = opt.replace(target=u.tree_map_with_regex(
decay_fn, opt.target, decay_rules, name='weight decay'))
params, _ = jax.tree_flatten(opt.target)
measurements['l2_params'] = jnp.sqrt(sum([jnp.vdot(p, p) for p in params]))
return opt, l, rng, measurements
# Other things besides optimizer state to be stored.
checkpoint_extra = dict(accum_train_time=0.0)
# Decide how to initialize training. The order is important.
  # 1. Always resume from the existing checkpoint, e.g. resume a finetune job.
  # 2. Resume from a previous checkpoint, e.g. start a cooldown training job.
  # 3. Initialize model from something, e.g. start a fine-tuning job.
  # 4. Train from scratch.
resume_checkpoint_path = None
if save_checkpoint_path and gfile.exists(save_checkpoint_path):
resume_checkpoint_path = save_checkpoint_path
elif config.get('resume'):
resume_checkpoint_path = fillin(config.resume)
if resume_checkpoint_path:
write_note('Resume training from checkpoint...')
checkpoint = {'opt': opt_cpu, 'extra': checkpoint_extra}
_, checkpoint_tree = jax.tree_flatten(checkpoint)
loaded = u.load_checkpoint(checkpoint_tree, resume_checkpoint_path)
# bfloat16 type gets lost when data is saved to disk, so we recover it.
checkpoint = jax.tree_map(u.recover_dtype, loaded)
opt_cpu, checkpoint_extra = checkpoint['opt'], checkpoint['extra']
elif config.get('model_init'):
write_note(f'Initialize model from {config.model_init}...')
# TODO(dusenberrymw): Replace and test load function.
reinit_params = ['head/scale_layer_homoscedastic/kernel',
'head/scale_layer_homoscedastic/bias',
'head/scale_layer_heteroscedastic/kernel',
'head/scale_layer_heteroscedastic/bias',
'head/loc_layer/kernel', 'head/diag_layer/kernel',
'head/loc_layer/bias', 'head/diag_layer/bias']
for param in reinit_params:
if param in params_cpu:
del params_cpu[param]
loaded = resformer.load(params_cpu, config.model_init, config.get('model'),
reinit_params)
opt_cpu = opt_cpu.replace(target=loaded)
if jax.host_id() == 0:
logging.info('Restored parameter overview:')
parameter_overview.log_parameter_overview(loaded)
write_note('Kicking off misc stuff...')
first_step = int(opt_cpu.state.step) # Might be a DeviceArray type.
chrono = u.Chrono(first_step, total_steps, batch_size,
checkpoint_extra['accum_train_time'])
# Note: switch to ProfileAllHosts() if you need to profile all hosts.
# (Xprof data become much larger and take longer to load for analysis)
profiler = periodic_actions.Profile(
# Create profile after every restart to analyze pre-emption related
      # problems and ensure we get similar performance in every run.
logdir=output_dir, first_profile=first_step + 10)
# Prepare the learning-rate and pre-fetch it to device to avoid delays.
lr_fn = u.create_learning_rate_schedule(
batch_size, total_steps, steps_per_epoch, **config.get('lr', {}))
# TODO(dusenberrymw): According to flax docs, prefetching shouldn't be
# necessary for TPUs.
lr_iter = u.prefetch_scalar(map(lr_fn, range(first_step, total_steps)),
config.get('prefetch_to_device', 1))
write_note(f'Replicating...\n{chrono.note}')
opt_repl = flax_utils.replicate(opt_cpu)
write_note(f'Initializing few-shotters...\n{chrono.note}')
if 'fewshot' in config:
fewshotter = fewshot.FewShotEvaluator(
representation_fn, config.fewshot,
config.fewshot.get('batch_size') or batch_size_eval)
rng, rng_loop = jax.random.split(rng, 2)
rngs_loop = flax_utils.replicate(rng_loop)
checkpoint_writer = None
# Note: we return the train loss, val loss, and fewshot best l2s for use in
# reproducibility unit tests.
train_loss = -jnp.inf
val_loss = -jnp.inf
results = {'dummy': {(0, 1): -jnp.inf}}
write_note(f'First step compilations...\n{chrono.note}')
# Using a python integer for step here, because opt.state.step is allocated
# on TPU during replication.
for step, train_batch, lr_repl in zip(
range(first_step + 1, total_steps + 1), train_iter, lr_iter):
mw.step_start(step)
with jax.profiler.TraceContext('train_step', step_num=step, _r=1):
opt_repl, loss_value, rngs_loop, extra_measurements = update_fn(
opt_repl,
lr_repl,
train_batch['image'],
train_batch['labels'],
rng=rngs_loop)
if jax.host_id() == 0:
profiler(step)
# Checkpoint saving
if u.itstime(step, config.get('checkpoint_steps'), total_steps, host=0):
write_note('Checkpointing...')
chrono.pause()
u.checkpointing_timeout(checkpoint_writer,
config.get('checkpoint_timeout', 1))
checkpoint_extra['accum_train_time'] = chrono.accum_train_time
# We need to transfer the weights over now or else we risk keeping them
# alive while they'll be updated in a future step, creating hard to debug
# memory errors (see b/160593526). Also, takes device 0's params only.
opt_cpu = jax.tree_map(lambda x: np.array(x[0]), opt_repl)
# Check whether we want to keep a copy of the current checkpoint.
copy_step = None
if u.itstime(step, config.get('keep_checkpoint_steps'), total_steps):
write_note('Keeping a checkpoint copy...')
copy_step = step
      # Checkpoint should be a nested dictionary or Flax dataclasses from
# `flax.struct`. Both can be present in a checkpoint.
checkpoint = {'opt': opt_cpu, 'extra': checkpoint_extra}
checkpoint_writer = pool.apply_async(
u.save_checkpoint, (checkpoint, save_checkpoint_path, copy_step))
chrono.resume()
# Report training progress
if u.itstime(step, config.log_training_steps, total_steps, host=0):
write_note('Reporting training progress...')
train_loss = loss_value[0] # Keep to return for reproducibility tests.
mw.measure('learning_rate', lr_repl[0])
mw.measure('training_loss', loss_value[0])
for name, value in extra_measurements.items():
mw.measure(name, value[0])
chrono.tick(step, mw.measure, write_note)
# Report validation performance
if u.itstime(step, config.log_eval_steps, total_steps):
write_note('Evaluating on the validation set...')
chrono.pause()
for val_name, (val_iter, val_steps) in val_ds.items():
ncorrect, loss, nseen = 0, 0, 0
ece_num_bins = config.get('ece_num_bins', 15)
ece = rm.metrics.ExpectedCalibrationError(num_bins=ece_num_bins)
label_diversity = tf.keras.metrics.Mean()
sample_diversity = tf.keras.metrics.Mean()
ged = tf.keras.metrics.Mean()
for _, batch in zip(range(val_steps), val_iter):
if val_name == 'cifar_10h':
batch_ncorrect, batch_losses, batch_n, batch_metric_args = (
cifar_10h_evaluation_fn(opt_repl.target, batch['image'],
batch['labels'], batch['mask']))
else:
batch_ncorrect, batch_losses, batch_n, batch_metric_args = (
evaluation_fn(opt_repl.target, batch['image'],
batch['labels'], batch['mask']))
# All results are a replicated array shaped as follows:
# (local_devices, per_device_batch_size, elem_shape...)
# with each local device's entry being identical as they got psum'd.
# So let's just take the first one to the host as numpy.
ncorrect += np.sum(np.array(batch_ncorrect[0]))
loss += np.sum(np.array(batch_losses[0]))
nseen += np.sum(np.array(batch_n[0]))
# Here we parse batch_metric_args to compute complicated metrics
# such as ECE.
logits, labels, masks = batch_metric_args
          masks = np.array(masks[0], dtype=bool)
# From one-hot to integer labels, as required by ECE.
int_labels = np.argmax(np.array(labels[0]), axis=-1)
logits = np.array(logits[0])
probs = jax.nn.softmax(logits)
for p, l, m, label in zip(probs, int_labels, masks, labels[0]):
ece.add_batch(p[m, :], label=l[m])
if val_name == 'cifar_10h':
batch_label_diversity, batch_sample_diversity, batch_ged = cifar10h_utils.generalized_energy_distance(
label[m], p[m, :], 10)
label_diversity.update_state(batch_label_diversity)
sample_diversity.update_state(batch_sample_diversity)
ged.update_state(batch_ged)
val_loss = loss / nseen # Keep to return for reproducibility tests.
mw.measure(f'{val_name}_prec@1', ncorrect / nseen)
mw.measure(f'{val_name}_loss', val_loss)
mw.measure(f'{val_name}_ece', float(ece.result()['ece']))
if val_name == 'cifar_10h':
mw.measure(
f'{val_name}_label_diversity', float(label_diversity.result()))
mw.measure(
f'{val_name}_sample_diversity', float(sample_diversity.result()))
mw.measure(f'{val_name}_ged', float(ged.result()))
# OOD eval
if ood_ds:
ood_metrics = {
'auroc':
tf.keras.metrics.AUC(
curve='ROC', summation_method='interpolation'),
'auprc':
tf.keras.metrics.AUC(
curve='PR', summation_method='interpolation')
}
for metric in ood_metrics.values():
metric.reset_states()
      for val_name, (val_iter, val_steps) in ood_ds.items():
        ncorrect, loss, nseen = 0, 0, 0  # Reset accumulators for each OOD split.
for _, batch in zip(range(val_steps), val_iter):
batch_ncorrect, batch_losses, batch_n, batch_metric_args = evaluation_fn(
opt_repl.target, batch['image'], batch['labels'], batch['mask'])
# All results are a replicated array shaped as follows:
# (local_devices, per_device_batch_size, elem_shape...)
# with each local device's entry being identical as they got psum'd.
# So let's just take the first one to the host as numpy.
ncorrect += np.sum(np.array(batch_ncorrect[0]))
loss += np.sum(np.array(batch_losses[0]))
nseen += np.sum(np.array(batch_n[0]))
# Here we parse batch_metric_args to compute
# complicated metrics such as ECE and OOD AUROC
logits, _, masks = batch_metric_args
probs = jax.nn.softmax(logits[0], axis=-1)
probs = probs[jnp.array(masks[0], dtype=bool)]
confs = jnp.max(probs, axis=-1)
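          # Maximum softmax probability is the OOD score; in-distribution
          # examples get label 1 and OOD examples label 0 for AUROC/AUPRC.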
ood_labels = np.ones_like(
confs) if val_name == 'ind' else np.zeros_like(confs)
for metric in ood_metrics.values():
metric.update_state(ood_labels, confs)
if val_name == 'ind':
val_loss = loss / nseen # Keep to return for reproducibility tests.
mw.measure(f'{val_name}_prec@1', ncorrect / nseen)
mw.measure(f'{val_name}_loss', val_loss)
for name, value in ood_metrics.items():
mw.measure(f'ood_{name}', value.result())
chrono.resume()
if 'fewshot' in config:
# Compute few-shot on-the-fly evaluation.
if u.itstime(step, config.fewshot.log_steps, total_steps):
chrono.pause()
write_note(f'Few-shot evaluation...\n{chrono.note}')
# Keep `results` to return for reproducibility tests.
results, best_l2 = fewshotter.run_all(opt_repl.target,
config.fewshot.datasets)
fewshotter.walk_results(mw.measure, results, best_l2)
chrono.resume()
mw.step_end()
write_note(f'Done!\n{chrono.note}')
pool.close()
pool.join()
mw.close()
# Return final training loss, validation loss, and fewshot results for
# reproducibility test cases.
return train_loss, val_loss, results
if __name__ == '__main__':
# TODO(dusenberrymw): Refactor `main` such that there is a `train_eval`
# function that returns values for tests and does not directly access flags,
# and then have `main` return None.
def _main(argv):
main(argv)
app.run(_main) # Ignore the returned values from `main`.
| [
"numpy.ones_like",
"tensorflow.io.gfile.exists",
"tensorflow.keras.metrics.AUC",
"tensorflow.io.gfile.makedirs",
"numpy.ceil",
"numpy.zeros_like",
"numpy.array",
"tensorflow.keras.metrics.Mean"
] | baselines/jft/heteroscedastic.py | [(51, 'ml_collections.config_flags.DEFINE_config_file', 'ml_collections.config_flags.DEFINE_config_file', (['"""config"""', 'None', '"""Training configuration."""'], {'lock_config': '(True)'}), False, 'import ml_collections\n'), (54, 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""output_dir"""'], {'default': 'None', 'help': '"""Work unit directory."""'}), False, 'from absl import flags\n'), (55, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""num_cores"""'], {'default': 'None', 'help': '"""Unused. How many devices being used."""'}), False, 'from absl import flags\n'), (57, 'absl.flags.DEFINE_boolean', 'flags.DEFINE_boolean', (['"""use_gpu"""'], {'default': 'None', 'help': '"""Unused. Whether or not running on GPU."""'}), False, 'from absl import flags\n'), (59, 'absl.flags.DEFINE_string', 'flags.DEFINE_string', (['"""tpu"""', 'None', '"""Unused. Name of the TPU. Only used if use_gpu is False."""'], {}), False, 'from absl import flags\n'), (61, 'absl.flags.DEFINE_integer', 'flags.DEFINE_integer', (['"""seed"""'], {'default': '(0)', 'help': '"""Random seed."""'}), False, 'from absl import flags\n'), (66, 'jax.config.parse_flags_with_absl', 'jax.config.parse_flags_with_absl', ([], {}), False, 'import jax\n'), (77, 'absl.logging.info', 'logging.info', (['"""Output dir: %s"""', 'output_dir'], {}), False, 'from absl import logging\n'), (85, 'multiprocessing.pool.ThreadPool', 'multiprocessing.pool.ThreadPool', ([], {}), False, 'import multiprocessing\n'), (222, 'absl.logging.info', 'logging.info', (['"""Running for %d steps, that means %f epochs and %f steps per epoch"""', 'total_steps', '(total_steps * batch_size / ntrain_img)', 'steps_per_epoch'], {}), False, 'from absl import logging\n'), (235, 'functools.partial', 'partial', (['jax.jit'], {'backend': '"""cpu"""'}), False, 'from functools import partial\n'), (253, 'jax.random.split', 'jax.random.split', (['rng'], {}), False, 'import jax\n'), (261, 'functools.partial', 'partial', (['jax.pmap'], {'axis_name': '"""batch"""'}), False, 'from functools import partial\n'), (287, 'functools.partial', 'partial', (['jax.pmap'], {'axis_name': '"""batch"""'}), False, 'from functools import partial\n'), (315, 'functools.partial', 'partial', (['jax.pmap'], {'axis_name': '"""batch"""'}), False, 'from functools import partial\n'), (339, 'functools.partial', 'partial', (['jax.pmap'], {'axis_name': '"""batch"""', 'donate_argnums': '(0,)'}), False, 'from functools import partial\n'), (444, 'clu.periodic_actions.Profile', 'periodic_actions.Profile', ([], {'logdir': 'output_dir', 'first_profile': '(first_step + 10)'}), False, 'from clu import periodic_actions\n'), (458, 'flax.jax_utils.replicate', 'flax_utils.replicate', (['opt_cpu'], {}), True, 'import flax.jax_utils as flax_utils\n'), (466, 'jax.random.split', 'jax.random.split', (['rng', '(2)'], {}), False, 'import jax\n'), (467, 'flax.jax_utils.replicate', 'flax_utils.replicate', (['rng_loop'], {}), True, 'import flax.jax_utils as flax_utils\n'), (658, 'absl.app.run', 'app.run', (['_main'], {}), False, 'from absl import app\n'), (76, 'absl.logging.info', 'logging.info', (['"""data_dir=%s"""', 'config.dataset_dir'], {}), False, 'from absl import logging\n'), (81, 'tensorflow.io.gfile.makedirs', 'gfile.makedirs', (['output_dir'], {}), False, 'from tensorflow.io import gfile\n'), (82, 'os.path.join', 'os.path.join', (['output_dir', '"""checkpoint.npz"""'], {}), False, 'import os\n'), (114, 'jax.host_count', 'jax.host_count', ([], {}), False, 'import 
jax\n'), (115, 'jax.host_count', 'jax.host_count', ([], {}), False, 'import jax\n'), (120, 'jax.host_count', 'jax.host_count', ([], {}), False, 'import jax\n'), (121, 'jax.local_device_count', 'jax.local_device_count', ([], {}), False, 'import jax\n'), (121, 'jax.device_count', 'jax.device_count', ([], {}), False, 'import jax\n'), (147, 'absl.logging.info', 'logging.info', (['"""Running validation for %d steps for %s, %s"""', 'val_steps', 'dataset', 'split'], {}), False, 'from absl import logging\n'), (175, 'cifar10h_utils.load_ds', 'cifar10h_utils.load_ds', ([], {}), False, 'import cifar10h_utils\n'), (238, 'jax.numpy.zeros', 'jnp.zeros', (['((local_batch_size,) + image_size)', 'jnp.float32'], {}), True, 'import jax.numpy as jnp\n'), (256, 'jax.host_id', 'jax.host_id', ([], {}), False, 'import jax\n'), (258, 'clu.parameter_overview.log_parameter_overview', 'parameter_overview.log_parameter_overview', (['params_cpu'], {}), False, 'from clu import parameter_overview\n'), (275, 'jax.lax.psum', 'jax.lax.psum', (['(losses * mask)'], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (277, 'jax.numpy.argmax', 'jnp.argmax', (['logits'], {'axis': '(1)'}), True, 'import jax.numpy as jnp\n'), (280, 'jax.lax.psum', 'jax.lax.psum', (['(top1_correct * mask)'], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (281, 'jax.lax.psum', 'jax.lax.psum', (['mask'], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (283, 'jax.lax.all_gather', 'jax.lax.all_gather', (['[logits, labels, mask]'], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (299, 'jax.lax.psum', 'jax.lax.psum', (['losses'], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (301, 'jax.numpy.argmax', 'jnp.argmax', (['logits'], {'axis': '(1)'}), True, 'import jax.numpy as jnp\n'), (307, 'jax.lax.psum', 'jax.lax.psum', (['top1_correct'], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (308, 'jax.lax.psum', 'jax.lax.psum', (['one_hot_labels'], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (310, 'jax.lax.all_gather', 'jax.lax.all_gather', (['[logits, labels, mask]'], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (325, 'jax.lax.all_gather', 'jax.lax.all_gather', (['representation', '"""batch"""'], {}), False, 'import jax\n'), (326, 'jax.lax.all_gather', 'jax.lax.all_gather', (['labels', '"""batch"""'], {}), False, 'import jax\n'), (327, 'jax.lax.all_gather', 'jax.lax.all_gather', (['mask', '"""batch"""'], {}), False, 'import jax\n'), (337, 'jax.jit', 'jax.jit', (['opt_def.create'], {}), False, 'import jax\n'), (349, 'jax.random.split', 'jax.random.split', (['rng', '(2)'], {}), False, 'import jax\n'), (367, 'jax.lax.pmean', 'jax.lax.pmean', (['(l, g)'], {'axis_name': '"""batch"""'}), False, 'import jax\n'), (392, 'jax.tree_flatten', 'jax.tree_flatten', (['opt.target'], {}), False, 'import jax\n'), (406, 'tensorflow.io.gfile.exists', 'gfile.exists', (['save_checkpoint_path'], {}), False, 'from tensorflow.io import gfile\n'), (413, 'jax.tree_flatten', 'jax.tree_flatten', (['checkpoint'], {}), False, 'import jax\n'), (416, 'jax.tree_map', 'jax.tree_map', (['u.recover_dtype', 'loaded'], {}), False, 'import jax\n'), (95, 'jax.host_id', 'jax.host_id', ([], {}), False, 'import jax\n'), (96, 'absl.logging.info', 'logging.info', (['"""NOTE: %s"""', 'note'], {}), False, 'from absl import logging\n'), (122, 'jax.local_device_count', 'jax.local_device_count', ([], {}), False, 'import jax\n'), (146, 'numpy.ceil', 'np.ceil', (['(nval_img / batch_size_eval)'], {}), True, 'import numpy as np\n'), (173, 'numpy.ceil', 
'np.ceil', (['(10000 / batch_size_eval)'], {}), True, 'import numpy as np\n'), (279, 'jax.numpy.take_along_axis', 'jnp.take_along_axis', (['labels', 'top1_idx[:, (None)]'], {'axis': '(1)'}), True, 'import jax.numpy as jnp\n'), (303, 'jax.numpy.eye', 'jnp.eye', (['(10)'], {}), True, 'import jax.numpy as jnp\n'), (305, 'jax.numpy.take_along_axis', 'jnp.take_along_axis', (['one_hot_labels', 'top1_idx[:, (None)]'], {'axis': '(1)'}), True, 'import jax.numpy as jnp\n'), (350, 'jax.lax.axis_index', 'jax.lax.axis_index', (['"""batch"""'], {}), False, 'import jax\n'), (364, 'jax.value_and_grad', 'jax.value_and_grad', (['loss_fn'], {}), False, 'import jax\n'), (372, 'jax.tree_flatten', 'jax.tree_flatten', (['g'], {}), False, 'import jax\n'), (379, 'jax.numpy.minimum', 'jnp.minimum', (['(1.0)', '(config.grad_clip_norm / l2_g)'], {}), True, 'import jax.numpy as jnp\n'), (380, 'jax.tree_map', 'jax.tree_map', (['(lambda p: g_factor * p)', 'g'], {}), False, 'import jax\n'), (483, 'jax.profiler.TraceContext', 'jax.profiler.TraceContext', (['"""train_step"""'], {'step_num': 'step', '_r': '(1)'}), False, 'import jax\n'), (491, 'jax.host_id', 'jax.host_id', ([], {}), False, 'import jax\n'), (109, 'jax.device_count', 'jax.device_count', ([], {}), False, 'import jax\n'), (110, 'jax.device_count', 'jax.device_count', ([], {}), False, 'import jax\n'), (265, 'flax.core.freeze', 'flax.core.freeze', (['params'], {}), False, 'import flax\n'), (289, 'flax.core.freeze', 'flax.core.freeze', (['params'], {}), False, 'import flax\n'), (303, 'jax.numpy.argmax', 'jnp.argmax', (['labels'], {'axis': '(1)'}), True, 'import jax.numpy as jnp\n'), (317, 'flax.core.freeze', 'flax.core.freeze', (['params'], {}), False, 'import flax\n'), (434, 'jax.host_id', 'jax.host_id', ([], {}), False, 'import jax\n'), (435, 'absl.logging.info', 'logging.info', (['"""Restored parameter overview:"""'], {}), False, 'from absl import logging\n'), (436, 'clu.parameter_overview.log_parameter_overview', 'parameter_overview.log_parameter_overview', (['loaded'], {}), False, 'from clu import parameter_overview\n'), (536, 'robustness_metrics.metrics.ExpectedCalibrationError', 'rm.metrics.ExpectedCalibrationError', ([], {'num_bins': 'ece_num_bins'}), True, 'import robustness_metrics as rm\n'), (537, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), True, 'import tensorflow as tf\n'), (538, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), True, 'import tensorflow as tf\n'), (539, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {}), True, 'import tensorflow as tf\n'), (111, 'jax.device_count', 'jax.device_count', ([], {}), False, 'import jax\n'), (354, 'flax.core.freeze', 'flax.core.freeze', (['params'], {}), False, 'import flax\n'), (393, 'jax.numpy.vdot', 'jnp.vdot', (['p', 'p'], {}), True, 'import jax.numpy as jnp\n'), (504, 'numpy.array', 'np.array', (['x[0]'], {}), True, 'import numpy as np\n'), (560, 'numpy.array', 'np.array', (['masks[0]'], {'dtype': 'np.bool'}), True, 'import numpy as np\n'), (563, 'numpy.array', 'np.array', (['logits[0]'], {}), True, 'import numpy as np\n'), (564, 'jax.nn.softmax', 'jax.nn.softmax', (['logits'], {}), False, 'import jax\n'), (590, 'tensorflow.keras.metrics.AUC', 'tf.keras.metrics.AUC', ([], {'curve': '"""ROC"""', 'summation_method': '"""interpolation"""'}), True, 'import tensorflow as tf\n'), (593, 'tensorflow.keras.metrics.AUC', 'tf.keras.metrics.AUC', ([], {'curve': '"""PR"""', 'summation_method': '"""interpolation"""'}), True, 'import tensorflow as tf\n'), (257, 
'jax.tree_flatten', 'jax.tree_flatten', (['params_cpu'], {}), False, 'import jax\n'), (373, 'jax.numpy.vdot', 'jnp.vdot', (['p', 'p'], {}), True, 'import jax.numpy as jnp\n'), (553, 'numpy.array', 'np.array', (['batch_ncorrect[0]'], {}), True, 'import numpy as np\n'), (554, 'numpy.array', 'np.array', (['batch_losses[0]'], {}), True, 'import numpy as np\n'), (555, 'numpy.array', 'np.array', (['batch_n[0]'], {}), True, 'import numpy as np\n'), (562, 'numpy.array', 'np.array', (['labels[0]'], {}), True, 'import numpy as np\n'), (613, 'jax.nn.softmax', 'jax.nn.softmax', (['logits[0]'], {'axis': '(-1)'}), False, 'import jax\n'), (615, 'jax.numpy.max', 'jnp.max', (['probs'], {'axis': '(-1)'}), True, 'import jax.numpy as jnp\n'), (569, 'cifar10h_utils.generalized_energy_distance', 'cifar10h_utils.generalized_energy_distance', (['label[m]', 'p[(m), :]', '(10)'], {}), False, 'import cifar10h_utils\n'), (606, 'numpy.array', 'np.array', (['batch_ncorrect[0]'], {}), True, 'import numpy as np\n'), (607, 'numpy.array', 'np.array', (['batch_losses[0]'], {}), True, 'import numpy as np\n'), (608, 'numpy.array', 'np.array', (['batch_n[0]'], {}), True, 'import numpy as np\n'), (616, 'numpy.ones_like', 'np.ones_like', (['confs'], {}), True, 'import numpy as np\n'), (617, 'numpy.zeros_like', 'np.zeros_like', (['confs'], {}), True, 'import numpy as np\n'), (614, 'jax.numpy.array', 'jnp.array', (['masks[0]'], {'dtype': 'bool'}), True, 'import jax.numpy as jnp\n')] |
hina-shah/US-famli | f927c89ec9cb51f9e511bbdfa2f59ce15e0e8730 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.keras import layers
import json
import os
import glob
import sys
class Attention(layers.Layer):
def __init__(self, k=25, mask=None, name='attention'):
        super(Attention, self).__init__(name=name)
self.wa = layers.Dense(1, activation='relu', use_bias=False)
self.k = k
self.mask = mask
def call(self, x0):
a = self.wa(x0)
if self.mask:
a = tf.multiply(a, self.mask)
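        # Sparse top-k attention: find the k-th largest logit per example and
        # zero out everything below it before normalizing.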
        a_flat = tf.reshape(a, [-1, tf.shape(a)[1]])
        min_a = tf.reduce_min(
            tf.math.top_k(a_flat, k=self.k, sorted=False)[0], axis=1, keepdims=True)
min_a = tf.reshape(min_a, [-1, 1, 1])
m = tf.greater_equal(a, min_a)
m = tf.cast(m, tf.float32)
        masked_exp = tf.multiply(tf.exp(a), m)
        a = masked_exp / tf.reduce_sum(masked_exp, axis=1, keepdims=True)
return a
class BahdanauAttention(tf.keras.layers.Layer):
def __init__(self, units):
super(BahdanauAttention, self).__init__()
self.W1 = tf.keras.layers.Dense(units)
self.W2 = tf.keras.layers.Dense(units)
self.V = tf.keras.layers.Dense(1)
# self.k = k
def call(self, query, values):
# query hidden state shape == (batch_size, hidden size)
# query_with_time_axis shape == (batch_size, 1, hidden size)
# values shape == (batch_size, max_len, hidden size)
# we are doing this to broadcast addition along the time axis to calculate the score
query_with_time_axis = tf.expand_dims(query, 1)
# score shape == (batch_size, max_length, 1)
# we get 1 at the last axis because we are applying score to self.V
# the shape of the tensor before applying self.V is (batch_size, max_length, units)
score = self.V(tf.nn.tanh(
self.W1(query_with_time_axis) + self.W2(values)))
# min_score = tf.reduce_min(tf.math.top_k(tf.reshape(score, [-1, tf.shape(score)[1]]), k=self.k, sorted=False, name=None)[0], axis=1, keepdims=True)
# min_score = tf.reshape(min_score, [-1, 1, 1])
# score_mask = tf.greater_equal(score, min_score)
# score_mask = tf.cast(score_mask, tf.float32)
# attention_weights = tf.multiply(tf.exp(score), score_mask) / tf.reduce_sum(tf.multiply(tf.exp(score), score_mask), axis=1, keepdims=True)
# attention_weights shape == (batch_size, max_length, 1)
attention_weights = tf.nn.softmax(score, axis=1)
# context_vector shape after sum == (batch_size, hidden_size)
context_vector = attention_weights * values
context_vector = tf.reduce_sum(context_vector, axis=1)
return context_vector, attention_weights
class SigmoidCrossEntropy(tf.keras.losses.Loss):
def __init__(self, max_value, reduction=tf.keras.losses.Reduction.AUTO):
super(SigmoidCrossEntropy, self).__init__()
self.reduction = reduction
self.max_value = max_value
def call(self, y_true, y_pred, sample_weight=None):
y_true = tf.math.divide(y_true, self.max_value)
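        # Positional args map to (labels, logits): y_pred must be raw logits
        # and y_true is scaled into [0, 1] by max_value above.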
loss = tf.nn.sigmoid_cross_entropy_with_logits(y_true, y_pred)
if sample_weight is not None:
loss = tf.multiply(loss, sample_weight)
if self.reduction == 'sum':
return tf.reduce_sum(loss)
return tf.reduce_mean(loss)
class NN(tf.keras.Model):
def __init__(self, tf_inputs, args):
super(NN, self).__init__()
learning_rate = args.learning_rate
decay_steps = args.decay_steps
decay_rate = args.decay_rate
staircase = args.staircase
drop_prob = args.drop_prob
data_description = tf_inputs.get_data_description()
self.num_channels = data_description[data_description["data_keys"][0]]["shape"][-1]
self.num_classes = 2
self.class_weights_index = -1
self.enumerate_index = 1
if "enumerate" in data_description:
self.enumerate_index = data_description["data_keys"].index(data_description["enumerate"])
if(data_description[data_description["data_keys"][self.enumerate_index]]["num_class"]):
self.num_classes = data_description[data_description["data_keys"][self.enumerate_index]]["num_class"]
print("Number of classes in data description", self.num_classes)
if "class_weights" in data_description["data_keys"]:
self.class_weights_index = data_description["data_keys"].index("class_weights")
print("Using weights index", self.class_weights_index)
self.drop_prob = drop_prob
self.gru_class = self.make_gru_network()
self.gru_class.summary()
self.max_value = 290.0
# self.loss = SigmoidCrossEntropy(self.max_value)
self.loss = tf.keras.losses.MeanSquaredError()
# self.loss = tf.keras.losses.Huber(delta=5.0, reduction=tf.keras.losses.Reduction.SUM)
# self.loss = tf.keras.losses.LogCosh()
self.metrics_train = tf.keras.metrics.MeanAbsoluteError()
if decay_rate != 0.0:
lr = tf.keras.optimizers.schedules.ExponentialDecay(learning_rate, decay_steps, decay_rate, staircase)
else:
lr = learning_rate
self.optimizer = tf.keras.optimizers.Adam(lr)
self.validation_loss = tf.keras.losses.MeanSquaredError()
# self.validation_loss = tf.keras.losses.Huber(delta=5.0, reduction=tf.keras.losses.Reduction.SUM)
# self.validation_loss = tf.keras.losses.MeanSquaredError()
# self.validation_loss = SigmoidCrossEntropy(self.max_value)
self.validation_metric = tf.keras.metrics.MeanAbsoluteError()
self.global_validation_metric = float("inf")
self.global_validation_step = args.in_epoch
def make_gru_network(self):
x0 = tf.keras.Input(shape=[None, self.num_channels])
x = layers.Masking(mask_value=-1.0)(x0)
x = tf.keras.layers.GaussianNoise(0.1)(x)
x = layers.BatchNormalization()(x)
        x_e, x_h_fwd, x_h_bwd = layers.Bidirectional(
            layers.GRU(units=512, activation='tanh', use_bias=False,
                       kernel_initializer="glorot_normal",
                       return_sequences=True, return_state=True),
            name="bi_gru")(x)
x_e = layers.Dropout(self.drop_prob)(x_e)
x_h_fwd = layers.Dropout(self.drop_prob)(x_h_fwd)
x_h_bwd = layers.Dropout(self.drop_prob)(x_h_bwd)
x_a_fwd, w_a_fwd = BahdanauAttention(1024)(x_h_fwd, x_e)
x_a_bwd, w_a_bwd = BahdanauAttention(1024)(x_h_bwd, x_e)
x = tf.concat([x_h_fwd, x_a_fwd, x_h_bwd, x_a_bwd], axis=-1)
x = layers.Dense(1, activation='sigmoid', use_bias=False, name='prediction')(x)
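        # Affine rescale: the sigmoid output in [0, 1] is mapped to
        # predictions in the range [190, 280].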
x = tf.math.add(tf.math.multiply(x, 90.0), 190.0)
return tf.keras.Model(inputs=x0, outputs=x)
@tf.function(experimental_relax_shapes=True)
def train_step(self, train_tuple):
images = train_tuple[0]
labels = train_tuple[self.enumerate_index]
sample_weight = None
if self.class_weights_index != -1:
sample_weight = train_tuple[self.class_weights_index]
with tf.GradientTape() as tape:
x_c = self.gru_class(images, training=True)
loss = self.loss(labels, x_c, sample_weight=sample_weight)
var_list = self.trainable_variables
gradients = tape.gradient(loss, var_list)
self.optimizer.apply_gradients(zip(gradients, var_list))
return loss, x_c
def valid_step(self, dataset_validation):
loss = 0
for valid_tuple in dataset_validation:
images = valid_tuple[0]
labels = valid_tuple[self.enumerate_index]
x_c = self.gru_class(images, training=False)
loss += self.validation_loss(labels, x_c)
self.validation_metric.update_state(labels, x_c)
metric = self.validation_metric.result()
tf.summary.scalar('validation_loss', loss, step=self.global_validation_step)
tf.summary.scalar('validation_acc', metric, step=self.global_validation_step)
self.global_validation_step += 1
print("val loss", loss.numpy(), "mae", metric.numpy())
improved = False
if loss < self.global_validation_metric:
self.global_validation_metric = loss
improved = True
return improved
def get_checkpoint_manager(self):
return tf.train.Checkpoint(
gru_class=self.gru_class,
optimizer=self.optimizer)
def summary(self, train_tuple, tr_step, step):
sample_weight = None
if self.class_weights_index != -1:
sample_weight = train_tuple[self.class_weights_index]
labels = tf.reshape(train_tuple[1], [-1])
loss = tr_step[0]
prediction = tf.reshape(tr_step[1], [-1])
self.metrics_train.update_state(labels, prediction, sample_weight=sample_weight)
metrics_result = self.metrics_train.result()
print("step", step, "loss", loss.numpy(), "mae", metrics_result.numpy())
print(labels.numpy())
print(prediction.numpy())
        tf.summary.scalar('loss', loss, step=step)
tf.summary.scalar('mae', metrics_result, step=step)
def save_model(self, save_model):
self.gru_class.summary()
self.gru_class.save(save_model) | [
"tensorflow.concat",
"tensorflow.reduce_sum",
"tensorflow.cast",
"tensorflow.nn.sigmoid_cross_entropy_with_logits",
"tensorflow.summary.scalar",
"tensorflow.keras.Input",
"tensorflow.keras.layers.GRU",
"tensorflow.keras.layers.GaussianNoise",
"tensorflow.math.divide",
"tensorflow.keras.layers.Masking",
"tensorflow.shape",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.train.Checkpoint",
"tensorflow.exp",
"tensorflow.keras.Model",
"tensorflow.math.multiply",
"tensorflow.function",
"tensorflow.GradientTape",
"tensorflow.nn.softmax",
"tensorflow.multiply",
"tensorflow.reduce_mean",
"tensorflow.reshape",
"tensorflow.keras.optimizers.schedules.ExponentialDecay",
"tensorflow.expand_dims",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.metrics.MeanAbsoluteError",
"tensorflow.keras.layers.Dropout",
"tensorflow.greater_equal"
] | src/py/dl/nn_v2/gru_ga_nn.py | [(174, 'tensorflow.function', 'tf.function', ([], {'experimental_relax_shapes': '(True)'}), True, 'import tensorflow as tf\n'), (17, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""relu"""', 'use_bias': '(False)'}), False, 'from tensorflow.keras import layers\n'), (28, 'tensorflow.reshape', 'tf.reshape', (['min_a', '[-1, 1, 1]'], {}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.greater_equal', 'tf.greater_equal', (['a', 'min_a'], {}), True, 'import tensorflow as tf\n'), (30, 'tensorflow.cast', 'tf.cast', (['m', 'tf.float32'], {}), True, 'import tensorflow as tf\n'), (39, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['units'], {}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['units'], {}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(1)'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.expand_dims', 'tf.expand_dims', (['query', '(1)'], {}), True, 'import tensorflow as tf\n'), (64, 'tensorflow.nn.softmax', 'tf.nn.softmax', (['score'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (68, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['context_vector'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.math.divide', 'tf.math.divide', (['y_true', 'self.max_value'], {}), True, 'import tensorflow as tf\n'), (81, 'tensorflow.nn.sigmoid_cross_entropy_with_logits', 'tf.nn.sigmoid_cross_entropy_with_logits', (['y_true', 'y_pred'], {}), True, 'import tensorflow as tf\n'), (87, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['loss'], {}), True, 'import tensorflow as tf\n'), (125, 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), True, 'import tensorflow as tf\n'), (128, 'tensorflow.keras.metrics.MeanAbsoluteError', 'tf.keras.metrics.MeanAbsoluteError', ([], {}), True, 'import tensorflow as tf\n'), (135, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', (['lr'], {}), True, 'import tensorflow as tf\n'), (138, 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {}), True, 'import tensorflow as tf\n'), (142, 'tensorflow.keras.metrics.MeanAbsoluteError', 'tf.keras.metrics.MeanAbsoluteError', ([], {}), True, 'import tensorflow as tf\n'), (150, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '[None, self.num_channels]'}), True, 'import tensorflow as tf\n'), (166, 'tensorflow.concat', 'tf.concat', (['[x_h_fwd, x_a_fwd, x_h_bwd, x_a_bwd]'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (171, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'x0', 'outputs': 'x'}), True, 'import tensorflow as tf\n'), (211, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""validation_loss"""', 'loss'], {'step': 'self.global_validation_step'}), True, 'import tensorflow as tf\n'), (212, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""validation_acc"""', 'metric'], {'step': 'self.global_validation_step'}), True, 'import tensorflow as tf\n'), (225, 'tensorflow.train.Checkpoint', 'tf.train.Checkpoint', ([], {'gru_class': 'self.gru_class', 'optimizer': 'self.optimizer'}), True, 'import tensorflow as tf\n'), (235, 'tensorflow.reshape', 'tf.reshape', (['train_tuple[1]', '[-1]'], {}), True, 'import tensorflow as tf\n'), (238, 'tensorflow.reshape', 'tf.reshape', (['tr_step[1]', '[-1]'], {}), True, 'import tensorflow as tf\n'), (247, 'tensorflow.summary.scalar', 
'tf.summary.scalar', (['"""loss"""', 'loss'], {'step': 'step'}), True, 'import tensorflow as tf\n'), (248, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""loss"""', 'loss'], {'step': 'step'}), True, 'import tensorflow as tf\n'), (249, 'tensorflow.summary.scalar', 'tf.summary.scalar', (['"""mae"""', 'metrics_result'], {'step': 'step'}), True, 'import tensorflow as tf\n'), (25, 'tensorflow.multiply', 'tf.multiply', (['a', 'self.mask'], {}), True, 'import tensorflow as tf\n'), (83, 'tensorflow.multiply', 'tf.multiply', (['loss', 'sample_weight'], {}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.reduce_sum', 'tf.reduce_sum', (['loss'], {}), True, 'import tensorflow as tf\n'), (131, 'tensorflow.keras.optimizers.schedules.ExponentialDecay', 'tf.keras.optimizers.schedules.ExponentialDecay', (['learning_rate', 'decay_steps', 'decay_rate', 'staircase'], {}), True, 'import tensorflow as tf\n'), (152, 'tensorflow.keras.layers.Masking', 'layers.Masking', ([], {'mask_value': '(-1.0)'}), False, 'from tensorflow.keras import layers\n'), (154, 'tensorflow.keras.layers.GaussianNoise', 'tf.keras.layers.GaussianNoise', (['(0.1)'], {}), True, 'import tensorflow as tf\n'), (156, 'tensorflow.keras.layers.BatchNormalization', 'layers.BatchNormalization', ([], {}), False, 'from tensorflow.keras import layers\n'), (159, 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['self.drop_prob'], {}), False, 'from tensorflow.keras import layers\n'), (160, 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['self.drop_prob'], {}), False, 'from tensorflow.keras import layers\n'), (161, 'tensorflow.keras.layers.Dropout', 'layers.Dropout', (['self.drop_prob'], {}), False, 'from tensorflow.keras import layers\n'), (168, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(1)'], {'activation': '"""sigmoid"""', 'use_bias': '(False)', 'name': '"""prediction"""'}), False, 'from tensorflow.keras import layers\n'), (169, 'tensorflow.math.multiply', 'tf.math.multiply', (['x', '(90.0)'], {}), True, 'import tensorflow as tf\n'), (184, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (31, 'tensorflow.exp', 'tf.exp', (['a'], {}), True, 'import tensorflow as tf\n'), (158, 'tensorflow.keras.layers.GRU', 'layers.GRU', ([], {'units': '(512)', 'activation': '"""tanh"""', 'use_bias': '(False)', 'kernel_initializer': '"""glorot_normal"""', 'return_sequences': '(True)', 'return_state': '(True)'}), False, 'from tensorflow.keras import layers\n'), (31, 'tensorflow.exp', 'tf.exp', (['a'], {}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.shape', 'tf.shape', (['a'], {}), True, 'import tensorflow as tf\n')] |
yasin-gh/Deep-Learning-for-Computer-Vision | d5b3e153369018029270a6a47349ee8ce7c7641e | import tensorflow as tf
input_height = 360
input_width = 480
kernel = 3
filter_size = 64
pad = 1
pool_size = 2
nClasses = 12  # assumed number of segmentation classes; set to match the dataset
model = tf.keras.models.Sequential()
model.add(tf.keras.layers.Layer(input_shape=(3, input_height, input_width)))
# encoder
model.add(tf.keras.layers.ZeroPadding2D(padding=(pad, pad)))
model.add(tf.keras.layers.Conv2D(filter_size, (kernel, kernel), padding='valid'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(tf.keras.layers.ZeroPadding2D(padding=(pad, pad)))
model.add(tf.keras.layers.Conv2D(128, (kernel, kernel), padding='valid'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(tf.keras.layers.ZeroPadding2D(padding=(pad, pad)))
model.add(tf.keras.layers.Conv2D(256, (kernel, kernel), padding='valid'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Activation('relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(pool_size, pool_size)))
model.add(tf.keras.layers.ZeroPadding2D(padding=(pad, pad)))
model.add(tf.keras.layers.Conv2D(512, (kernel, kernel), padding='valid'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Activation('relu'))
# decoder
model.add(tf.keras.layers.ZeroPadding2D(padding=(pad, pad)))
model.add(tf.keras.layers.Conv2D(512, (kernel, kernel), padding='valid'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.UpSampling2D(size=(pool_size, pool_size)))
model.add(tf.keras.layers.ZeroPadding2D(padding=(pad, pad)))
model.add(tf.keras.layers.Conv2D(256, (kernel, kernel), padding='valid'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.UpSampling2D(size=(pool_size, pool_size)))
model.add(tf.keras.layers.ZeroPadding2D(padding=(pad, pad)))
model.add(tf.keras.layers.Conv2D(128, (kernel, kernel), padding='valid'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.UpSampling2D(size=(pool_size, pool_size)))
model.add(tf.keras.layers.ZeroPadding2D(padding=(pad, pad)))
model.add(tf.keras.layers.Conv2D(filter_size, (kernel, kernel), padding='valid'))
model.add(tf.keras.layers.BatchNormalization())
model.add(tf.keras.layers.Conv2D(nClasses, (1, 1), padding='valid'))
model.outputHeight = model.output_shape[-2]
model.outputWidth = model.output_shape[-1]
model.add(tf.keras.layers.Reshape((nClasses, model.output_shape[-2] * model.output_shape[-1]),
input_shape=(nClasses, model.output_shape[-2], model.output_shape[-1])))
model.add(tf.keras.layers.Permute((2, 1)))
model.add(tf.keras.layers.Activation('softmax'))
model.compile(loss="categorical_crossentropy", optimizer=tf.keras.optimizers.Adam(), metrics=['accuracy'])
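# Minimal usage sketch (hypothetical shapes; the network assumes channels-first
# inputs of shape (3, H, W), so set data_format accordingly):
#   import numpy as np
#   x = np.zeros((2, 3, input_height, input_width), dtype="float32")
#   y = np.zeros((2, model.outputHeight * model.outputWidth, nClasses), dtype="float32")
#   model.fit(x, y, epochs=1)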
| [
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.UpSampling2D",
"tensorflow.keras.layers.Layer",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Permute",
"tensorflow.keras.layers.ZeroPadding2D",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.models.Sequential"
] | Chapter05/1_segnet.py | [(10, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (11, 'tensorflow.keras.layers.Layer', 'tf.keras.layers.Layer', ([], {'input_shape': '(3, input_height, input_width)'}), True, 'import tensorflow as tf\n'), (14, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(pad, pad)'}), True, 'import tensorflow as tf\n'), (15, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['filter_size', 'kernel', 'kernel'], {'border_mode': '"""valid"""'}), True, 'import tensorflow as tf\n'), (16, 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), True, 'import tensorflow as tf\n'), (17, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), True, 'import tensorflow as tf\n'), (18, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(pool_size, pool_size)'}), True, 'import tensorflow as tf\n'), (20, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(pad, pad)'}), True, 'import tensorflow as tf\n'), (21, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)', 'kernel', 'kernel'], {'border_mode': '"""valid"""'}), True, 'import tensorflow as tf\n'), (22, 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), True, 'import tensorflow as tf\n'), (23, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), True, 'import tensorflow as tf\n'), (24, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(pool_size, pool_size)'}), True, 'import tensorflow as tf\n'), (26, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(pad, pad)'}), True, 'import tensorflow as tf\n'), (27, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(256)', 'kernel', 'kernel'], {'border_mode': '"""valid"""'}), True, 'import tensorflow as tf\n'), (28, 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), True, 'import tensorflow as tf\n'), (30, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(pool_size, pool_size)'}), True, 'import tensorflow as tf\n'), (32, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(pad, pad)'}), True, 'import tensorflow as tf\n'), (33, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(512)', 'kernel', 'kernel'], {'border_mode': '"""valid"""'}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), True, 'import tensorflow as tf\n'), (35, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""relu"""'], {}), True, 'import tensorflow as tf\n'), (38, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(pad, pad)'}), True, 'import tensorflow as tf\n'), (39, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(512)', 'kernel', 'kernel'], {'border_mode': '"""valid"""'}), True, 'import tensorflow as tf\n'), (40, 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), True, 'import tensorflow as 
tf\n'), (42, 'tensorflow.keras.layers.UpSampling2D', 'tf.keras.layers.UpSampling2D', ([], {'size': '(pool_size, pool_size)'}), True, 'import tensorflow as tf\n'), (43, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(pad, pad)'}), True, 'import tensorflow as tf\n'), (44, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(256)', 'kernel', 'kernel'], {'border_mode': '"""valid"""'}), True, 'import tensorflow as tf\n'), (45, 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), True, 'import tensorflow as tf\n'), (47, 'tensorflow.keras.layers.UpSampling2D', 'tf.keras.layers.UpSampling2D', ([], {'size': '(pool_size, pool_size)'}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(pad, pad)'}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(128)', 'kernel', 'kernel'], {'border_mode': '"""valid"""'}), True, 'import tensorflow as tf\n'), (50, 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.keras.layers.UpSampling2D', 'tf.keras.layers.UpSampling2D', ([], {'size': '(pool_size, pool_size)'}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.keras.layers.ZeroPadding2D', 'tf.keras.layers.ZeroPadding2D', ([], {'padding': '(pad, pad)'}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['filter_size', 'kernel', 'kernel'], {'border_mode': '"""valid"""'}), True, 'import tensorflow as tf\n'), (55, 'tensorflow.keras.layers.BatchNormalization', 'tf.keras.layers.BatchNormalization', ([], {}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['nClasses', '(1)', '(1)'], {'border_mode': '"""valid"""'}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.keras.layers.Reshape', 'tf.keras.layers.Reshape', (['(nClasses, model.output_shape[-2] * model.output_shape[-1])'], {'input_shape': '(nClasses, model.output_shape[-2], model.output_shape[-1])'}), True, 'import tensorflow as tf\n'), (65, 'tensorflow.keras.layers.Permute', 'tf.keras.layers.Permute', (['(2, 1)'], {}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['"""softmax"""'], {}), True, 'import tensorflow as tf\n')] |
zhangAlwin/tf_keras_models | 054c9e596325bcafd107c3f51abf018daab98a14 | # -*- coding: utf-8 -*-
'''
@CreateTime : 2021/12/07 12:43:25
@Author : Alwin Zhang
@Mail : [email protected]
'''
import tensorflow as tf
from tensorflow.keras.layers import Embedding, Conv1D, Dense, Concatenate, GlobalMaxPooling1D
from tensorflow.keras import Model
class TextCNN(Model):
def __init__(self, max_len, max_features, embedding_dims, class_num, kernel_sizes=[1, 2, 3], kernel_regularizer=None, last_activation='softmax'):
"""
:param max_len:文本最大长度
:param max_features: 词典大小
:param embedding_dims: embedding维度大小
:param kernel_sizes: 滑动卷积窗口大小的list, eg: [1,2,3]
:param kernel_regularizer: eg: tf.keras.regularizers.l2(0.001)
:param class_num:
:param last_activation:
"""
super(TextCNN, self).__init__()
self.max_len = max_len
self.kernel_sizes = kernel_sizes
self.class_num = class_num
self.embedding = Embedding(
input_dim=max_features, output_dim=embedding_dims, input_length=max_len)
self.conv1s = []
        self.maxpools = []
for kernel_size in kernel_sizes:
self.conv1s.append(Conv1D(filters=128, kernel_size=kernel_size,
activation='relu', kernel_regularizer=kernel_regularizer))
            self.maxpools.append(GlobalMaxPooling1D())
self.classifier = Dense(class_num, activation=last_activation)
def call(self, inputs, training=None, mask=None):
assert len(inputs.get_shape()) == 2
assert inputs.get_shape()[1] == self.max_len
emb = self.embedding(inputs)
conv1s = []
for i in range(len(self.kernel_sizes)):
c = self.conv1s[i](emb)
            c = self.maxpools[i](c)
conv1s.append(c)
x = Concatenate()(conv1s) # batch_size,len(self.kernel_sizes) * filters
output = self.classifier(x)
return output
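    # Usage sketch (hypothetical hyperparameters): a subclassed Model has no
    # static graph until it is called, so build_graph runs a dummy forward
    # pass to make model.summary() work:
    #   model = TextCNN(max_len=100, max_features=5000, embedding_dims=128, class_num=2)
    #   model.build_graph((None, 100))
    #   model.summary()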
def build_graph(self, input_shape):
input_shape_nobatch = input_shape[1:]
self.build(input_shape)
inputs = tf.keras.Input(shape=input_shape_nobatch)
_ = self.call(inputs) | [
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.Input",
"tensorflow.keras.layers.Embedding",
"tensorflow.keras.layers.GlobalMaxPooling1D",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv1D"
] | textcnn/model.py | [(28, 'tensorflow.keras.layers.Embedding', 'Embedding', ([], {'input_dim': 'max_features', 'output_dim': 'embedding_dims', 'input_length': 'max_len'}), False, 'from tensorflow.keras.layers import Embedding, Conv1D, GlobalAveragePooling1D, Dense, Concatenate, GlobalMaxPooling1D\n'), (36, 'tensorflow.keras.layers.Dense', 'Dense', (['class_num'], {'activation': 'last_activation'}), False, 'from tensorflow.keras.layers import Embedding, Conv1D, GlobalAveragePooling1D, Dense, Concatenate, GlobalMaxPooling1D\n'), (55, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': 'input_shape_nobatch'}), True, 'import tensorflow as tf\n'), (48, 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), False, 'from tensorflow.keras.layers import Embedding, Conv1D, GlobalAveragePooling1D, Dense, Concatenate, GlobalMaxPooling1D\n'), (33, 'tensorflow.keras.layers.Conv1D', 'Conv1D', ([], {'filters': '(128)', 'kernel_size': 'kernel_size', 'activation': '"""relu"""', 'kernel_regularizer': 'kernel_regularizer'}), False, 'from tensorflow.keras.layers import Embedding, Conv1D, GlobalAveragePooling1D, Dense, Concatenate, GlobalMaxPooling1D\n'), (35, 'tensorflow.keras.layers.GlobalMaxPooling1D', 'GlobalMaxPooling1D', ([], {}), False, 'from tensorflow.keras.layers import Embedding, Conv1D, GlobalAveragePooling1D, Dense, Concatenate, GlobalMaxPooling1D\n')] |
ak110/pytoolk | 8eef7e0add7bbc0ced1f1f1d82ed245388cc6684 | """EfficientNet.
Reference: native input sizes
- B0: 224
- B1: 240
- B2: 260
- B3: 300
- B4: 380
- B5: 456
- B6: 528
- B7: 600
"""
import tensorflow as tf
def create_b0(
include_top=False, input_shape=None, input_tensor=None, weights="noisy-student"
):
"""ネットワークの作成。"""
import efficientnet.tfkeras as efn
return efn.EfficientNetB0(
include_top=include_top,
input_shape=input_shape,
input_tensor=input_tensor,
weights=weights,
)
def create_b1(
include_top=False, input_shape=None, input_tensor=None, weights="noisy-student"
):
"""ネットワークの作成。"""
import efficientnet.tfkeras as efn
return efn.EfficientNetB1(
include_top=include_top,
input_shape=input_shape,
input_tensor=input_tensor,
weights=weights,
)
def create_b2(
include_top=False, input_shape=None, input_tensor=None, weights="noisy-student"
):
"""ネットワークの作成。"""
import efficientnet.tfkeras as efn
return efn.EfficientNetB2(
include_top=include_top,
input_shape=input_shape,
input_tensor=input_tensor,
weights=weights,
)
def create_b3(
include_top=False, input_shape=None, input_tensor=None, weights="noisy-student"
):
"""ネットワークの作成。"""
import efficientnet.tfkeras as efn
return efn.EfficientNetB3(
include_top=include_top,
input_shape=input_shape,
input_tensor=input_tensor,
weights=weights,
)
def create_b4(
include_top=False, input_shape=None, input_tensor=None, weights="noisy-student"
):
"""ネットワークの作成。"""
import efficientnet.tfkeras as efn
return efn.EfficientNetB4(
include_top=include_top,
input_shape=input_shape,
input_tensor=input_tensor,
weights=weights,
)
def create_b5(
include_top=False, input_shape=None, input_tensor=None, weights="noisy-student"
):
"""ネットワークの作成。"""
import efficientnet.tfkeras as efn
return efn.EfficientNetB5(
include_top=include_top,
input_shape=input_shape,
input_tensor=input_tensor,
weights=weights,
)
def create_b6(
include_top=False, input_shape=None, input_tensor=None, weights="noisy-student"
):
"""ネットワークの作成。"""
import efficientnet.tfkeras as efn
return efn.EfficientNetB6(
include_top=include_top,
input_shape=input_shape,
input_tensor=input_tensor,
weights=weights,
)
def create_b7(
include_top=False, input_shape=None, input_tensor=None, weights="noisy-student"
):
"""ネットワークの作成。"""
import efficientnet.tfkeras as efn
return efn.EfficientNetB7(
include_top=include_top,
input_shape=input_shape,
input_tensor=input_tensor,
weights=weights,
)
def preprocess_input(x):
"""前処理。"""
return tf.keras.applications.imagenet_utils.preprocess_input(x, mode="torch")
def get_1_over_2(model):
"""入力から縦横1/2のところのテンソルを返す。"""
return model.get_layer("block2a_expand_conv").input
def get_1_over_4(model):
"""入力から縦横1/4のところのテンソルを返す。"""
return model.get_layer("block3a_expand_conv").input
def get_1_over_8(model):
"""入力から縦横1/8のところのテンソルを返す。"""
return model.get_layer("block4a_expand_conv").input
def get_1_over_16(model):
"""入力から縦横1/16のところのテンソルを返す。"""
return model.get_layer("block5a_expand_conv").input
def get_1_over_32(model):
"""入力から縦横1/32のところのテンソルを返す。"""
return model.get_layer("top_activation").output
| [
"tensorflow.keras.applications.imagenet_utils.preprocess_input"
] | pytoolkit/applications/efficientnet.py | [(26, 'efficientnet.tfkeras.EfficientNetB0', 'efn.EfficientNetB0', ([], {'include_top': 'include_top', 'input_shape': 'input_shape', 'input_tensor': 'input_tensor', 'weights': 'weights'}), True, 'import efficientnet.tfkeras as efn\n'), (40, 'efficientnet.tfkeras.EfficientNetB1', 'efn.EfficientNetB1', ([], {'include_top': 'include_top', 'input_shape': 'input_shape', 'input_tensor': 'input_tensor', 'weights': 'weights'}), True, 'import efficientnet.tfkeras as efn\n'), (54, 'efficientnet.tfkeras.EfficientNetB2', 'efn.EfficientNetB2', ([], {'include_top': 'include_top', 'input_shape': 'input_shape', 'input_tensor': 'input_tensor', 'weights': 'weights'}), True, 'import efficientnet.tfkeras as efn\n'), (68, 'efficientnet.tfkeras.EfficientNetB3', 'efn.EfficientNetB3', ([], {'include_top': 'include_top', 'input_shape': 'input_shape', 'input_tensor': 'input_tensor', 'weights': 'weights'}), True, 'import efficientnet.tfkeras as efn\n'), (82, 'efficientnet.tfkeras.EfficientNetB4', 'efn.EfficientNetB4', ([], {'include_top': 'include_top', 'input_shape': 'input_shape', 'input_tensor': 'input_tensor', 'weights': 'weights'}), True, 'import efficientnet.tfkeras as efn\n'), (96, 'efficientnet.tfkeras.EfficientNetB5', 'efn.EfficientNetB5', ([], {'include_top': 'include_top', 'input_shape': 'input_shape', 'input_tensor': 'input_tensor', 'weights': 'weights'}), True, 'import efficientnet.tfkeras as efn\n'), (110, 'efficientnet.tfkeras.EfficientNetB6', 'efn.EfficientNetB6', ([], {'include_top': 'include_top', 'input_shape': 'input_shape', 'input_tensor': 'input_tensor', 'weights': 'weights'}), True, 'import efficientnet.tfkeras as efn\n'), (124, 'efficientnet.tfkeras.EfficientNetB7', 'efn.EfficientNetB7', ([], {'include_top': 'include_top', 'input_shape': 'input_shape', 'input_tensor': 'input_tensor', 'weights': 'weights'}), True, 'import efficientnet.tfkeras as efn\n'), (134, 'tensorflow.keras.applications.imagenet_utils.preprocess_input', 'tf.keras.applications.imagenet_utils.preprocess_input', (['x'], {'mode': '"""torch"""'}), True, 'import tensorflow as tf\n')] |
dwhite54/arcface-tf2 | b835b17238503942580a325bb408644120b61230 | import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.layers import (
Dense,
Dropout,
Flatten,
Input,
)
from tensorflow.keras.applications import (
MobileNetV2,
ResNet50
)
from .layers import (
BatchNormalization,
ArcMarginPenaltyLogists
)
def _regularizer(weights_decay=5e-4):
return tf.keras.regularizers.l2(weights_decay)
def Backbone(backbone_type='ResNet50', use_pretrain=True):
"""Backbone Model"""
weights = None
if use_pretrain:
weights = 'imagenet'
def backbone(x_in):
if backbone_type == 'ResNet50':
return ResNet50(input_shape=x_in.shape[1:], include_top=False,
weights=weights)(x_in)
elif backbone_type == 'MobileNetV2':
return MobileNetV2(input_shape=x_in.shape[1:], include_top=False,
weights=weights)(x_in)
else:
raise TypeError('backbone_type error!')
return backbone
def OutputLayer(embd_shape, w_decay=5e-4, name='OutputLayer'):
"""Output Later"""
def output_layer(x_in):
x = inputs = Input(x_in.shape[1:])
x = BatchNormalization()(x)
x = Dropout(rate=0.5)(x)
x = Flatten()(x)
x = Dense(embd_shape, kernel_regularizer=_regularizer(w_decay))(x)
x = BatchNormalization()(x)
return Model(inputs, x, name=name)(x_in)
return output_layer
def ArcHead(num_classes, margin=0.5, logist_scale=64, name='ArcHead'):
"""Arc Head"""
def arc_head(x_in, y_in):
x = inputs1 = Input(x_in.shape[1:])
y = Input(y_in.shape[1:])
x = ArcMarginPenaltyLogists(num_classes=num_classes,
margin=margin,
logist_scale=logist_scale)(x, y)
return Model((inputs1, y), x, name=name)((x_in, y_in))
return arc_head
def NormHead(num_classes, w_decay=5e-4, name='NormHead'):
"""Norm Head"""
def norm_head(x_in):
x = inputs = Input(x_in.shape[1:])
x = Dense(num_classes, kernel_regularizer=_regularizer(w_decay))(x)
return Model(inputs, x, name=name)(x_in)
return norm_head
def ArcFaceModel(size=None, channels=3, num_classes=None, name='arcface_model',
margin=0.5, logist_scale=64, embd_shape=512,
head_type='ArcHead', backbone_type='ResNet50',
w_decay=5e-4, use_pretrain=True, training=False):
"""Arc Face Model"""
x = inputs = Input([size, size, channels], name='input_image')
x = Backbone(backbone_type=backbone_type, use_pretrain=use_pretrain)(x)
embds = OutputLayer(embd_shape, w_decay=w_decay)(x)
embds = embds / tf.norm(embds, axis=1, keepdims=True)
if training:
assert num_classes is not None
labels = Input([], name='label')
if head_type == 'ArcHead':
logist = ArcHead(num_classes=num_classes, margin=margin,
logist_scale=logist_scale)(embds, labels)
else:
logist = NormHead(num_classes=num_classes, w_decay=w_decay)(embds)
return Model((inputs, labels), logist, name=name)
else:
return Model(inputs, embds, name=name)
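# --- Illustrative usage (editor's sketch; not part of the original module) ---
# A training-mode model takes (image, label) inputs and returns class scores,
# while an inference-mode model returns L2-normalised embeddings:
#
#   train_model = ArcFaceModel(size=112, num_classes=1000, training=True)
#   embedder = ArcFaceModel(size=112, training=False)
#
# (112 and 1000 are placeholder values, not from this repo; the appropriate
# loss, e.g. sparse categorical cross-entropy and its from_logits setting,
# depends on how ArcMarginPenaltyLogists scales its output.)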
| [
"tensorflow.norm",
"tensorflow.keras.regularizers.l2",
"tensorflow.keras.Model",
"tensorflow.keras.applications.ResNet50",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.applications.MobileNetV2",
"tensorflow.keras.layers.Input"
] | modules/models.py | [(20, 'tensorflow.keras.regularizers.l2', 'tf.keras.regularizers.l2', (['weights_decay'], {}), True, 'import tensorflow as tf\n'), (80, 'tensorflow.keras.layers.Input', 'Input', (['[size, size, channels]'], {'name': '"""input_image"""'}), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Input\n'), (44, 'tensorflow.keras.layers.Input', 'Input', (['x_in.shape[1:]'], {}), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Input\n'), (57, 'tensorflow.keras.layers.Input', 'Input', (['x_in.shape[1:]'], {}), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Input\n'), (58, 'tensorflow.keras.layers.Input', 'Input', (['y_in.shape[1:]'], {}), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Input\n'), (69, 'tensorflow.keras.layers.Input', 'Input', (['x_in.shape[1:]'], {}), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Input\n'), (86, 'tensorflow.norm', 'tf.norm', (['embds'], {'axis': '(1)', 'keepdims': '(True)'}), True, 'import tensorflow as tf\n'), (90, 'tensorflow.keras.layers.Input', 'Input', (['[]'], {'name': '"""label"""'}), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Input\n'), (96, 'tensorflow.keras.Model', 'Model', (['(inputs, labels)', 'logist'], {'name': 'name'}), False, 'from tensorflow.keras import Model\n'), (98, 'tensorflow.keras.Model', 'Model', (['inputs', 'embds'], {'name': 'name'}), False, 'from tensorflow.keras import Model\n'), (46, 'tensorflow.keras.layers.Dropout', 'Dropout', ([], {'rate': '(0.5)'}), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Input\n'), (47, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import Dense, Dropout, Flatten, Input\n'), (50, 'tensorflow.keras.Model', 'Model', (['inputs', 'x'], {'name': 'name'}), False, 'from tensorflow.keras import Model\n'), (62, 'tensorflow.keras.Model', 'Model', (['(inputs1, y)', 'x'], {'name': 'name'}), False, 'from tensorflow.keras import Model\n'), (71, 'tensorflow.keras.Model', 'Model', (['inputs', 'x'], {'name': 'name'}), False, 'from tensorflow.keras import Model\n'), (31, 'tensorflow.keras.applications.ResNet50', 'ResNet50', ([], {'input_shape': 'x_in.shape[1:]', 'include_top': '(False)', 'weights': 'weights'}), False, 'from tensorflow.keras.applications import MobileNetV2, ResNet50\n'), (34, 'tensorflow.keras.applications.MobileNetV2', 'MobileNetV2', ([], {'input_shape': 'x_in.shape[1:]', 'include_top': '(False)', 'weights': 'weights'}), False, 'from tensorflow.keras.applications import MobileNetV2, ResNet50\n')] |
rostekus/recognition_of_handwritten_digits | 1cdc86572b1aad8da126cd5623a8e857aa6bbc55 | #Import all Necessary Libraries
import tensorflow as tf
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Conv2D, Lambda, MaxPooling2D, Flatten, BatchNormalization, Dense
from tensorflow.keras.utils import to_categorical
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split
from sklearn.decomposition import PCA
from sklearn.datasets import fetch_openml
from tensorflow.keras.callbacks import EarlyStopping
import pickle
import numpy as np
import pandas as pd
import os
os.environ['KMP_DUPLICATE_LIB_OK']='True'
# batch size and number of epochs
BATCH_SIZE = 32
EPOCHS = 5
#importing dataset, 28x28 images of digits
mnist = fetch_openml('mnist_784')
#unpacking data
X , y = mnist.data, mnist.target
# converting string into int
y = y.astype(np.short)
# Reshape images to 3 dimensions
# channels = 1 for grayscale
X = X.reshape(-1,28,28,1)
# Normalizing inputs; note tf.keras.utils.normalize applies L2 normalization
# along the given axis rather than simple [0,1] scaling
X = tf.keras.utils.normalize(X, axis = 1)
# Split the train and the test set
X_train, X_test, y_train, y_test = train_test_split(X,y ,test_size=0.3, random_state = 42)
early_stopping_monitor = EarlyStopping(
monitor='val_loss',
min_delta=0,
patience=0,
verbose=0,
mode='auto',
baseline=None,
restore_best_weights=True
)
# Sequential Model
model =tf.keras.models.Sequential()
model.add(Conv2D(filters=64, kernel_size=3, input_shape = (28,28,1), activation='relu'))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Conv2D(filters=32, kernel_size = 3, activation='relu'))
model.add(Flatten())
model.add(BatchNormalization())
model.add(Dense(128,activation="relu"))
model.add(tf.keras.layers.Dense(10, activation=tf.nn.softmax))
model.compile(optimizer = 'adam',
            loss = 'sparse_categorical_crossentropy',
            metrics = ['accuracy'])
# Training model (callbacks are passed to fit(), not compile())
history = model.fit(X_train, y_train, batch_size=BATCH_SIZE,
                    epochs=EPOCHS, validation_split=0.2,
                    callbacks=[early_stopping_monitor])
# Saving the training history to a pickle file (despite the .h5 name, this is a pickle)
with open('model.h5', 'wb') as f:
    pickle.dump(history.history, f)
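# --- Illustrative follow-up (editor's sketch; not in the original script) ---
# The pickled history dict can be reloaded and plotted later, e.g.:
#
#   with open('model.h5', 'rb') as f:
#       hist = pickle.load(f)
#   plt.plot(hist['accuracy'], label='train')
#   plt.plot(hist['val_accuracy'], label='validation')
#   plt.legend(); plt.show()
#
# (Metric key names assume TF 2.x; TF 1.x Keras used 'acc'/'val_acc'.)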
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"sklearn.model_selection.train_test_split",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.utils.normalize",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.callbacks.EarlyStopping",
"tensorflow.keras.models.Sequential",
"sklearn.datasets.fetch_openml"
] | model_training.py | [(22, 'sklearn.datasets.fetch_openml', 'fetch_openml', (['"""mnist_784"""'], {}), False, 'from sklearn.datasets import fetch_openml\n'), (35, 'tensorflow.keras.utils.normalize', 'tf.keras.utils.normalize', (['X'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (38, 'sklearn.model_selection.train_test_split', 'train_test_split', (['X', 'y'], {'test_size': '(0.3)', 'random_state': '(42)'}), False, 'from sklearn.model_selection import train_test_split\n'), (40, 'tensorflow.keras.callbacks.EarlyStopping', 'EarlyStopping', ([], {'monitor': '"""val_loss"""', 'min_delta': '(0)', 'patience': '(0)', 'verbose': '(0)', 'mode': '"""auto"""', 'baseline': 'None', 'restore_best_weights': '(True)'}), False, 'from tensorflow.keras.callbacks import EarlyStopping\n'), (51, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(64)', 'kernel_size': '(3)', 'input_shape': '(28, 28, 1)', 'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, MaxPooling2D, Flatten, BatchNormalization, Dense\n'), (53, 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, MaxPooling2D, Flatten, BatchNormalization, Dense\n'), (54, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)', 'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, MaxPooling2D, Flatten, BatchNormalization, Dense\n'), (55, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, MaxPooling2D, Flatten, BatchNormalization, Dense\n'), (56, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, MaxPooling2D, Flatten, BatchNormalization, Dense\n'), (57, 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Conv2D, Lambda, MaxPooling2D, Flatten, BatchNormalization, Dense\n'), (58, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': 'tf.nn.softmax'}), True, 'import tensorflow as tf\n'), (70, 'pickle.dump', 'pickle.dump', (['model.history', 'f'], {}), False, 'import pickle\n')] |
maximoskp/guitar_tab_transcription | 2c4e3b4feb8b2c35020db050c89d33c5165798b1 | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 3 07:30:57 2021
@author: user
"""
import numpy as np
import sys
if sys.version_info >= (3,8):
import pickle
else:
import pickle5 as pickle
import tensorflow as tf
from tensorflow import keras
import os
import matplotlib.pyplot as plt
sys.path.insert(1, '..')
import data_utils
from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger
with open('..' + os.sep + 'data' + os.sep + 'flat_tablature_dataset.pickle', 'rb') as handle:
d = pickle.load(handle)
x_train = d['x_train'].T
y_train = d['y_train'].T
x_valid = d['x_valid'].T
y_valid = d['y_valid'].T
x_test = d['x_test'].T
y_test = d['y_test'].T
num_filters = 128
conv_decoder = keras.models.Sequential([
keras.layers.Conv2DTranspose(num_filters//2, kernel_size=3, strides=2, padding='valid',
activation='selu', input_shape=[1,6,num_filters//2]),
keras.layers.Conv2DTranspose(1, kernel_size=3, strides=2, padding='same',
activation='selu'),
# keras.layers.Lambda(lambda x: x[:,:,:-1,:]),
# keras.layers.Reshape([6, 25])
])
out_layer = keras.models.Sequential([
keras.layers.Lambda(lambda x: x[:,:,:-1,:]),
keras.layers.Reshape([6*25]),
keras.layers.Dense(y_train.shape[1], activation='sigmoid')
])
model = keras.models.Sequential()
model.add(keras.layers.Dense(512, activation='selu', input_shape=[x_train.shape[1]]))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(512, activation='selu'))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.BatchNormalization())
model.add(keras.layers.Dense(6*num_filters//2, activation='selu'))
model.add(keras.layers.Dropout(0.3))
model.add(keras.layers.BatchNormalization())
# to apply an LSTM, timesteps would need to be kept in the input
# model.add(keras.layers.LSTM(6*num_filters//2, activation='selu'))
model.add(keras.layers.Reshape([1,6,num_filters//2]))
model.add(conv_decoder)
model.add(out_layer)
model.compile(loss='mean_squared_error', optimizer='adam', metrics=['cosine_similarity'])
model.summary()
os.makedirs( '../models/tab_flat_CNN_out', exist_ok=True )  # must match the checkpoint/log paths below
# %%
filepath = '../models/tab_flat_CNN_out/tab_flat_CNN_out_epoch{epoch:02d}_valLoss{val_loss:.6f}.hdf5'
checkpoint = ModelCheckpoint(filepath=filepath,
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='min')
filepath_current_best = '../models/tab_flat_CNN_out/tab_flat_CNN_out_current_best.hdf5'
checkpoint_current_best = ModelCheckpoint(filepath=filepath_current_best,
monitor='val_loss',
verbose=1,
save_best_only=True,
mode='min')
csv_logger = CSVLogger('../models/tab_flat_CNN_out/flat_tab_logger.csv', append=True, separator=';')
history = model.fit( x_train, y_train, epochs=1000, batch_size=16,
validation_data=(x_valid,y_valid), callbacks=[checkpoint, checkpoint_current_best, csv_logger])
# model.save('models/tab_flat_ANN.h5')
# model.evaluate( x_test, y_test )
# # %%
# sessions_num = 10
# session_ids = np.random.choice(y_test.shape[0]-l, sessions_num, replace=False)
# frames_per_session = 10
# for i in session_ids:
# for j in range(frames_per_session):
# y_pred = model.predict( [x_test[i+j:i+j+1]] )
# print('predicted: ' + repr(y_pred))
# print('actual: ' + repr(y_test[i+j]))
# print('input: ' + repr( np.where(x_test[i+j, :128] != 0 ) ))
# plt.clf()
# plt.subplot(3,1,1)
# # plt.bar( np.arange(y_pred.shape[1]) , y_pred[0] )
# plt.imshow( np.reshape( y_pred[0] , [6, 25]) , cmap='gray_r' )
# plt.title('predicted')
# plt.subplot(3,1,2)
# # plt.bar( np.arange(y_pred.shape[1]) , y_test[i] )
# plt.imshow( np.reshape( y_test[i+j] , [6, 25]) , cmap='gray_r' )
# plt.title('actual')
# plt.subplot(3,1,3)
# plt.bar( np.arange(128) , x_test[i+j:i+j+1, :128][0] )
# plt.title('input')
# os.makedirs( 'figs/tab_flat_CNN_out/session_'+str(i), exist_ok=True )
# plt.savefig( 'figs/tab_flat_CNN_out/session_'+str(i)+'/fig_'+str(j)+'.png', dpi=300 )
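# --- Illustrative evaluation (editor's sketch; not in the original script) ---
# After training, the best checkpoint saved above could be reloaded and scored:
#
#   best = keras.models.load_model(filepath_current_best)
#   best.evaluate(x_test, y_test)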
| [
"tensorflow.keras.callbacks.ModelCheckpoint",
"tensorflow.keras.layers.Lambda",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2DTranspose",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.callbacks.CSVLogger",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.models.Sequential"
] | flat_tab_in_CNN/train_tab_flat_CNN_out.py | [(18, 'sys.path.insert', 'sys.path.insert', (['(1)', '""".."""'], {}), False, 'import sys\n'), (48, 'tensorflow.keras.models.Sequential', 'keras.models.Sequential', ([], {}), False, 'from tensorflow import keras\n'), (67, 'os.makedirs', 'os.makedirs', (['"""models/tab_flat_CNN_out"""'], {'exist_ok': '(True)'}), False, 'import os\n'), (71, 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'filepath', 'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""'}), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger\n'), (78, 'tensorflow.keras.callbacks.ModelCheckpoint', 'ModelCheckpoint', ([], {'filepath': 'filepath_current_best', 'monitor': '"""val_loss"""', 'verbose': '(1)', 'save_best_only': '(True)', 'mode': '"""min"""'}), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger\n'), (84, 'tensorflow.keras.callbacks.CSVLogger', 'CSVLogger', (['"""../models/tab_flat_CNN_out/flat_tab_logger.csv"""'], {'append': '(True)', 'separator': '""";"""'}), False, 'from tensorflow.keras.callbacks import ModelCheckpoint, CSVLogger\n'), (24, 'pickle5.load', 'pickle.load', (['handle'], {}), True, 'import pickle5 as pickle\n'), (49, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(512)'], {'activation': '"""selu"""', 'input_shape': '[x_train.shape[1]]'}), False, 'from tensorflow import keras\n'), (50, 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.3)'], {}), False, 'from tensorflow import keras\n'), (51, 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), False, 'from tensorflow import keras\n'), (52, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(512)'], {'activation': '"""selu"""'}), False, 'from tensorflow import keras\n'), (53, 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.3)'], {}), False, 'from tensorflow import keras\n'), (54, 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), False, 'from tensorflow import keras\n'), (55, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (['(6 * num_filters // 2)'], {'activation': '"""selu"""'}), False, 'from tensorflow import keras\n'), (56, 'tensorflow.keras.layers.Dropout', 'keras.layers.Dropout', (['(0.3)'], {}), False, 'from tensorflow import keras\n'), (57, 'tensorflow.keras.layers.BatchNormalization', 'keras.layers.BatchNormalization', ([], {}), False, 'from tensorflow import keras\n'), (60, 'tensorflow.keras.layers.Reshape', 'keras.layers.Reshape', (['[1, 6, num_filters // 2]'], {}), False, 'from tensorflow import keras\n'), (35, 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(num_filters // 2)'], {'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""valid"""', 'activation': '"""selu"""', 'input_shape': '[1, 6, num_filters // 2]'}), False, 'from tensorflow import keras\n'), (37, 'tensorflow.keras.layers.Conv2DTranspose', 'keras.layers.Conv2DTranspose', (['(1)'], {'kernel_size': '(3)', 'strides': '(2)', 'padding': '"""same"""', 'activation': '"""selu"""'}), False, 'from tensorflow import keras\n'), (43, 'tensorflow.keras.layers.Lambda', 'keras.layers.Lambda', (['(lambda x: x[:, :, :-1, :])'], {}), False, 'from tensorflow import keras\n'), (44, 'tensorflow.keras.layers.Reshape', 'keras.layers.Reshape', (['[6 * 25]'], {}), False, 'from tensorflow import keras\n'), (45, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', 
(['y_train.shape[1]'], {'activation': '"""sigmoid"""'}), False, 'from tensorflow import keras\n')] |
abogdanova/FedMed | 72f238c31b6714c664e1b0e40204f9528f764182 | from __future__ import absolute_import, division, print_function
import collections
import numpy as np
from six.moves import range
import tensorflow as tf
import datetime
from tensorflow_federated import python as tff
from tensorflow.python.keras.optimizer_v2 import gradient_descent
from tensorflow.keras import layers
tf.compat.v1.enable_v2_behavior()
EXP_CODE = 'nB128C4'
NUM_EXAMPLES_PER_USER = 2000
BATCH_SIZE = 128
USERS = 5
NUM_EPOCHS = 1
CLASSES = 10
WIDTH = 32
HEIGHT = 32
CHANNELS = 3
def mane():
""" Run program """
cifar_train, cifar_test = tf.keras.datasets.cifar10.load_data()
federated_train_data = [get_distributed(cifar_train, u, 'n') for u in range(USERS)]
(X_test, y_test) = get_non_distributed(cifar_test)
sample_batch = federated_train_data[1][-2]
non_federated_model = create_compiled_keras_model()
def model_fn():
keras_model = create_compiled_keras_model()
return tff.learning.from_compiled_keras_model(keras_model, sample_batch)
iterative_process = tff.learning.build_federated_averaging_process(model_fn)
evaluation = tff.learning.build_federated_evaluation(model_fn)
state = iterative_process.initialize()
fd_test_accuracy = []
fd_test_loss = []
fd_train_loss = []
for round_num in range(50):
selected = np.random.choice(5, 4, replace=False)
state, metrics = iterative_process.next(state, list(np.array(federated_train_data)[selected]))
non_federated_model.set_weights(state.model.trainable)
(loss, accuracy) = non_federated_model.evaluate(X_test, y_test)
fd_train_loss.append(metrics[1])
fd_test_accuracy.append(accuracy)
fd_test_loss.append(loss)
try:
with open('Log/Exp11/'+ EXP_CODE + '.txt', 'w') as log:
print(EXP_CODE + "Train = {}".format(fd_train_loss), file=log)
print(EXP_CODE + "Test = {}".format(fd_test_loss), file=log)
print(EXP_CODE + "Accuracy = {}".format(fd_test_accuracy), file=log)
except IOError:
print('File Error')
def get_indices_realistic(y, u):
    # split dataset indices into unequal per-user shares (not by class)
all_indices = [i for i, d in enumerate(y)]
shares_arr = [4000, 2000, 2000, 1000, 1000]
user_indices = []
for u in range(USERS):
user_indices.append([all_indices.pop(0) for i in range(shares_arr[u])])
return user_indices
def get_indices_unbalanced(y):
# split dataset into arrays of each class label
indices_array = []
for c in range(CLASSES):
indices_array.append([i for i, d in enumerate(y) if d == c])
# each user will have 2 classes excluded from their data sets, thus 250 examples * remaining 8 classes
class_shares = 250
# store indices for future use
user_indices = []
    # auxiliary index array used to pop out the pair of classes missing at each user
class_index = list(range(CLASSES))
for u in range(USERS):
columns_out = [class_index.pop(0) for i in range(2)]
selected_columns = set(range(CLASSES)) - set(columns_out)
starting_index = u*class_shares
user_indices.append(
np.array(indices_array)[list(selected_columns)].T[starting_index:starting_index + class_shares]
.flatten())
return user_indices
def get_indices_unbalanced_completely(y):
# split dataset into arrays of each class label
indices_array = []
for c in range(CLASSES):
indices_array.append([i for i, d in enumerate(y) if d == c])
class_shares = CLASSES // min(CLASSES, USERS)
user_indices = []
for u in range(USERS):
user_indices.append(
np.array(
[indices_array.pop(0)[:NUM_EXAMPLES_PER_USER//class_shares] for j in range(class_shares)])
.flatten())
return user_indices
def get_indices_even(y):
# split dataset into arrays of each class label
indices_array = []
for c in range(CLASSES):
indices_array.append([i for i, d in enumerate(y) if d == c])
user_indices = []
class_shares = NUM_EXAMPLES_PER_USER // CLASSES
# take even shares of each class for every user
for u in range(USERS):
starting_index = u*class_shares
user_indices.append(np.array(indices_array).T[starting_index:starting_index + class_shares].flatten())
return user_indices
def get_distributed(source, u, distribution):
if distribution == 'i':
indices = get_indices_even(source[1])[u]
elif distribution == 'n':
indices = get_indices_unbalanced(source[1])[u]
elif distribution == 'r':
indices = get_indices_realistic(source[1][:10000], u)[u]
else:
indices = get_indices_unbalanced_completely(source[1])[u]
output_sequence = []
for repeat in range(NUM_EPOCHS):
for i in range(0, len(indices), BATCH_SIZE):
batch_samples = indices[i:i + BATCH_SIZE]
output_sequence.append({
'x': np.array([source[0][b] / 255.0 for b in batch_samples], dtype=np.float32),
'y': np.array([source[1][b] for b in batch_samples], dtype=np.int32)})
return output_sequence
def get_non_distributed(source):
y = np.array(source[1][:10000], dtype=np.int32)
X = np.array(source[0][:10000], dtype=np.float32) / 255.0
return X, y
def create_compiled_keras_model():
model = tf.keras.models.Sequential([
tf.keras.layers.Conv2D(32,(3, 3),
activation="tanh",
padding="same",
input_shape=(WIDTH, HEIGHT, CHANNELS)),
tf.keras.layers.MaxPooling2D(pool_size=(2,2)),
tf.keras.layers.Conv2D(64, (3, 3), activation="tanh", padding="same"),
tf.keras.layers.MaxPooling2D(pool_size=(2,2)),
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation="tanh"),
tf.keras.layers.Dense(10, activation=tf.nn.softmax)])
def loss_fn(y_true, y_pred):
return tf.reduce_mean(tf.keras.losses.sparse_categorical_crossentropy(y_true, y_pred))
model.compile(loss=loss_fn, optimizer="adam", metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
return model
if __name__ == "__main__":
mane()
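# --- Illustrative note (editor's sketch; not in the original script) ---
# Each round in mane() samples 4 of the 5 simulated clients without
# replacement; seeding NumPy beforehand would make that selection reproducible:
#
#   np.random.seed(42)  # before calling mane()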
| [
"tensorflow.compat.v1.enable_v2_behavior",
"numpy.random.choice",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.losses.sparse_categorical_crossentropy",
"tensorflow.keras.datasets.cifar10.load_data",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"numpy.array",
"tensorflow.keras.layers.Flatten"
] | Past_experiments/nB128C4.py | [(14, 'tensorflow.compat.v1.enable_v2_behavior', 'tf.compat.v1.enable_v2_behavior', ([], {}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.keras.datasets.cifar10.load_data', 'tf.keras.datasets.cifar10.load_data', ([], {}), True, 'import tensorflow as tf\n'), (39, 'tensorflow_federated.python.learning.build_federated_averaging_process', 'tff.learning.build_federated_averaging_process', (['model_fn'], {}), True, 'from tensorflow_federated import python as tff\n'), (40, 'tensorflow_federated.python.learning.build_federated_evaluation', 'tff.learning.build_federated_evaluation', (['model_fn'], {}), True, 'from tensorflow_federated import python as tff\n'), (46, 'six.moves.range', 'range', (['(50)'], {}), False, 'from six.moves import range\n'), (69, 'six.moves.range', 'range', (['USERS'], {}), False, 'from six.moves import range\n'), (76, 'six.moves.range', 'range', (['CLASSES'], {}), False, 'from six.moves import range\n'), (84, 'six.moves.range', 'range', (['USERS'], {}), False, 'from six.moves import range\n'), (96, 'six.moves.range', 'range', (['CLASSES'], {}), False, 'from six.moves import range\n'), (100, 'six.moves.range', 'range', (['USERS'], {}), False, 'from six.moves import range\n'), (110, 'six.moves.range', 'range', (['CLASSES'], {}), False, 'from six.moves import range\n'), (117, 'six.moves.range', 'range', (['USERS'], {}), False, 'from six.moves import range\n'), (132, 'six.moves.range', 'range', (['NUM_EPOCHS'], {}), False, 'from six.moves import range\n'), (141, 'numpy.array', 'np.array', (['source[1][:10000]'], {'dtype': 'np.int32'}), True, 'import numpy as np\n'), (37, 'tensorflow_federated.python.learning.from_compiled_keras_model', 'tff.learning.from_compiled_keras_model', (['keras_model', 'sample_batch'], {}), True, 'from tensorflow_federated import python as tff\n'), (47, 'numpy.random.choice', 'np.random.choice', (['(5)', '(4)'], {'replace': '(False)'}), True, 'import numpy as np\n'), (83, 'six.moves.range', 'range', (['CLASSES'], {}), False, 'from six.moves import range\n'), (142, 'numpy.array', 'np.array', (['source[0][:10000]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (30, 'six.moves.range', 'range', (['USERS'], {}), False, 'from six.moves import range\n'), (147, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(3, 3)'], {'activation': '"""tanh"""', 'padding': '"""same"""', 'input_shape': '(WIDTH, HEIGHT, CHANNELS)'}), True, 'import tensorflow as tf\n'), (151, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), True, 'import tensorflow as tf\n'), (152, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(3, 3)'], {'activation': '"""tanh"""', 'padding': '"""same"""'}), True, 'import tensorflow as tf\n'), (153, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', ([], {'pool_size': '(2, 2)'}), True, 'import tensorflow as tf\n'), (154, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {}), True, 'import tensorflow as tf\n'), (155, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(128)'], {'activation': '"""tanh"""'}), True, 'import tensorflow as tf\n'), (156, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': 'tf.nn.softmax'}), True, 'import tensorflow as tf\n'), (159, 'tensorflow.keras.losses.sparse_categorical_crossentropy', 'tf.keras.losses.sparse_categorical_crossentropy', (['y_true', 'y_pred'], {}), True, 'import tensorflow as tf\n'), 
(85, 'six.moves.range', 'range', (['(2)'], {}), False, 'from six.moves import range\n'), (86, 'six.moves.range', 'range', (['CLASSES'], {}), False, 'from six.moves import range\n'), (161, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {}), True, 'import tensorflow as tf\n'), (48, 'numpy.array', 'np.array', (['federated_train_data'], {}), True, 'import numpy as np\n'), (70, 'six.moves.range', 'range', (['shares_arr[u]'], {}), False, 'from six.moves import range\n'), (136, 'numpy.array', 'np.array', (['[(source[0][b] / 255.0) for b in batch_samples]'], {'dtype': 'np.float32'}), True, 'import numpy as np\n'), (137, 'numpy.array', 'np.array', (['[source[1][b] for b in batch_samples]'], {'dtype': 'np.int32'}), True, 'import numpy as np\n'), (119, 'numpy.array', 'np.array', (['indices_array'], {}), True, 'import numpy as np\n'), (89, 'numpy.array', 'np.array', (['indices_array'], {}), True, 'import numpy as np\n'), (103, 'six.moves.range', 'range', (['class_shares'], {}), False, 'from six.moves import range\n')] |
PsycheShaman/Keras-GAN | 9a1f2576af8f67fad7845421ea5feb53012c1c9f | from __future__ import print_function, division
import tensorflow as tf
from tensorflow.keras.datasets import mnist
from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D
from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D, Reshape
#from tensorflow.keras.layers.advanced_activations import LeakyReLU
from tensorflow.keras.layers import UpSampling2D, Conv2D
from tensorflow.keras.models import Sequential, Model
from tensorflow.keras.optimizers import Adam
import tensorflow.keras.backend as K
import matplotlib.pyplot as plt
import sys
import numpy as np
def load_data():
tracks = np.load("C:/Users/Gerhard/Documents/6_tracklets_large_calib_train/0_tracks.npy")
infosets = np.load("C:/Users/Gerhard/Documents/6_tracklets_large_calib_train/0_info_set.npy")
x = tracks.reshape((-1, 17,24))
y = np.repeat(infosets[:, 0], 6)
return (x,y)
class BGAN():
"""Reference: https://wiseodd.github.io/techblog/2017/03/07/boundary-seeking-gan/"""
def __init__(self):
self.img_rows = 17
self.img_cols = 24
self.channels = 1
self.img_shape = (self.img_rows, self.img_cols, self.channels)
self.latent_dim = 81
optimizer = Adam(0.000001)
# Build and compile the discriminator
self.discriminator = self.build_discriminator()
self.discriminator.compile(loss='binary_crossentropy',
optimizer=optimizer,
metrics=['accuracy'])
# Build the generator
self.generator = self.build_generator()
# The generator takes noise as input and generated imgs
z = Input(shape=(self.latent_dim,))
img = self.generator(z)
# For the combined model we will only train the generator
self.discriminator.trainable = False
# The valid takes generated images as input and determines validity
valid = self.discriminator(img)
# The combined model (stacked generator and discriminator)
# Trains the generator to fool the discriminator
self.combined = Model(z, valid)
self.combined.compile(loss=self.boundary_loss, optimizer=optimizer)
def build_generator(self):
model = Sequential()
model.add(Reshape((9,9,1), input_shape=(81,)))
# model.add(LeakyReLU(alpha=0.2))
# model.add(BatchNormalization(momentum=0.8))
model.add(Conv2D(filters=16,kernel_size=(3,3),activation=tf.nn.leaky_relu))
model.add(Conv2D(32,(2,2),activation=tf.nn.leaky_relu))
model.add(Conv2D(64,(3,2),activation=tf.nn.leaky_relu))
model.add(Flatten())
model.add(Dense(128,activation=tf.nn.leaky_relu))
model.add(Dense(256,activation=tf.nn.leaky_relu))
# model.add(LeakyReLU(alpha=0.2))
# model.add(BatchNormalization(momentum=0.8))
model.add(Dense(512,activation=tf.nn.leaky_relu))
model.add(BatchNormalization(momentum=0.5))
model.add(Dense(512,activation=tf.nn.leaky_relu))
model.add(BatchNormalization(momentum=0.6))
model.add(Dense(1024,activation=tf.nn.leaky_relu))
model.add(BatchNormalization(momentum=0.7))
model.add(Dense(1024,activation=tf.nn.leaky_relu))
model.add(BatchNormalization(momentum=0.8))
model.add(Dense(np.prod(self.img_shape), activation='tanh'))
model.add(Reshape(self.img_shape))
model.summary()
noise = Input(shape=(self.latent_dim,))
img = model(noise)
return Model(noise, img)
def build_discriminator(self):
model = Sequential()
# model.add(Flatten(input_shape=self.img_shape))
model.add(Conv2D(filters=16,kernel_size=3,input_shape=self.img_shape))
model.add(Conv2D(filters=32,kernel_size=3))
model.add(MaxPooling2D())
model.add(Flatten())
model.add(Dense(1024,activation=tf.nn.leaky_relu))
model.add(Dense(512,activation=tf.nn.leaky_relu))
# model.add(LeakyReLU(alpha=0.2))
model.add(Dense(256,activation=tf.nn.leaky_relu))
model.add(Dense(128,activation=tf.nn.leaky_relu))
model.add(Dense(64,activation=tf.nn.leaky_relu))
# model.add(LeakyReLU(alpha=0.2))
model.add(Dense(1, activation='sigmoid'))
model.summary()
img = Input(shape=self.img_shape)
validity = model(img)
return Model(img, validity)
def boundary_loss(self, y_true, y_pred):
"""
Boundary seeking loss.
Reference: https://wiseodd.github.io/techblog/2017/03/07/boundary-seeking-gan/
"""
return 0.5 * K.mean((K.log(y_pred) - K.log(1 - y_pred))**2)
def train(self, epochs, batch_size=128, sample_interval=50):
# Load the dataset
(X_train, _) = load_data()
# Rescale -1 to 1
# for i in range(0,X_train.shape[0]):
# ma = np.max(X_train[i,:,:])
# X_train[i,:,:] = X_train[i,:,:]/ma
X_train = X_train/np.max(X_train)
# X_train = X_train / 127.5 - 1.
X_train = np.expand_dims(X_train, axis=3)
# Adversarial ground truths
valid = np.ones((batch_size, 1))
fake = np.zeros((batch_size, 1))
for epoch in range(epochs):
# ---------------------
# Train Discriminator
# ---------------------
# Select a random batch of images
idx = np.random.randint(0, X_train.shape[0], batch_size)
imgs = X_train[idx]
noise = np.random.normal(0, 1, (batch_size, self.latent_dim))
# Generate a batch of new images
gen_imgs = self.generator.predict(noise)
# Train the discriminator
d_loss_real = self.discriminator.train_on_batch(imgs, valid)
d_loss_fake = self.discriminator.train_on_batch(gen_imgs, fake)
d_loss = 0.5 * np.add(d_loss_real, d_loss_fake)
# ---------------------
# Train Generator
# ---------------------
g_loss = self.combined.train_on_batch(noise, valid)
# Plot the progress
print ("%d [D loss: %f, acc.: %.2f%%] [G loss: %f]" % (epoch, d_loss[0], 100*d_loss[1], g_loss))
# If at save interval => save generated image samples
if epoch % sample_interval == 0:
self.sample_images(epoch)
def sample_images(self, epoch):
r, c = 5, 5
noise = np.random.normal(0, 1, (r * c, self.latent_dim))
gen_imgs = self.generator.predict(noise)
# Rescale images 0 - 1
gen_imgs = 0.5 * gen_imgs + 0.5
fig, axs = plt.subplots(r, c)
cnt = 0
for i in range(r):
for j in range(c):
axs[i,j].imshow(gen_imgs[cnt, :,:,0], cmap='gray')
axs[i,j].axis('off')
cnt += 1
fig.savefig("images/mnist_%d.png" % epoch)
plt.close()
if __name__ == '__main__':
bgan = BGAN()
bgan.train(epochs=30000, batch_size=32, sample_interval=200)
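# --- Illustrative sampling (editor's sketch; not in the original script) ---
# Note that sample_images() writes to an "images/" directory, which must
# already exist. Once trained, tracklet-shaped samples can be drawn with:
#
#   noise = np.random.normal(0, 1, (10, bgan.latent_dim))
#   samples = bgan.generator.predict(noise)   # shape (10, 17, 24, 1)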
| [
"numpy.expand_dims",
"numpy.max",
"tensorflow.keras.backend.log",
"numpy.random.randint",
"tensorflow.keras.layers.Conv2D",
"matplotlib.pyplot.close",
"numpy.load",
"numpy.repeat",
"tensorflow.keras.layers.Flatten",
"tensorflow.keras.models.Sequential",
"numpy.zeros",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Reshape",
"matplotlib.pyplot.subplots",
"numpy.ones",
"numpy.random.normal",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.layers.BatchNormalization",
"numpy.prod",
"numpy.add",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Input"
] | bgan/bgan9/bgan9.py | [(21, 'numpy.load', 'np.load', (['"""C:/Users/Gerhard/Documents/6_tracklets_large_calib_train/0_tracks.npy"""'], {}), True, 'import numpy as np\n'), (23, 'numpy.load', 'np.load', (['"""C:/Users/Gerhard/Documents/6_tracklets_large_calib_train/0_info_set.npy"""'], {}), True, 'import numpy as np\n'), (27, 'numpy.repeat', 'np.repeat', (['infosets[:, (0)]', '(6)'], {}), True, 'import numpy as np\n'), (39, 'tensorflow.keras.optimizers.Adam', 'Adam', (['(1e-06)'], {}), False, 'from tensorflow.keras.optimizers import Adam\n'), (51, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(self.latent_dim,)'}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (62, 'tensorflow.keras.models.Model', 'Model', (['z', 'valid'], {}), False, 'from tensorflow.keras.models import Sequential, Model\n'), (67, 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras.models import Sequential, Model\n'), (93, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': '(self.latent_dim,)'}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (96, 'tensorflow.keras.models.Model', 'Model', (['noise', 'img'], {}), False, 'from tensorflow.keras.models import Sequential, Model\n'), (100, 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), False, 'from tensorflow.keras.models import Sequential, Model\n'), (117, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'self.img_shape'}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (120, 'tensorflow.keras.models.Model', 'Model', (['img', 'validity'], {}), False, 'from tensorflow.keras.models import Sequential, Model\n'), (143, 'numpy.expand_dims', 'np.expand_dims', (['X_train'], {'axis': '(3)'}), True, 'import numpy as np\n'), (146, 'numpy.ones', 'np.ones', (['(batch_size, 1)'], {}), True, 'import numpy as np\n'), (147, 'numpy.zeros', 'np.zeros', (['(batch_size, 1)'], {}), True, 'import numpy as np\n'), (185, 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(r * c, self.latent_dim)'], {}), True, 'import numpy as np\n'), (190, 'matplotlib.pyplot.subplots', 'plt.subplots', (['r', 'c'], {}), True, 'import matplotlib.pyplot as plt\n'), (198, 'matplotlib.pyplot.close', 'plt.close', ([], {}), True, 'import matplotlib.pyplot as plt\n'), (69, 'tensorflow.keras.layers.Reshape', 'Reshape', (['(9, 9, 1)'], {'input_shape': '(81,)'}), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D, Reshape\n'), (72, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(16)', 'kernel_size': '(3, 3)', 'activation': 'tf.nn.leaky_relu'}), False, 'from tensorflow.keras.layers import UpSampling2D, Conv2D\n'), (73, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(2, 2)'], {'activation': 'tf.nn.leaky_relu'}), False, 'from tensorflow.keras.layers import UpSampling2D, Conv2D\n'), (74, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 2)'], {'activation': 'tf.nn.leaky_relu'}), False, 'from tensorflow.keras.layers import UpSampling2D, Conv2D\n'), (75, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (76, 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': 'tf.nn.leaky_relu'}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), 
(77, 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': 'tf.nn.leaky_relu'}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (80, 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {'activation': 'tf.nn.leaky_relu'}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (81, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.5)'}), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D, Reshape\n'), (82, 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {'activation': 'tf.nn.leaky_relu'}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (83, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.6)'}), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D, Reshape\n'), (84, 'tensorflow.keras.layers.Dense', 'Dense', (['(1024)'], {'activation': 'tf.nn.leaky_relu'}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (85, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.7)'}), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D, Reshape\n'), (86, 'tensorflow.keras.layers.Dense', 'Dense', (['(1024)'], {'activation': 'tf.nn.leaky_relu'}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (87, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'momentum': '(0.8)'}), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D, Reshape\n'), (89, 'tensorflow.keras.layers.Reshape', 'Reshape', (['self.img_shape'], {}), False, 'from tensorflow.keras.layers import BatchNormalization, Activation, ZeroPadding2D, Reshape\n'), (103, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(16)', 'kernel_size': '(3)', 'input_shape': 'self.img_shape'}), False, 'from tensorflow.keras.layers import UpSampling2D, Conv2D\n'), (104, 'tensorflow.keras.layers.Conv2D', 'Conv2D', ([], {'filters': '(32)', 'kernel_size': '(3)'}), False, 'from tensorflow.keras.layers import UpSampling2D, Conv2D\n'), (105, 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (106, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (107, 'tensorflow.keras.layers.Dense', 'Dense', (['(1024)'], {'activation': 'tf.nn.leaky_relu'}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (108, 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {'activation': 'tf.nn.leaky_relu'}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (110, 'tensorflow.keras.layers.Dense', 'Dense', (['(256)'], {'activation': 'tf.nn.leaky_relu'}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (111, 'tensorflow.keras.layers.Dense', 'Dense', (['(128)'], {'activation': 'tf.nn.leaky_relu'}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (112, 'tensorflow.keras.layers.Dense', 
'Dense', (['(64)'], {'activation': 'tf.nn.leaky_relu'}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (114, 'tensorflow.keras.layers.Dense', 'Dense', (['(1)'], {'activation': '"""sigmoid"""'}), False, 'from tensorflow.keras.layers import Input, Dense, Reshape, Flatten, Dropout, MaxPooling2D\n'), (140, 'numpy.max', 'np.max', (['X_train'], {}), True, 'import numpy as np\n'), (156, 'numpy.random.randint', 'np.random.randint', (['(0)', 'X_train.shape[0]', 'batch_size'], {}), True, 'import numpy as np\n'), (159, 'numpy.random.normal', 'np.random.normal', (['(0)', '(1)', '(batch_size, self.latent_dim)'], {}), True, 'import numpy as np\n'), (88, 'numpy.prod', 'np.prod', (['self.img_shape'], {}), True, 'import numpy as np\n'), (167, 'numpy.add', 'np.add', (['d_loss_real', 'd_loss_fake'], {}), True, 'import numpy as np\n'), (127, 'tensorflow.keras.backend.log', 'K.log', (['y_pred'], {}), True, 'import tensorflow.keras.backend as K\n'), (127, 'tensorflow.keras.backend.log', 'K.log', (['(1 - y_pred)'], {}), True, 'import tensorflow.keras.backend as K\n')] |
NodLabs/SHARK | 71f5cfcb30b3e7032c6d1d9f952860ff7769afa0 | from iree import runtime as ireert
from iree.tf.support import module_utils
from iree.compiler import tf as tfc
from iree.compiler import compile_str
from absl import app
import time
import numpy as np
import os
import tensorflow as tf
from official.nlp.modeling import layers
from official.nlp.modeling import networks
from official.nlp.modeling.models import bert_classifier
vocab_size = 100
NUM_CLASSES = 5
SEQUENCE_LENGTH = 512
BATCH_SIZE = 1
# Create a set of 2-dimensional inputs
bert_input = [
tf.TensorSpec(shape=[BATCH_SIZE, SEQUENCE_LENGTH], dtype=tf.int32),
tf.TensorSpec(shape=[BATCH_SIZE, SEQUENCE_LENGTH], dtype=tf.int32),
tf.TensorSpec(shape=[BATCH_SIZE, SEQUENCE_LENGTH], dtype=tf.int32),
]
class BertModule(tf.Module):
def __init__(self):
super(BertModule, self).__init__()
dict_outputs = False
test_network = networks.BertEncoder(
vocab_size=vocab_size,
num_layers=24,
hidden_size=1024,
num_attention_heads=16,
dict_outputs=dict_outputs,
)
# Create a BERT trainer with the created network.
bert_trainer_model = bert_classifier.BertClassifier(
test_network, num_classes=NUM_CLASSES
)
bert_trainer_model.summary()
        # Keep a handle on the trainer model and wrap its call for inference.
self.m = bert_trainer_model
self.m.predict = lambda x: self.m.call(x, training=False)
self.predict = tf.function(input_signature=[bert_input])(self.m.predict)
self.m.learn = lambda x, y: self.m.call(x, training=False)
self.loss = tf.keras.losses.SparseCategoricalCrossentropy()
self.optimizer = tf.keras.optimizers.SGD(learning_rate=1e-2)
@tf.function(
input_signature=[
bert_input, # inputs
tf.TensorSpec(shape=[BATCH_SIZE], dtype=tf.int32), # labels
]
)
def learn(self, inputs, labels):
with tf.GradientTape() as tape:
# Capture the gradients from forward prop...
probs = self.m(inputs, training=True)
loss = self.loss(labels, probs)
# ...and use them to update the model's weights.
variables = self.m.trainable_variables
gradients = tape.gradient(loss, variables)
self.optimizer.apply_gradients(zip(gradients, variables))
return loss
if __name__ == "__main__":
# BertModule()
# Compile the model using IREE
compiler_module = tfc.compile_module(
BertModule(), exported_names=["learn"], import_only=True
)
# Compile the model using IREE
backend = "dylib-llvm-aot"
args = [
"--iree-llvm-target-cpu-features=host",
"--iree-mhlo-demote-i64-to-i32=false",
"--iree-stream-resource-index-bits=64",
"--iree-vm-target-index-bits=64",
]
backend_config = "dylib"
# backend = "cuda"
# backend_config = "cuda"
# args = ["--iree-cuda-llvm-target-arch=sm_80", "--iree-hal-cuda-disable-loop-nounroll-wa", "--iree-enable-fusion-with-reduction-ops"]
flatbuffer_blob = compile_str(
compiler_module,
target_backends=[backend],
extra_args=args,
input_type="mhlo",
)
# flatbuffer_blob = compile_str(compiler_module, target_backends=["dylib-llvm-aot"])
# Save module as MLIR file in a directory
vm_module = ireert.VmModule.from_flatbuffer(flatbuffer_blob)
tracer = ireert.Tracer(os.getcwd())
config = ireert.Config("dylib", tracer)
ctx = ireert.SystemContext(config=config)
ctx.add_vm_module(vm_module)
BertCompiled = ctx.modules.module
predict_sample_input = [
np.random.randint(5, size=(BATCH_SIZE, SEQUENCE_LENGTH)),
np.random.randint(5, size=(BATCH_SIZE, SEQUENCE_LENGTH)),
np.random.randint(5, size=(BATCH_SIZE, SEQUENCE_LENGTH)),
]
learn_sample_input = [
predict_sample_input,
np.random.randint(5, size=(BATCH_SIZE)),
]
warmup = 5
total_iter = 10
num_iter = total_iter - warmup
for i in range(10):
        if i == warmup:  # start timing only after the warmup iterations
start = time.time()
print(
BertCompiled.learn(
predict_sample_input, np.random.randint(5, size=(BATCH_SIZE))
)
)
end = time.time()
total_time = end - start
print("time: " + str(total_time))
print("time/iter: " + str(total_time / num_iter))
| [
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.optimizers.SGD",
"tensorflow.GradientTape",
"tensorflow.function",
"tensorflow.TensorSpec",
"numpy.random.randint"
] | tank/tf/bert_large_run.py | [(22, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[BATCH_SIZE, SEQUENCE_LENGTH]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (23, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[BATCH_SIZE, SEQUENCE_LENGTH]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (24, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[BATCH_SIZE, SEQUENCE_LENGTH]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (92, 'iree.compiler.compile_str', 'compile_str', (['compiler_module'], {'target_backends': '[backend]', 'extra_args': 'args', 'input_type': '"""mhlo"""'}), False, 'from iree.compiler import compile_str\n'), (101, 'iree.runtime.VmModule.from_flatbuffer', 'ireert.VmModule.from_flatbuffer', (['flatbuffer_blob'], {}), True, 'from iree import runtime as ireert\n'), (103, 'iree.runtime.Config', 'ireert.Config', (['"""dylib"""', 'tracer'], {}), True, 'from iree import runtime as ireert\n'), (104, 'iree.runtime.SystemContext', 'ireert.SystemContext', ([], {'config': 'config'}), True, 'from iree import runtime as ireert\n'), (127, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (32, 'official.nlp.modeling.networks.BertEncoder', 'networks.BertEncoder', ([], {'vocab_size': 'vocab_size', 'num_layers': '(24)', 'hidden_size': '(1024)', 'num_attention_heads': '(16)', 'dict_outputs': 'dict_outputs'}), False, 'from official.nlp.modeling import networks\n'), (41, 'official.nlp.modeling.models.bert_classifier.BertClassifier', 'bert_classifier.BertClassifier', (['test_network'], {'num_classes': 'NUM_CLASSES'}), False, 'from official.nlp.modeling.models import bert_classifier\n'), (51, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {}), True, 'import tensorflow as tf\n'), (52, 'tensorflow.keras.optimizers.SGD', 'tf.keras.optimizers.SGD', ([], {'learning_rate': '(0.01)'}), True, 'import tensorflow as tf\n'), (102, 'os.getcwd', 'os.getcwd', ([], {}), False, 'import os\n'), (108, 'numpy.random.randint', 'np.random.randint', (['(5)'], {'size': '(BATCH_SIZE, SEQUENCE_LENGTH)'}), True, 'import numpy as np\n'), (109, 'numpy.random.randint', 'np.random.randint', (['(5)'], {'size': '(BATCH_SIZE, SEQUENCE_LENGTH)'}), True, 'import numpy as np\n'), (110, 'numpy.random.randint', 'np.random.randint', (['(5)'], {'size': '(BATCH_SIZE, SEQUENCE_LENGTH)'}), True, 'import numpy as np\n'), (114, 'numpy.random.randint', 'np.random.randint', (['(5)'], {'size': 'BATCH_SIZE'}), True, 'import numpy as np\n'), (49, 'tensorflow.function', 'tf.function', ([], {'input_signature': '[bert_input]'}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (121, 'time.time', 'time.time', ([], {}), False, 'import time\n'), (57, 'tensorflow.TensorSpec', 'tf.TensorSpec', ([], {'shape': '[BATCH_SIZE]', 'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (124, 'numpy.random.randint', 'np.random.randint', (['(5)'], {'size': 'BATCH_SIZE'}), True, 'import numpy as np\n')] |
sv641/km | fc7c70bf691692a06f219c2c6a8f658a91e81ae6 | # USAGE
# Start the server:
# python app.py
# Submit a request via cURL:
# curl -X POST -F image=@dog.jpg 'http://localhost:5000/predict'
# import the necessary packages
from tensorflow.keras.applications import ResNet50
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.applications import imagenet_utils
from tensorflow.python.keras.backend import set_session
from PIL import Image
import numpy as np
import flask
import io
import tensorflow as tf
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
# initialize our Flask application and the Keras model
app = flask.Flask(__name__)
app.config['UPLOAD_FOLDER'] = '/tmp'
model = None
sess = tf.compat.v1.Session()
def load_model():
# load the pre-trained Keras model (here we are using a model
# pre-trained on ImageNet and provided by Keras, but you can
# substitute in your own networks just as easily)
global model
global sess
set_session(sess)
model = ResNet50(weights="imagenet")
global graph
graph = tf.compat.v1.get_default_graph()
def prepare_image(image, target):
# if the image mode is not RGB, convert it
if image.mode != "RGB":
image = image.convert("RGB")
# resize the input image and preprocess it
image = image.resize(target)
image = img_to_array(image)
image = np.expand_dims(image, axis=0)
image = imagenet_utils.preprocess_input(image)
# return the processed image
return image
@app.route("/predict", methods=["POST"])
def predict():
# initialize the data dictionary that will be returned from the
# view
data = {"success": False}
# ensure an image was properly uploaded to our endpoint
if flask.request.method == "POST":
if flask.request.files.get("image"):
# read the image in PIL format
image = flask.request.files["image"].read()
image = Image.open(io.BytesIO(image))
# preprocess the image and prepare it for classification
image = prepare_image(image, target=(224, 224))
# classify the input image and then initialize the list
# of predictions to return to the client
global sess
with graph.as_default():
set_session(sess)
preds = model.predict(image)
results = imagenet_utils.decode_predictions(preds)
data["predictions"] = []
# loop over the results and add them to the list of
# returned predictions
for (imagenetID, label, prob) in results[0]:
r = {"label": label, "probability": float(prob)}
data["predictions"].append(r)
# indicate that the request was a success
data["success"] = True
# return the data dictionary as a JSON response
return flask.jsonify(data)
# if this is the main thread of execution first load the model and
# then start the server
if __name__ == "__main__":
print(("* Loading Keras model and Flask starting server..."
"please wait until server has fully started"))
load_model()
app.run(host='0.0.0.0')
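# --- Illustrative client (editor's sketch; not in the original app) ---
# Besides curl, the endpoint can be exercised from Python ("dog.jpg" is a
# placeholder file name):
#
#   import requests
#   with open('dog.jpg', 'rb') as fh:
#       r = requests.post('http://localhost:5000/predict', files={'image': fh})
#   print(r.json())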
| [
"tensorflow.compat.v1.get_default_graph",
"numpy.expand_dims",
"tensorflow.keras.applications.imagenet_utils.preprocess_input",
"tensorflow.keras.applications.imagenet_utils.decode_predictions",
"tensorflow.compat.v1.Session",
"tensorflow.keras.applications.ResNet50",
"tensorflow.python.keras.backend.set_session",
"tensorflow.python.framework.ops.disable_eager_execution",
"tensorflow.keras.preprocessing.image.img_to_array"
] | demo/test-app/app.py | [(20, 'tensorflow.python.framework.ops.disable_eager_execution', 'disable_eager_execution', ([], {}), False, 'from tensorflow.python.framework.ops import disable_eager_execution\n'), (23, 'flask.Flask', 'flask.Flask', (['__name__'], {}), False, 'import flask\n'), (26, 'tensorflow.compat.v1.Session', 'tf.compat.v1.Session', ([], {}), True, 'import tensorflow as tf\n'), (34, 'tensorflow.python.keras.backend.set_session', 'set_session', (['sess'], {}), False, 'from tensorflow.python.keras.backend import set_session\n'), (35, 'tensorflow.keras.applications.ResNet50', 'ResNet50', ([], {'weights': '"""imagenet"""'}), False, 'from tensorflow.keras.applications import ResNet50\n'), (37, 'tensorflow.compat.v1.get_default_graph', 'tf.compat.v1.get_default_graph', ([], {}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.keras.preprocessing.image.img_to_array', 'img_to_array', (['image'], {}), False, 'from tensorflow.keras.preprocessing.image import img_to_array\n'), (47, 'numpy.expand_dims', 'np.expand_dims', (['image'], {'axis': '(0)'}), True, 'import numpy as np\n'), (48, 'tensorflow.keras.applications.imagenet_utils.preprocess_input', 'imagenet_utils.preprocess_input', (['image'], {}), False, 'from tensorflow.keras.applications import imagenet_utils\n'), (88, 'flask.jsonify', 'flask.jsonify', (['data'], {}), False, 'import flask\n'), (61, 'flask.request.files.get', 'flask.request.files.get', (['"""image"""'], {}), False, 'import flask\n'), (64, 'io.BytesIO', 'io.BytesIO', (['image'], {}), False, 'import io\n'), (73, 'tensorflow.python.keras.backend.set_session', 'set_session', (['sess'], {}), False, 'from tensorflow.python.keras.backend import set_session\n'), (75, 'tensorflow.keras.applications.imagenet_utils.decode_predictions', 'imagenet_utils.decode_predictions', (['preds'], {}), False, 'from tensorflow.keras.applications import imagenet_utils\n')] |
n-hutton/colearn | 4e1257dae1316a4366a745fa965ea5e28d0ead14 | # ------------------------------------------------------------------------------
#
# Copyright 2021 Fetch.AI Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# ------------------------------------------------------------------------------
import os
import pickle
import tempfile
from pathlib import Path
from typing import Tuple, List, Optional
import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
from tensorflow.python.data.ops.dataset_ops import PrefetchDataset
from colearn.utils.data import split_list_into_fractions
from colearn_keras.keras_learner import KerasLearner
from colearn_keras.utils import normalize_img
from colearn_grpc.factory_registry import FactoryRegistry
IMAGE_FL = "images.pickle"
LABEL_FL = "labels.pickle"
def _get_keras_cifar10_conv2D_model(learning_rate: float) -> tf.keras.Model:
"""
2D Convolutional model for image recognition
:param learning_rate: Learning rate for optimiser
:return: Return instance of Keras model
"""
loss = "sparse_categorical_crossentropy"
optimizer = tf.keras.optimizers.Adam
input_img = tf.keras.Input(
shape=(32, 32, 3), name="Input"
)
x = tf.keras.layers.Conv2D(
32, (5, 5), activation="relu", padding="same", name="Conv1_1"
)(input_img)
x = tf.keras.layers.MaxPooling2D((2, 2), name="pool1")(x)
x = tf.keras.layers.Conv2D(
32, (5, 5), activation="relu", padding="same", name="Conv2_1"
)(x)
x = tf.keras.layers.MaxPooling2D((2, 2), name="pool2")(x)
x = tf.keras.layers.Conv2D(
64, (5, 5), activation="relu", padding="same", name="Conv3_1"
)(x)
x = tf.keras.layers.MaxPooling2D((2, 2), name="pool3")(x)
x = tf.keras.layers.Flatten(name="flatten")(x)
x = tf.keras.layers.Dense(
64, activation="relu", name="fc1"
)(x)
x = tf.keras.layers.Dense(
10, activation="softmax", name="fc2"
)(x)
model = tf.keras.Model(inputs=input_img, outputs=x)
opt = optimizer(
        learning_rate=learning_rate
)
model.compile(
loss=loss,
metrics=[tf.keras.metrics.SparseCategoricalAccuracy()],
optimizer=opt)
return model
@FactoryRegistry.register_model_architecture("KERAS_CIFAR10", ["KERAS_CIFAR10"])
def prepare_learner(data_loaders: Tuple[PrefetchDataset, PrefetchDataset],
steps_per_epoch: int = 100,
vote_batches: int = 10,
learning_rate: float = 0.001,
**_kwargs) -> KerasLearner:
"""
Creates new instance of KerasLearner
:param data_loaders: Tuple of train_loader and test_loader
:param steps_per_epoch: Number of batches per training epoch
    :param vote_batches: Number of batches used to compute the vote score
:param learning_rate: Learning rate for optimiser
:param _kwargs: Residual parameters not used by this function
:return: New instance of KerasLearner
"""
learner = KerasLearner(
model=_get_keras_cifar10_conv2D_model(learning_rate),
train_loader=data_loaders[0],
test_loader=data_loaders[1],
criterion="sparse_categorical_accuracy",
minimise_criterion=False,
model_fit_kwargs={"steps_per_epoch": steps_per_epoch},
model_evaluate_kwargs={"steps": vote_batches},
)
return learner
def _make_loader(images: np.array,
labels: np.array,
batch_size: int) -> PrefetchDataset:
"""
Converts array of images and labels to Tensorflow dataset
:param images: Numpy array of input data
:param labels: Numpy array of output labels
:param batch_size: Batch size
:return: Shuffled Tensorflow prefetch dataset holding images and labels
"""
dataset = tf.data.Dataset.from_tensor_slices((images, labels))
n_datapoints = images.shape[0]
dataset = dataset.cache()
dataset = dataset.shuffle(n_datapoints)
dataset = dataset.batch(batch_size)
dataset = dataset.prefetch(tf.data.experimental.AUTOTUNE)
return dataset
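# A minimal usage sketch (not part of the original module), assuming toy data
# with CIFAR10-like shapes; it only illustrates what _make_loader returns.
def _demo_make_loader():
    images = np.random.rand(8, 32, 32, 3).astype(np.float32)
    labels = np.random.randint(0, 10, size=(8,)).astype(np.int64)
    loader = _make_loader(images, labels, batch_size=4)
    for batch_images, batch_labels in loader.take(1):
        # Expect (4, 32, 32, 3) for the images and (4,) for the labels
        print(batch_images.shape, batch_labels.shape)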
@FactoryRegistry.register_dataloader("KERAS_CIFAR10")
def prepare_data_loaders(train_folder: str,
train_ratio: float = 0.9,
batch_size: int = 32,
**_kwargs) -> Tuple[PrefetchDataset, PrefetchDataset]:
"""
    Load training data from folders and create train and test dataloaders
    :param train_folder: Path to training dataset
    :param train_ratio: Portion of the data used as the training set; the remainder becomes the test set
    :param batch_size: Number of samples per batch
:param _kwargs: Residual parameters not used by this function
:return: Tuple of train_loader and test_loader
"""
images = pickle.load(open(Path(train_folder) / IMAGE_FL, "rb"))
labels = pickle.load(open(Path(train_folder) / LABEL_FL, "rb"))
n_cases = int(train_ratio * len(images))
train_loader = _make_loader(images[:n_cases], labels[:n_cases], batch_size)
test_loader = _make_loader(images[n_cases:], labels[n_cases:], batch_size)
return train_loader, test_loader
def split_to_folders(
n_learners: int,
data_split: Optional[List[float]] = None,
shuffle_seed: Optional[int] = None,
output_folder: Optional[Path] = None,
**_kwargs
) -> List[str]:
"""
    Loads images with labels and splits them into the specified number of subsets
:param n_learners: Number of parts for splitting
:param data_split: List of percentage portions for each subset
:param shuffle_seed: Seed for shuffling
    :param output_folder: Folder where the split parts will be stored as numbered subfolders
:param _kwargs: Residual parameters not used by this function
:return: List of folders containing individual subsets
"""
if output_folder is None:
output_folder = Path(tempfile.gettempdir()) / "cifar10"
if data_split is None:
data_split = [1 / n_learners] * n_learners
# Load CIFAR10 from tfds
train_dataset, info = tfds.load('cifar10', split='train+test', as_supervised=True, with_info=True)
n_datapoints = info.splits['train+test'].num_examples
train_dataset = train_dataset.map(normalize_img).batch(n_datapoints)
# there is only one batch in the iterator, and this contains all the data
all_data = next(iter(tfds.as_numpy(train_dataset)))
all_images = all_data[0]
all_labels = all_data[1]
np.random.seed(shuffle_seed)
random_indices = np.random.permutation(np.arange(n_datapoints))
split_indices = split_list_into_fractions(random_indices, data_split)
local_output_dir = Path(output_folder)
dir_names = []
for i in range(n_learners):
dir_name = local_output_dir / str(i)
os.makedirs(str(dir_name), exist_ok=True)
learner_images = all_images[split_indices[i]]
learner_labels = all_labels[split_indices[i]]
pickle.dump(learner_images, open(dir_name / IMAGE_FL, "wb"))
pickle.dump(learner_labels, open(dir_name / LABEL_FL, "wb"))
dir_names.append(dir_name)
print(dir_names)
return [str(x) for x in dir_names]
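# Hedged end-to-end sketch (not part of the original module): wires the three
# helpers above together for two learners. It assumes the FactoryRegistry
# decorators return the wrapped functions unchanged; numbers are illustrative.
def _demo_pipeline():
    folders = split_to_folders(n_learners=2, shuffle_seed=42)
    data_loaders = prepare_data_loaders(train_folder=folders[0])
    learner = prepare_learner(data_loaders, steps_per_epoch=10, vote_batches=2)
    return learner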
| [
"tensorflow.keras.Input",
"numpy.random.seed",
"tensorflow.keras.layers.Dense",
"tensorflow.data.Dataset.from_tensor_slices",
"tensorflow.keras.layers.Conv2D",
"numpy.arange",
"tensorflow.keras.Model",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Flatten"
] | colearn_keras/keras_cifar10.py | [(81, 'colearn_grpc.factory_registry.FactoryRegistry.register_model_architecture', 'FactoryRegistry.register_model_architecture', (['"""KERAS_CIFAR10"""', "['KERAS_CIFAR10']"], {}), False, 'from colearn_grpc.factory_registry import FactoryRegistry\n'), (129, 'colearn_grpc.factory_registry.FactoryRegistry.register_dataloader', 'FactoryRegistry.register_dataloader', (['"""KERAS_CIFAR10"""'], {}), False, 'from colearn_grpc.factory_registry import FactoryRegistry\n'), (47, 'tensorflow.keras.Input', 'tf.keras.Input', ([], {'shape': '(32, 32, 3)', 'name': '"""Input"""'}), True, 'import tensorflow as tf\n'), (69, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'input_img', 'outputs': 'x'}), True, 'import tensorflow as tf\n'), (118, 'tensorflow.data.Dataset.from_tensor_slices', 'tf.data.Dataset.from_tensor_slices', (['(images, labels)'], {}), True, 'import tensorflow as tf\n'), (177, 'tensorflow_datasets.load', 'tfds.load', (['"""cifar10"""'], {'split': '"""train+test"""', 'as_supervised': '(True)', 'with_info': '(True)'}), True, 'import tensorflow_datasets as tfds\n'), (186, 'numpy.random.seed', 'np.random.seed', (['shuffle_seed'], {}), True, 'import numpy as np\n'), (188, 'colearn.utils.data.split_list_into_fractions', 'split_list_into_fractions', (['random_indices', 'data_split'], {}), False, 'from colearn.utils.data import split_list_into_fractions\n'), (190, 'pathlib.Path', 'Path', (['output_folder'], {}), False, 'from pathlib import Path\n'), (50, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(5, 5)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""Conv1_1"""'}), True, 'import tensorflow as tf\n'), (53, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2, 2)'], {'name': '"""pool1"""'}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(32)', '(5, 5)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""Conv2_1"""'}), True, 'import tensorflow as tf\n'), (57, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2, 2)'], {'name': '"""pool2"""'}), True, 'import tensorflow as tf\n'), (58, 'tensorflow.keras.layers.Conv2D', 'tf.keras.layers.Conv2D', (['(64)', '(5, 5)'], {'activation': '"""relu"""', 'padding': '"""same"""', 'name': '"""Conv3_1"""'}), True, 'import tensorflow as tf\n'), (61, 'tensorflow.keras.layers.MaxPooling2D', 'tf.keras.layers.MaxPooling2D', (['(2, 2)'], {'name': '"""pool3"""'}), True, 'import tensorflow as tf\n'), (62, 'tensorflow.keras.layers.Flatten', 'tf.keras.layers.Flatten', ([], {'name': '"""flatten"""'}), True, 'import tensorflow as tf\n'), (63, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(64)'], {'activation': '"""relu"""', 'name': '"""fc1"""'}), True, 'import tensorflow as tf\n'), (66, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(10)'], {'activation': '"""softmax"""', 'name': '"""fc2"""'}), True, 'import tensorflow as tf\n'), (187, 'numpy.arange', 'np.arange', (['n_datapoints'], {}), True, 'import numpy as np\n'), (182, 'tensorflow_datasets.as_numpy', 'tfds.as_numpy', (['train_dataset'], {}), True, 'import tensorflow_datasets as tfds\n'), (76, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {}), True, 'import tensorflow as tf\n'), (144, 'pathlib.Path', 'Path', (['train_folder'], {}), False, 'from pathlib import Path\n'), (145, 'pathlib.Path', 'Path', (['train_folder'], 
{}), False, 'from pathlib import Path\n'), (171, 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), False, 'import tempfile\n')] |
prashantramnani/nn_likelihoods | 94e7a1d8fdf8c4e635eeaa66a7e941aa6b226f41 | import tensorflow as tf
from tensorflow import keras
import os
import pandas as pd
# Function asks for a dictionary as input with the following keys (and associated datatypes)
# params = {'input_shape': 3,
# 'output_shape': 1,
# 'output_activation': 'sigmoid',
# 'hidden_layers': [20, 20, 20],
# 'hidden_activations': ['relu', 'relu', 'relu'],
# 'l1_activation': [0.0, 0.0, 0.0],
# 'l2_activation': [0.0, 0.0, 0.0],
#           'l1_kernel': [0.0, 0.0, 0.0],
#           'l2_kernel': [0.0, 0.0, 0.0],
# 'optimizer': 'Nadam',
# 'loss': 'mse',
# 'metrics': ['mse'],
# 'batch_size': 100,
# 'max_epoch': 1000,
# 'eval_after_n_epochs': 10,
# 'data_type': 'choice_probabilities',
# 'model_directory': '',
# 'training_data_size': 'online'
# }
def keras_model_generate(params = {}):
# This returns a tensor
inputs = keras.layers.Input(shape = (params['input_shape'], ))
# Model hidden
op = keras.layers.Dense(params['hidden_layers'][0],
activation = params['hidden_activations'][0],
kernel_regularizer = keras.regularizers.l1_l2(l1 = params['l1_kernel'][0],
l2 = params['l2_kernel'][0]),
activity_regularizer = keras.regularizers.l1_l2(l1 = params['l1_activation'][0],
l2 = params['l2_activation'][0])
)(inputs)
for cnt in range(1, len(params['hidden_layers']), 1):
op = keras.layers.Dense(params['hidden_layers'][cnt],
activation = params['hidden_activations'][cnt],
kernel_regularizer = keras.regularizers.l1_l2(l1 = params['l1_kernel'][cnt],
l2 = params['l2_kernel'][cnt]),
activity_regularizer = keras.regularizers.l1_l2(l1 = params['l1_activation'][cnt],
l2 = params['l2_activation'][cnt]))(op)
# Model output
outputs = keras.layers.Dense(params['output_shape'], params['output_activation'])(op)
# Make model
model = keras.models.Model(inputs = inputs, outputs = outputs)
model.compile(
        optimizer = params['optimizer'],
        loss = params['loss'],
        metrics = params['metrics']
)
return model
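# Hedged usage sketch (illustrative, not part of the original file): building a
# small network with the dictionary layout documented at the top of the file.
# example_params = {'input_shape': 3, 'output_shape': 1,
#                   'output_activation': 'sigmoid',
#                   'hidden_layers': [20, 20],
#                   'hidden_activations': ['relu', 'relu'],
#                   'l1_activation': [0.0, 0.0], 'l2_activation': [0.0, 0.0],
#                   'l1_kernel': [0.0, 0.0], 'l2_kernel': [0.0, 0.0],
#                   'optimizer': 'Nadam', 'loss': 'mse', 'metrics': ['mse']}
# model = keras_model_generate(params=example_params)
# model.summary()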
| [
"tensorflow.keras.layers.Dense",
"tensorflow.keras.regularizers.l1_l2",
"tensorflow.keras.models.Model",
"tensorflow.keras.layers.Input"
] | dnnreg_model_keras.py | [(33, 'tensorflow.keras.layers.Input', 'keras.layers.Input', ([], {'shape': "(params['input_shape'],)"}), False, 'from tensorflow import keras\n'), (56, 'tensorflow.keras.models.Model', 'keras.models.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), False, 'from tensorflow import keras\n'), (53, 'tensorflow.keras.layers.Dense', 'keras.layers.Dense', (["params['output_shape']", "params['output_activation']"], {}), False, 'from tensorflow import keras\n'), (38, 'tensorflow.keras.regularizers.l1_l2', 'keras.regularizers.l1_l2', ([], {'l1': "params['l1_kernel'][0]", 'l2': "params['l2_kernel'][0]"}), False, 'from tensorflow import keras\n'), (40, 'tensorflow.keras.regularizers.l1_l2', 'keras.regularizers.l1_l2', ([], {'l1': "params['l1_activation'][0]", 'l2': "params['l2_activation'][0]"}), False, 'from tensorflow import keras\n'), (47, 'tensorflow.keras.regularizers.l1_l2', 'keras.regularizers.l1_l2', ([], {'l1': "params['l1_kernel'][cnt]", 'l2': "params['l2_kernel'][cnt]"}), False, 'from tensorflow import keras\n'), (49, 'tensorflow.keras.regularizers.l1_l2', 'keras.regularizers.l1_l2', ([], {'l1': "params['l1_activation'][cnt]", 'l2': "params['l2_activation'][cnt]"}), False, 'from tensorflow import keras\n')] |
jonarani/IAS0360_project | 1f8182dcf6edcba6607a0f287fa3fbaab05f50fd | import json
from PIL.Image import SEQUENCE
import matplotlib
import matplotlib.pyplot as plt
from numpy.random.mtrand import shuffle
import cv2
import numpy as np
import scipy.ndimage as scpy
from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, BatchNormalization, Flatten
from tensorflow.keras.callbacks import EarlyStopping
import tensorflow as tf
import sys
import random
import os
# when printing numpy array then all will be printed
#np.set_printoptions(threshold=sys.maxsize)
# 32 x 32
IMG_HEIGHT = 32
IMG_WIDTH = 32
# Due to sensor placement it seems that first rows are always
# cold and unable to detect humans
DEL_ROW_AMNT = 8
# IMG_Y_RESIZED = int((IMG_HEIGHT - DEL_ROW_AMNT) * 0.75)
# IMG_X_RESIZED = int(IMG_WIDTH * 2.0 * 0.75)
IMG_Y_RESIZED = IMG_HEIGHT - DEL_ROW_AMNT
IMG_X_RESIZED = IMG_WIDTH
# Sensor 3078
S3078_FILE = '../dataset/thermal_raw_20210507_full/20210507_1605_3078.txt'
# Sensor C088
SC088_FILE = '../dataset/thermal_raw_20210507_full/20210507_1605_C088.txt'
s3078_data_arr = []
sc088_data_arr = []
human_images = []
background_images = []
x_train = []
y_train = []
x_test = []
y_test = []
def readSensorData():
s3078_file = open(S3078_FILE, 'r')
sc088_file = open(SC088_FILE, 'r')
counter = 0
while True:
counter = counter + 1
# Get one sample from the file
s3078_sample = s3078_file.readline()
sc088_sample = sc088_file.readline()
# eof
if (not s3078_sample or not sc088_sample):
break
if (counter % 4 == 0):
# Convert sample into json form so it would be easier to parse
s3078_json = json.loads(s3078_sample)
sc088_json = json.loads(sc088_sample)
# Get the data part from the sample
s3078_data = np.array(s3078_json["data"])
sc088_data = np.array(sc088_json["data"])
s3078_data = np.delete(s3078_data, np.s_[0:DEL_ROW_AMNT], 0)
sc088_data = np.delete(sc088_data, np.s_[0:DEL_ROW_AMNT], 0)
s3078_data_arr.append(s3078_data)
sc088_data_arr.append(sc088_data)
# close sensor txt file
s3078_file.close()
sc088_file.close()
def removeHotPixels(img):
image = np.copy(img)
mean_temp = np.mean(image)
for i, row in enumerate(image):
for j, col in enumerate (row):
if (image[i][j] > mean_temp):
rand_float = (np.random.random() / 2) - 0.25
image[i][j] = mean_temp - 0.5 + rand_float
return image
def dataAugmentation():
for sample in s3078_data_arr:
# Human images
human_images.append(sample)
sample_cpy = np.copy(sample)
sample_cpy = scpy.median_filter(sample_cpy, size=(3,3))
human_images.append(sample_cpy)
sample_cpy = np.copy(sample)
sample_cpy = np.flip(sample_cpy, 1)
human_images.append(sample_cpy)
sample_cpy = scpy.median_filter(sample_cpy, size=(3,3))
human_images.append(sample_cpy)
# Background images
sample_no_hot_pixels = removeHotPixels(sample)
background_images.append(sample_no_hot_pixels)
sample_no_hot_pixels_filtered = scpy.median_filter(sample_no_hot_pixels, size=(3,3))
background_images.append(sample_no_hot_pixels_filtered)
np.random.shuffle(sample_no_hot_pixels)
background_images.append(sample_no_hot_pixels)
sample_no_hot_pixels_filtered = scpy.median_filter(sample_no_hot_pixels, size=(3,3))
background_images.append(sample_no_hot_pixels_filtered)
for sample in sc088_data_arr:
# Human images
human_images.append(sample)
sample_cpy = np.copy(sample)
sample_cpy = scpy.median_filter(sample_cpy, size=(3,3))
human_images.append(sample_cpy)
sample_cpy = np.copy(sample)
sample_cpy = np.flip(sample_cpy, 1)
human_images.append(sample_cpy)
sample_cpy = scpy.median_filter(sample_cpy, size=(3,3))
human_images.append(sample_cpy)
# Background images
sample_no_hot_pixels = removeHotPixels(sample)
background_images.append(sample_no_hot_pixels)
sample_no_hot_pixels_filtered = scpy.median_filter(sample_no_hot_pixels, size=(3,3))
background_images.append(sample_no_hot_pixels_filtered)
np.random.shuffle(sample_no_hot_pixels)
background_images.append(sample_no_hot_pixels)
sample_no_hot_pixels_filtered = scpy.median_filter(sample_no_hot_pixels, size=(3,3))
background_images.append(sample_no_hot_pixels_filtered)
def storeImages():
for i, img in enumerate(human_images):
# Multiplied by 10 in order not to lose precision
# For example 13.4 will be 134 rather than 13
img = img * 10
cv2.imwrite("./imgs_human/img{}.png".format(i), img)
# Resize images to be smaller
#img = cv2.imread("imgs_human/img{}.png".format(i))
#res = cv2.resize(img, (IMG_X_RESIZED, IMG_Y_RESIZED), interpolation = cv2.INTER_CUBIC)
#cv2.imwrite("imgs_human_resized/img{}.png".format(i), img)
for i, img in enumerate(background_images):
# Multiplied by 10 in order not to lose precision
# For example 13.4 will be 134 rather than 13
img = img * 10
cv2.imwrite("./imgs_background/img{}.png".format(i), img)
# Resize images to be smaller
#img = cv2.imread("imgs_background/img{}.png".format(i))
#res = cv2.resize(img, (IMG_X_RESIZED, IMG_Y_RESIZED), interpolation = cv2.INTER_CUBIC)
#cv2.imwrite("imgs_background_resized/img{}.png".format(i), img)
def prepareDataForTraining():
global x_train
global y_train
global x_test
global y_test
training_data_prct = 0.8
img_label_tuple = []
for idx, im in enumerate(os.listdir("imgs_human/")):
try:
img_array = cv2.imread(os.path.join("imgs_human/", im))
# Remove third dimension and divide by 10 to get original temp array
img_array = np.array(img_array[:, :, 0]) / 10
img_label_tuple.append((img_array, 1))
except Exception as e:
print("EXCEPTION")
pass
for idx, im in enumerate(os.listdir("imgs_background/")):
try:
img_array = cv2.imread(os.path.join("imgs_background/", im))
# Remove third dimension and divide by 10 to get original temp array
img_array = np.array(img_array[:, :, 0]) / 10
img_label_tuple.append((img_array, 0))
except Exception as e:
print("EXCEPTION")
pass
random.shuffle(img_label_tuple)
imgs, labels = zip(*img_label_tuple)
training_amount = int((len(imgs) * training_data_prct))
validation_amount = len(imgs) - training_amount
x_train = np.array(imgs[:training_amount])
y_train = np.array(labels[:training_amount])
x_test = np.array(imgs[(-validation_amount):])
y_test = np.array(labels[(-validation_amount):])
# Normalize everything
# x_train = tf.keras.utils.normalize(x_train)
# x_test = tf.keras.utils.normalize(x_test)
# TODO: something more reasonable perhaps
x_train = x_train / 255
x_test = x_test / 255
x_train = np.array(x_train).reshape((-1, IMG_Y_RESIZED, IMG_X_RESIZED, 1))
x_test = np.array(x_test).reshape((-1, IMG_Y_RESIZED, IMG_X_RESIZED, 1))
# TODO maybe: https://bleedai.com/human-activity-recognition-using-tensorflow-cnn-lstm/
def train():
model = tf.keras.models.Sequential()
model.add(Conv2D(32, kernel_size=(3,3), padding='same', activation='relu', input_shape=(IMG_Y_RESIZED, IMG_X_RESIZED, 1)))
model.add(Conv2D(32, kernel_size=(3,3), padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=2))
model.add(Dropout(0.5))
model.add(Conv2D(64, kernel_size=(3,3), padding='same', activation='relu'))
model.add(Conv2D(64, kernel_size=(3,3), padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=2))
model.add(Dropout(0.5))
model.add(Conv2D(128, kernel_size=(3,3), padding='same', activation='relu'))
#model.add(Conv2D(256, kernel_size=(3,3), padding='same', activation='relu'))
model.add(MaxPool2D(pool_size=(2,2), strides=2))
model.add(Dropout(0.5))
model.add(Flatten())
model.add(tf.keras.layers.Dense(2))
model.summary()
# Define parameters for training the model
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), # BinaryCrossentropy
metrics=['accuracy'])
# Train model - Adjust model parameters to minimize the loss and train it
model.fit(x_train, y_train, epochs=2, batch_size=32)
# Evaluate model performance
val_loss, val_acc = model.evaluate(x_test, y_test)
print ("Validation evaluation results: loss - ", format(val_loss, '.3f'), "accuracy - ", format(val_acc, '.3f'))
model.save('models/my_mnist.model')
return model
def convertToTfLite(model):
# https://www.tensorflow.org/lite/convert/index
converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
tflite_model = converter.convert()
with open('models/model.tflite', 'wb') as f:
f.write(tflite_model)
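def runTfLiteInference(sample):
    # Hedged sketch (not part of the original script): sanity-check the
    # converted model with the TFLite Python interpreter. `sample` is assumed
    # to be a single image shaped (1, IMG_Y_RESIZED, IMG_X_RESIZED, 1).
    interpreter = tf.lite.Interpreter(model_path='models/model.tflite')
    interpreter.allocate_tensors()
    input_details = interpreter.get_input_details()
    output_details = interpreter.get_output_details()
    interpreter.set_tensor(input_details[0]['index'], sample.astype(np.float32))
    interpreter.invoke()
    return interpreter.get_tensor(output_details[0]['index'])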
def runSomeInferenceTests(model):
# TODO: run it on some unseen data
predictions = model.predict(x_train[:10])
print (y_train[:10])
print (predictions)
def main():
readSensorData()
dataAugmentation()
storeImages()
prepareDataForTraining()
model = train()
convertToTfLite(model)
runSomeInferenceTests(model)
if __name__ == "__main__":
main()
# Write image to .txt file as C array
# with open('background.txt', 'w') as f:
# counter = 0
# for item in background_images:
# for i in item:
# f.write("{")
# for j in i:
# f.write("%.4s, " % j)
# f.write("},\n")
# f.write("\n")
# counter = counter +1
# print (item)
# if (counter >= 5):
# break | [
"tensorflow.lite.TFLiteConverter.from_keras_model",
"numpy.random.random",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.keras.layers.Dense",
"tensorflow.keras.layers.Conv2D",
"scipy.ndimage.median_filter",
"tensorflow.keras.layers.MaxPool2D",
"numpy.random.shuffle",
"numpy.copy",
"numpy.delete",
"numpy.mean",
"tensorflow.keras.optimizers.Adam",
"numpy.flip",
"tensorflow.keras.layers.Dropout",
"numpy.array",
"tensorflow.keras.models.Sequential",
"tensorflow.keras.layers.Flatten"
] | source/preprocess_and_train.py | [(84, 'numpy.copy', 'np.copy', (['img'], {}), True, 'import numpy as np\n'), (85, 'numpy.mean', 'np.mean', (['image'], {}), True, 'import numpy as np\n'), (203, 'random.shuffle', 'random.shuffle', (['img_label_tuple'], {}), False, 'import random\n'), (209, 'numpy.array', 'np.array', (['imgs[:training_amount]'], {}), True, 'import numpy as np\n'), (210, 'numpy.array', 'np.array', (['labels[:training_amount]'], {}), True, 'import numpy as np\n'), (211, 'numpy.array', 'np.array', (['imgs[-validation_amount:]'], {}), True, 'import numpy as np\n'), (212, 'numpy.array', 'np.array', (['labels[-validation_amount:]'], {}), True, 'import numpy as np\n'), (228, 'tensorflow.keras.models.Sequential', 'tf.keras.models.Sequential', ([], {}), True, 'import tensorflow as tf\n'), (267, 'tensorflow.lite.TFLiteConverter.from_keras_model', 'tf.lite.TFLiteConverter.from_keras_model', (['model'], {}), True, 'import tensorflow as tf\n'), (98, 'numpy.copy', 'np.copy', (['sample'], {}), True, 'import numpy as np\n'), (99, 'scipy.ndimage.median_filter', 'scpy.median_filter', (['sample_cpy'], {'size': '(3, 3)'}), True, 'import scipy.ndimage as scpy\n'), (102, 'numpy.copy', 'np.copy', (['sample'], {}), True, 'import numpy as np\n'), (103, 'numpy.flip', 'np.flip', (['sample_cpy', '(1)'], {}), True, 'import numpy as np\n'), (106, 'scipy.ndimage.median_filter', 'scpy.median_filter', (['sample_cpy'], {'size': '(3, 3)'}), True, 'import scipy.ndimage as scpy\n'), (113, 'scipy.ndimage.median_filter', 'scpy.median_filter', (['sample_no_hot_pixels'], {'size': '(3, 3)'}), True, 'import scipy.ndimage as scpy\n'), (116, 'numpy.random.shuffle', 'np.random.shuffle', (['sample_no_hot_pixels'], {}), True, 'import numpy as np\n'), (119, 'scipy.ndimage.median_filter', 'scpy.median_filter', (['sample_no_hot_pixels'], {'size': '(3, 3)'}), True, 'import scipy.ndimage as scpy\n'), (126, 'numpy.copy', 'np.copy', (['sample'], {}), True, 'import numpy as np\n'), (127, 'scipy.ndimage.median_filter', 'scpy.median_filter', (['sample_cpy'], {'size': '(3, 3)'}), True, 'import scipy.ndimage as scpy\n'), (130, 'numpy.copy', 'np.copy', (['sample'], {}), True, 'import numpy as np\n'), (131, 'numpy.flip', 'np.flip', (['sample_cpy', '(1)'], {}), True, 'import numpy as np\n'), (134, 'scipy.ndimage.median_filter', 'scpy.median_filter', (['sample_cpy'], {'size': '(3, 3)'}), True, 'import scipy.ndimage as scpy\n'), (141, 'scipy.ndimage.median_filter', 'scpy.median_filter', (['sample_no_hot_pixels'], {'size': '(3, 3)'}), True, 'import scipy.ndimage as scpy\n'), (144, 'numpy.random.shuffle', 'np.random.shuffle', (['sample_no_hot_pixels'], {}), True, 'import numpy as np\n'), (147, 'scipy.ndimage.median_filter', 'scpy.median_filter', (['sample_no_hot_pixels'], {'size': '(3, 3)'}), True, 'import scipy.ndimage as scpy\n'), (183, 'os.listdir', 'os.listdir', (['"""imgs_human/"""'], {}), False, 'import os\n'), (193, 'os.listdir', 'os.listdir', (['"""imgs_background/"""'], {}), False, 'import os\n'), (230, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""', 'input_shape': '(IMG_Y_RESIZED, IMG_X_RESIZED, 1)'}), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, BatchNormalization, Flatten\n'), (231, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)'], {'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, BatchNormalization, 
Flatten\n'), (232, 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2)'}), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, BatchNormalization, Flatten\n'), (233, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, BatchNormalization, Flatten\n'), (235, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, BatchNormalization, Flatten\n'), (236, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)'], {'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, BatchNormalization, Flatten\n'), (237, 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2)'}), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, BatchNormalization, Flatten\n'), (238, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, BatchNormalization, Flatten\n'), (240, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(128)'], {'kernel_size': '(3, 3)', 'padding': '"""same"""', 'activation': '"""relu"""'}), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, BatchNormalization, Flatten\n'), (242, 'tensorflow.keras.layers.MaxPool2D', 'MaxPool2D', ([], {'pool_size': '(2, 2)', 'strides': '(2)'}), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, BatchNormalization, Flatten\n'), (243, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, BatchNormalization, Flatten\n'), (245, 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), False, 'from tensorflow.keras.layers import Conv2D, MaxPool2D, Dropout, BatchNormalization, Flatten\n'), (246, 'tensorflow.keras.layers.Dense', 'tf.keras.layers.Dense', (['(2)'], {}), True, 'import tensorflow as tf\n'), (66, 'json.loads', 'json.loads', (['s3078_sample'], {}), False, 'import json\n'), (67, 'json.loads', 'json.loads', (['sc088_sample'], {}), False, 'import json\n'), (70, 'numpy.array', 'np.array', (["s3078_json['data']"], {}), True, 'import numpy as np\n'), (71, 'numpy.array', 'np.array', (["sc088_json['data']"], {}), True, 'import numpy as np\n'), (73, 'numpy.delete', 'np.delete', (['s3078_data', 'np.s_[0:DEL_ROW_AMNT]', '(0)'], {}), True, 'import numpy as np\n'), (74, 'numpy.delete', 'np.delete', (['sc088_data', 'np.s_[0:DEL_ROW_AMNT]', '(0)'], {}), True, 'import numpy as np\n'), (222, 'numpy.array', 'np.array', (['x_train'], {}), True, 'import numpy as np\n'), (223, 'numpy.array', 'np.array', (['x_test'], {}), True, 'import numpy as np\n'), (251, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'learning_rate': '(0.0001)'}), True, 'import tensorflow as tf\n'), (252, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)'}), True, 'import tensorflow as tf\n'), (185, 'os.path.join', 'os.path.join', (['"""imgs_human/"""', 'im'], {}), False, 'import os\n'), (187, 'numpy.array', 'np.array', (['img_array[:, :, (0)]'], {}), True, 'import numpy as np\n'), (195, 'os.path.join', 'os.path.join', (['"""imgs_background/"""', 'im'], {}), False, 'import 
os\n'), (197, 'numpy.array', 'np.array', (['img_array[:, :, (0)]'], {}), True, 'import numpy as np\n'), (89, 'numpy.random.random', 'np.random.random', ([], {}), True, 'import numpy as np\n')] |
riverliuc/transformers | 3f51e6a35871fefbdfb705902355d7530a72d1b8 | # coding=utf-8
# Copyright 2018 The Google AI Language Team Authors and The HuggingFace Inc. team.
# Copyright (c) 2018, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TF general model utils."""
import functools
import inspect
import os
import re
import warnings
from typing import Dict, List, Optional, Union
import h5py
import numpy as np
import tensorflow as tf
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.saving import hdf5_format
from .configuration_utils import PretrainedConfig
from .file_utils import (
DUMMY_INPUTS,
TF2_WEIGHTS_NAME,
WEIGHTS_NAME,
ModelOutput,
PushToHubMixin,
cached_path,
copy_func,
hf_bucket_url,
is_offline_mode,
is_remote_url,
)
from .generation_tf_utils import TFGenerationMixin
from .tokenization_utils_base import BatchEncoding
from .utils import logging
logger = logging.get_logger(__name__)
tf_logger = tf.get_logger()
TFModelInputType = Union[
List[tf.Tensor], List[np.ndarray], Dict[str, tf.Tensor], Dict[str, np.ndarray], np.ndarray, tf.Tensor
]
class TFModelUtilsMixin:
"""
A few utilities for :obj:`tf.keras.Model`, to be used as a mixin.
"""
def num_parameters(self, only_trainable: bool = False) -> int:
"""
Get the number of (optionally, trainable) parameters in the model.
Args:
only_trainable (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to return only the number of trainable parameters
Returns:
:obj:`int`: The number of parameters.
"""
if only_trainable:
return int(sum(np.prod(w.shape.as_list()) for w in self.trainable_variables))
else:
return self.count_params()
def keras_serializable(cls):
"""
Decorate a Keras Layer class to support Keras serialization.
This is done by:
    1. Adding a :obj:`transformers_config` dict to the Keras config dictionary in :obj:`get_config` (called by Keras at
       serialization time).
2. Wrapping :obj:`__init__` to accept that :obj:`transformers_config` dict (passed by Keras at deserialization
time) and convert it to a config object for the actual layer initializer.
3. Registering the class as a custom object in Keras (if the Tensorflow version supports this), so that it does not
need to be supplied in :obj:`custom_objects` in the call to :obj:`tf.keras.models.load_model`.
Args:
cls (a :obj:`tf.keras.layers.Layers subclass`):
            Typically a :obj:`TF.MainLayer` class in this project, which in general must accept a :obj:`config`
            argument to its initializer.
Returns:
The same class object, with modifications for Keras deserialization.
"""
initializer = cls.__init__
config_class = getattr(cls, "config_class", None)
if config_class is None:
raise AttributeError("Must set `config_class` to use @keras_serializable")
@functools.wraps(initializer)
def wrapped_init(self, *args, **kwargs):
config = args[0] if args and isinstance(args[0], PretrainedConfig) else kwargs.pop("config", None)
if isinstance(config, dict):
config = config_class.from_dict(config)
initializer(self, config, *args, **kwargs)
elif isinstance(config, PretrainedConfig):
if len(args) > 0:
initializer(self, *args, **kwargs)
else:
initializer(self, config, *args, **kwargs)
else:
raise ValueError("Must pass either `config` (PretrainedConfig) or `config` (dict)")
self._config = config
self._kwargs = kwargs
cls.__init__ = wrapped_init
if not hasattr(cls, "get_config"):
raise TypeError("Only use @keras_serializable on tf.keras.layers.Layer subclasses")
if hasattr(cls.get_config, "_is_default"):
def get_config(self):
cfg = super(cls, self).get_config()
cfg["config"] = self._config.to_dict()
cfg.update(self._kwargs)
return cfg
cls.get_config = get_config
cls._keras_serializable = True
if hasattr(tf.keras.utils, "register_keras_serializable"):
cls = tf.keras.utils.register_keras_serializable()(cls)
return cls
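# Hedged illustration (not part of the library source): a minimal main layer
# that opts into Keras serialization via the decorator above. `MyConfig` is a
# hypothetical PretrainedConfig subclass supplied only for the example.
#
# @keras_serializable
# class TFMyMainLayer(tf.keras.layers.Layer):
#     config_class = MyConfig
#     def __init__(self, config, **kwargs):
#         super().__init__(**kwargs)
#         self.config = config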
class TFCausalLanguageModelingLoss:
"""
Loss function suitable for causal language modeling (CLM), that is, the task of guessing the next token.
.. note::
Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
"""
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
# make sure only labels that are not equal to -100 affect the loss
active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
return loss_fn(labels, reduced_logits)
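# Hedged worked example (illustrative, not part of the library source): with
# labels [[5, -100]], only the first position contributes to the loss, because
# the -100 entry is dropped by the boolean mask in compute_loss above.
# labels = tf.constant([[5, -100]])
# logits = tf.random.normal((1, 2, 10))
# loss = TFCausalLanguageModelingLoss().compute_loss(labels, logits)  # shape (1,)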
class TFQuestionAnsweringLoss:
"""
Loss function suitable for question answering.
"""
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
start_loss = loss_fn(labels["start_position"], logits[0])
end_loss = loss_fn(labels["end_position"], logits[1])
return (start_loss + end_loss) / 2.0
class TFTokenClassificationLoss:
"""
Loss function suitable for token classification.
.. note::
Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
"""
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
# make sure only labels that are not equal to -100
# are taken into account as loss
if tf.math.reduce_any(labels == -1):
warnings.warn("Using `-1` to mask the loss for the token is deprecated. Please use `-100` instead.")
active_loss = tf.reshape(labels, (-1,)) != -1
else:
active_loss = tf.reshape(labels, (-1,)) != -100
reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, shape_list(logits)[2])), active_loss)
labels = tf.boolean_mask(tf.reshape(labels, (-1,)), active_loss)
return loss_fn(labels, reduced_logits)
class TFSequenceClassificationLoss:
"""
Loss function suitable for sequence classification.
"""
def compute_loss(self, labels, logits):
if len(shape_list(logits)) == 1 or shape_list(logits)[1] == 1:
loss_fn = tf.keras.losses.MeanSquaredError(reduction=tf.keras.losses.Reduction.NONE)
else:
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
return loss_fn(labels, logits)
class TFMultipleChoiceLoss(TFSequenceClassificationLoss):
"""Loss function suitable for multiple choice tasks."""
class TFMaskedLanguageModelingLoss(TFCausalLanguageModelingLoss):
"""
Loss function suitable for masked language modeling (MLM), that is, the task of guessing the masked tokens.
.. note::
Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
"""
class TFNextSentencePredictionLoss:
"""
Loss function suitable for next sentence prediction (NSP), that is, the task of guessing the next sentence.
.. note::
Any label of -100 will be ignored (along with the corresponding logits) in the loss computation.
"""
def compute_loss(self, labels, logits):
loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(
from_logits=True, reduction=tf.keras.losses.Reduction.NONE
)
# make sure only labels that are not equal to -100
# are taken into account as loss
next_sentence_active_loss = tf.not_equal(tf.reshape(labels, (-1,)), -100)
next_sentence_reduced_logits = tf.boolean_mask(tf.reshape(logits, (-1, 2)), next_sentence_active_loss)
next_sentence_label = tf.boolean_mask(tf.reshape(labels, (-1,)), next_sentence_active_loss)
return loss_fn(next_sentence_label, next_sentence_reduced_logits)
def booleans_processing(config, **kwargs):
"""
Process the input booleans of each model in order to be sure they are compliant with the execution mode (eager or
graph)
Args:
config (:class:`~transformers.PretrainedConfig`):
The config of the running model.
**kwargs:
The boolean parameters
Returns:
A dictionary with the proper values for each boolean
"""
final_booleans = {}
if tf.executing_eagerly():
final_booleans["output_attentions"] = (
kwargs["output_attentions"] if kwargs["output_attentions"] is not None else config.output_attentions
)
final_booleans["output_hidden_states"] = (
kwargs["output_hidden_states"]
if kwargs["output_hidden_states"] is not None
else config.output_hidden_states
)
final_booleans["return_dict"] = (
kwargs["return_dict"] if kwargs["return_dict"] is not None else config.return_dict
)
if "use_cache" in kwargs:
final_booleans["use_cache"] = kwargs["use_cache"] if kwargs["use_cache"] is not None else config.use_cache
else:
if (
kwargs["output_attentions"] is not None
or kwargs["output_hidden_states"] is not None
or ("use_cache" in kwargs and kwargs["use_cache"] is not None)
):
tf_logger.warning(
"The parameters `output_attentions`, `output_hidden_states` and `use_cache` cannot be updated when calling a model."
"They have to be set to True/False in the config object (i.e.: `config=XConfig.from_pretrained('name', output_attentions=True)`)."
)
final_booleans["output_attentions"] = config.output_attentions
final_booleans["output_hidden_states"] = config.output_hidden_states
if kwargs["return_dict"] is not None:
tf_logger.warning(
"The parameter `return_dict` cannot be set in graph mode and will always be set to `True`."
)
final_booleans["return_dict"] = True
if "use_cache" in kwargs:
final_booleans["use_cache"] = config.use_cache
return final_booleans
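# Hedged illustration (not from the source): in eager mode, caller-supplied
# values win and `None` falls back to the config defaults, e.g.
# booleans_processing(config, output_attentions=True, output_hidden_states=None,
#                     return_dict=None)
# -> {"output_attentions": True,
#     "output_hidden_states": config.output_hidden_states,
#     "return_dict": config.return_dict}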
def input_processing(func, config, input_ids, **kwargs):
"""
    Process the input of each TensorFlow model including the booleans. In case of a list of symbolic inputs, each input
    has to be named according to its parameter name, i.e. `input_ids = tf.keras.Input(shape=(128,), dtype='int32',
    name="input_ids")`, otherwise the order of the tensors will not be guaranteed during the training.
Args:
func (:obj:`callable`):
The callable function of the TensorFlow model.
config (:class:`~transformers.PretrainedConfig`):
The config of the running model.
**kwargs:
The inputs of the model.
Returns:
        A dictionary with the processed inputs ready to be passed to the model.
"""
signature = dict(inspect.signature(func).parameters)
signature.pop("kwargs", None)
signature.pop("self", None)
parameter_names = list(signature.keys())
output = {}
allowed_types = (tf.Tensor, bool, int, ModelOutput, tuple, list, dict, np.ndarray)
if "inputs" in kwargs["kwargs_call"]:
warnings.warn(
"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
FutureWarning,
)
output["input_ids"] = kwargs["kwargs_call"].pop("inputs")
if "decoder_cached_states" in kwargs["kwargs_call"]:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
output["past_key_values"] = kwargs["kwargs_call"].pop("decoder_cached_states")
if len(kwargs["kwargs_call"]) > 0:
raise ValueError(
f"The following keyword arguments are not supported by this model: {list(kwargs['kwargs_call'].keys())}."
)
kwargs.pop("kwargs_call")
for k, v in kwargs.items():
if isinstance(v, allowed_types) or v is None:
output[k] = v
else:
raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
if isinstance(input_ids, (tuple, list)):
for i, input in enumerate(input_ids):
# EagerTensors don't allow to use the .name property so we check for a real Tensor
if type(input) == tf.Tensor:
# Tensor names have always the pattern `name:id` then we check only the
# `name` part
tensor_name = input.name.split(":")[0]
if tensor_name in parameter_names:
output[tensor_name] = input
else:
output[parameter_names[i]] = input
elif isinstance(input, allowed_types) or input is None:
output[parameter_names[i]] = input
else:
raise ValueError(
f"Data of type {type(input)} is not allowed only {allowed_types} is accepted for {parameter_names[i]}."
)
elif isinstance(input_ids, (dict, BatchEncoding)):
if "inputs" in input_ids:
warnings.warn(
"The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead.",
FutureWarning,
)
output["input_ids"] = input_ids.pop("inputs")
if "decoder_cached_states" in input_ids:
warnings.warn(
"The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead.",
FutureWarning,
)
output["past_key_values"] = input_ids.pop("decoder_cached_states")
for k, v in dict(input_ids).items():
if isinstance(v, allowed_types) or v is None:
output[k] = v
elif k not in parameter_names and "args" not in parameter_names:
logger.warning(
f"The parameter {k} does not belongs to the parameter list {parameter_names} and will be ignored."
)
continue
else:
raise ValueError(f"Data of type {type(v)} is not allowed only {allowed_types} is accepted for {k}.")
else:
if isinstance(input_ids, tf.Tensor) or input_ids is None:
output[parameter_names[0]] = input_ids
else:
raise ValueError(
f"Data of type {type(input_ids)} is not allowed only {allowed_types} is accepted for {parameter_names[0]}."
)
for name in parameter_names:
if name not in list(output.keys()) and name != "args":
output[name] = kwargs.pop(name, signature[name].default)
# When creating a SavedModel TF calls the method with LayerCall.__call__(args, **kwargs)
# So to respect the proper output we have to add this exception
if "args" in output:
if output["args"] is not None and type(output["args"]) == tf.Tensor:
tensor_name = output["args"].name.split(":")[0]
output[tensor_name] = output["args"]
else:
# `args` in this case is always the first parameter, then `input_ids`
output["input_ids"] = output["args"]
del output["args"]
if "kwargs" in output:
del output["kwargs"]
boolean_dict = {
k: v
for k, v in output.items()
if k in ["return_dict", "output_attentions", "output_hidden_states", "use_cache"]
}
output.update(
booleans_processing(
config=config,
**boolean_dict,
)
)
return output
def load_tf_weights(model, resolved_archive_file, ignore_mismatched_sizes=False, _prefix=None):
"""
    Detect missing and unexpected layers and load the TF weights according to their names and shapes.
Args:
model (:obj:`tf.keras.models.Model`):
The model to load the weights into.
resolved_archive_file (:obj:`str`):
The location of the H5 file.
ignore_mismatched_sizes (:obj:`bool`, `optional`, defaults to :obj:`False`):
            Whether or not to ignore weights whose shapes don't match between the checkpoint and the model.
Returns:
Three lists, one for the missing layers, another one for the unexpected layers, and a last one for the
mismatched layers.
"""
missing_layers = []
unexpected_layers = []
mismatched_layers = []
# Read the H5 file
with h5py.File(resolved_archive_file, "r") as f:
# Retrieve the name of each layer from the H5 file
saved_h5_model_layers_name = set(hdf5_format.load_attributes_from_hdf5_group(f, "layer_names"))
# Find the missing layers from the high level list of layers
missing_layers = list(set([layer.name for layer in model.layers]) - saved_h5_model_layers_name)
# Find the unexpected layers from the high level list of layers
unexpected_layers = list(saved_h5_model_layers_name - set([layer.name for layer in model.layers]))
saved_weight_names_set = set()
symbolic_weights_names = set()
weight_value_tuples = []
# Compute missing and unexpected sub layers
# Store the weights in list of tuples that looks like [(weight_object, value_of_weight),...]
for layer in model.layers:
# if layer_name from the H5 file belongs to the layers from the instantiated model
if layer.name in saved_h5_model_layers_name:
# Get the H5 layer object from its name
h5_layer_object = f[layer.name]
# Get all the weights as a list from the layer object
symbolic_weights = layer.trainable_weights + layer.non_trainable_weights
saved_weights = {}
# Create a dict from the H5 saved model that looks like {"weight_name": weight_value}
# And a set with only the names
for weight_name in hdf5_format.load_attributes_from_hdf5_group(h5_layer_object, "weight_names"):
# TF names always start with the model name so we ignore it
name = "/".join(weight_name.split("/")[1:])
if _prefix is not None:
name = _prefix + "/" + name
saved_weights[name] = np.asarray(h5_layer_object[weight_name])
# Add the updated name to the final list for computing missing/unexpected values
saved_weight_names_set.add(name)
# Loop over each weights from the instantiated model and compare with the weights from the H5 file
for symbolic_weight in symbolic_weights:
# TF names always start with the model name so we ignore it
if _prefix is not None:
delimeter = len(_prefix.split("/"))
symbolic_weight_name = "/".join(
symbolic_weight.name.split("/")[:delimeter]
+ symbolic_weight.name.split("/")[delimeter + 1 :]
)
else:
symbolic_weight_name = "/".join(symbolic_weight.name.split("/")[1:])
# here we check if the current weight is among the weights from the H5 file
# If yes, get the weight_value of the corresponding weight from the H5 file
# If not, make the value to None
saved_weight_value = saved_weights.get(symbolic_weight_name, None)
# Add the updated name to the final list for computing missing/unexpected values
symbolic_weights_names.add(symbolic_weight_name)
# If the current weight is found
if saved_weight_value is not None:
# Check if the shape of the current weight and the one from the H5 file are different
if K.int_shape(symbolic_weight) != saved_weight_value.shape:
# If yes we reshape the weight from the H5 file accordingly to the current weight
# If the two shapes are not compatible we raise an issue
try:
array = np.reshape(saved_weight_value, K.int_shape(symbolic_weight))
except ValueError as e:
if ignore_mismatched_sizes:
mismatched_layers.append(
(symbolic_weight_name, saved_weight_value.shape, K.int_shape(symbolic_weight))
)
continue
else:
raise e
else:
array = saved_weight_value
# We create the tuple that will be loaded and add it to the final list
weight_value_tuples.append((symbolic_weight, array))
# Load all the weights
K.batch_set_value(weight_value_tuples)
# Compute the missing and unexpected layers
missing_layers.extend(list(symbolic_weights_names - saved_weight_names_set))
unexpected_layers.extend(list(saved_weight_names_set - symbolic_weights_names))
return missing_layers, unexpected_layers, mismatched_layers
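# Hedged usage sketch (not part of the library source): a typical call site,
# assuming `model` is a built tf.keras.Model and "tf_model.h5" an H5 checkpoint.
# missing, unexpected, mismatched = load_tf_weights(model, "tf_model.h5")
# if missing:
#     logger.warning(f"Weights not found in the checkpoint: {missing}")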
def init_copy_embeddings(old_embeddings, new_num_tokens):
r"""
This function aims to reduce the embeddings in case new_num_tokens < old_num_tokens or to pad with -1 in case
new_num_tokens > old_num_tokens. A mask is also computed in order to know which weight in the embeddings should be
kept or not. Example:
- if new_num_tokens=5 and old_num_tokens=4 and old_embeddings=[w1,w2,w3,w4]
- mask=[True,True,True,True,False] and current_weights=[w1,w2,w3,w4,-1]
- if new_num_tokens=4 and old_num_tokens=5 and old_embeddings=[w1,w2,w3,w4,w5]
- mask=[True,True,True,True] and current_weights=[w1,w2,w3,w4]
"""
old_num_tokens, old_embedding_dim = shape_list(old_embeddings)
size_diff = new_num_tokens - old_num_tokens
# initialize new embeddings
# Copy token embeddings from the previous ones
if tf.math.greater(size_diff, 0):
        # if the new size is greater than the old one, we pad the current embeddings until they reach the new size,
        # and we create a mask to properly identify the padded values that will be replaced by the values of the
        # newly created embeddings
current_weights = tf.pad(
old_embeddings.value(), tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=-1
)
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
mask = tf.fill(tf.convert_to_tensor([num_tokens_to_copy, 1]), True)
mask = tf.pad(mask, tf.convert_to_tensor([[0, size_diff], [0, 0]]), constant_values=False)
else:
        # if the new size is lower than the old one, we take the current embeddings up to the new size
current_weights = tf.slice(
old_embeddings.value(),
tf.convert_to_tensor([0, 0]),
tf.convert_to_tensor([new_num_tokens, old_embedding_dim]),
)
mask = tf.fill(tf.convert_to_tensor([new_num_tokens, 1]), True)
return mask, current_weights
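# Hedged worked example (illustrative, not part of the library source):
# growing a 2-token table to 3 rows pads with -1 and flags the new row False.
# old = tf.Variable([[0.1, 0.2], [0.3, 0.4]])
# mask, weights = init_copy_embeddings(old, new_num_tokens=3)
# mask    -> [[True], [True], [False]]
# weights -> [[0.1, 0.2], [0.3, 0.4], [-1.0, -1.0]]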
class TFPreTrainedModel(tf.keras.Model, TFModelUtilsMixin, TFGenerationMixin, PushToHubMixin):
r"""
Base class for all TF models.
:class:`~transformers.TFPreTrainedModel` takes care of storing the configuration of the models and handles methods
for loading, downloading and saving models as well as a few methods common to all models to:
* resize the input embeddings,
* prune heads in the self-attention heads.
Class attributes (overridden by derived classes):
- **config_class** (:class:`~transformers.PretrainedConfig`) -- A subclass of
:class:`~transformers.PretrainedConfig` to use as configuration class for this model architecture.
- **base_model_prefix** (:obj:`str`) -- A string indicating the attribute associated to the base model in
derived classes of the same architecture adding modules on top of the base model.
"""
config_class = None
base_model_prefix = ""
# a list of re pattern of tensor names to ignore from the model when loading the model weights
# (and avoid unnecessary warnings).
_keys_to_ignore_on_load_missing = None
# a list of re pattern of tensor names to ignore from the weights when loading the model weights
# (and avoid unnecessary warnings).
_keys_to_ignore_on_load_unexpected = None
_requires_load_weight_prefix = False
@property
def dummy_inputs(self) -> Dict[str, tf.Tensor]:
"""
Dummy inputs to build the network.
Returns:
:obj:`Dict[str, tf.Tensor]`: The dummy inputs.
"""
return {
"input_ids": tf.constant(DUMMY_INPUTS),
}
def __init__(self, config, *inputs, **kwargs):
super().__init__(*inputs, **kwargs)
if not isinstance(config, PretrainedConfig):
raise ValueError(
f"Parameter config in `{self.__class__.__name__}(config)` should be an instance of class "
"`PretrainedConfig`. To create a model from a pretrained model use "
f"`model = {self.__class__.__name__}.from_pretrained(PRETRAINED_MODEL_NAME)`"
)
# Save config and origin of the pretrained weights if given in model
self.config = config
self.name_or_path = config.name_or_path
@classmethod
def _from_config(cls, config, **kwargs):
"""
All context managers that the model should be initialized under go here.
"""
return cls(config, **kwargs)
@tf.function(
input_signature=[
{
"input_ids": tf.TensorSpec((None, None), tf.int32, name="input_ids"),
"attention_mask": tf.TensorSpec((None, None), tf.int32, name="attention_mask"),
"token_type_ids": tf.TensorSpec((None, None), tf.int32, name="token_type_ids"),
}
]
)
def serving(self, inputs):
"""
Method used for serving the model.
Args:
inputs (:obj:`Dict[str, tf.Tensor]`):
The input of the saved model as a dictionary of tensors.
"""
output = self.call(inputs)
return self.serving_output(output)
    def serving_output(self, output):
"""
Prepare the output of the saved model. Each model must implement this function.
Args:
output (:obj:`~transformers.TFBaseModelOutput`):
The output returned by the model.
"""
raise NotImplementedError
def get_input_embeddings(self) -> tf.keras.layers.Layer:
"""
Returns the model's input embeddings layer.
Returns:
:obj:`tf.Variable`: The embeddings layer mapping vocabulary to hidden states.
"""
main_layer = getattr(self, self.base_model_prefix, self)
if main_layer is not self:
return main_layer.get_input_embeddings()
else:
raise NotImplementedError
def set_input_embeddings(self, value):
"""
Set model's input embeddings
Args:
value (:obj:`tf.Variable`):
The new weights mapping hidden states to vocabulary.
"""
main_layer = getattr(self, self.base_model_prefix)
if main_layer is None:
raise NotImplementedError("The model does not implements the base_model_prefix attribute.")
try:
main_layer.set_input_embeddings(value)
except AttributeError:
logger.info("Building the model")
self(self.dummy_inputs)
main_layer.set_input_embeddings(value)
def get_output_embeddings(self) -> Union[None, tf.keras.layers.Layer]:
"""
Returns the model's output embeddings
Returns:
:obj:`tf.Variable`: The new weights mapping vocabulary to hidden states.
"""
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
return lm_head.get_output_embeddings()
return None # Overwrite for models with output embeddings
def set_output_embeddings(self, value):
"""
Set model's output embeddings
Args:
value (:obj:`tf.Variable`):
The new weights mapping hidden states to vocabulary.
"""
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
lm_head.set_output_embeddings(value)
except AttributeError:
logger.info("Building the model")
self(self.dummy_inputs)
lm_head.set_output_embeddings(value)
def get_output_layer_with_bias(self) -> Union[None, tf.keras.layers.Layer]:
"""
Get the layer that handles a bias attribute in case the model has an LM head with weights tied to the
embeddings
Return:
:obj:`tf.keras.layers.Layer`: The layer that handles the bias, None if not an LM model.
"""
warnings.warn(
"The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead.", FutureWarning
)
return self.get_lm_head()
def get_prefix_bias_name(self) -> Union[None, str]:
"""
Get the concatenated _prefix name of the bias from the model name to the parent layer
Return:
:obj:`str`: The _prefix name of the bias.
"""
warnings.warn("The method get_prefix_bias_name is deprecated. Please use `get_bias` instead.", FutureWarning)
return None
def get_bias(self) -> Union[None, Dict[str, tf.Variable]]:
"""
Dict of bias attached to an LM head. The key represents the name of the bias attribute.
Return:
:obj:`tf.Variable`: The weights representing the bias, None if not an LM model.
"""
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
return lm_head.get_bias()
except AttributeError:
self(self.dummy_inputs)
return lm_head.get_bias()
return None
def set_bias(self, value):
"""
Set all the bias in the LM head.
Args:
value (:obj:`Dict[tf.Variable]`):
All the new bias attached to an LM head.
"""
if self.get_lm_head() is not None:
lm_head = self.get_lm_head()
try:
lm_head.set_bias(value)
except AttributeError:
self(self.dummy_inputs)
lm_head.set_bias(value)
def get_lm_head(self) -> tf.keras.layers.Layer:
"""
The LM Head layer. This method must be overwritten by all the models that have a lm head.
Return:
:obj:`tf.keras.layers.Layer`: The LM head layer if the model has one, None if not.
"""
return None
def resize_token_embeddings(self, new_num_tokens=None) -> tf.Variable:
"""
Resizes input token embeddings matrix of the model if :obj:`new_num_tokens != config.vocab_size`.
Takes care of tying weights embeddings afterwards if the model class has a :obj:`tie_weights()` method.
Arguments:
new_num_tokens (:obj:`int`, `optional`):
The number of new tokens in the embedding matrix. Increasing the size will add newly initialized
vectors at the end. Reducing the size will remove vectors from the end. If not provided or :obj:`None`,
just returns a pointer to the input tokens :obj:`tf.Variable` module of the model without doing
anything.
Return:
:obj:`tf.Variable`: Pointer to the input tokens Embeddings Module of the model.
"""
if new_num_tokens is None or new_num_tokens == self.config.vocab_size:
return self._get_word_embedding_weight(self.get_input_embeddings())
model_embeds = self._resize_token_embeddings(new_num_tokens)
# Update base model and current model config
self.config.vocab_size = new_num_tokens
return model_embeds
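    # Hedged usage sketch (not part of the library source): after adding new
    # tokens to a tokenizer, callers typically resize the input embeddings:
    # model.resize_token_embeddings(len(tokenizer))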
def _get_word_embedding_weight(model, embedding_layer):
embeds = getattr(embedding_layer, "weight", None)
if embeds is not None:
return embeds
embeds = getattr(embedding_layer, "decoder", None)
if embeds is not None:
return embeds
# The attributes might not exist because the model is not yet built,
# so build the model with dummy inputs and then retry fetching the
# embedding weight.
model(model.dummy_inputs)
embeds = getattr(embedding_layer, "weight", None)
if embeds is not None:
return embeds
embeds = getattr(embedding_layer, "decoder", None)
if embeds is not None:
return embeds
return None
def _resize_token_embeddings(self, new_num_tokens):
old_embeddings = self._get_word_embedding_weight(self.get_input_embeddings())
new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
# if word embeddings are not tied, make sure that lm head bias is resized as well
if self.get_bias() is not None:
old_lm_head_bias = self.get_bias()
new_lm_head_bias = self._get_resized_lm_head_bias(old_lm_head_bias, new_num_tokens)
self.set_bias(new_lm_head_bias)
# if word embeddings are not tied, make sure that lm head decoder is resized as well
if self.get_output_embeddings() is not None:
old_lm_head_decoder = self._get_word_embedding_weight(self.get_output_embeddings())
new_lm_head_decoder = self._get_resized_lm_head_decoder(old_lm_head_decoder, new_num_tokens)
self.set_output_embeddings(new_lm_head_decoder)
self.set_input_embeddings(new_embeddings)
return self.get_input_embeddings()
def _get_resized_lm_head_bias(self, old_lm_head_bias, new_num_tokens):
"""
Build a resized bias from the old ones. Increasing the size will add newly initialized vectors at the end.
Reducing the size will remove vectors from the end.
Args:
old_lm_head_bias (:obj:`tf.Variable`):
Old lm head bias to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the linear matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns None
Return:
:obj:`tf.Variable`: Pointer to the resized bias.
"""
new_lm_head_bias = {}
for attr, weight in old_lm_head_bias.items():
first_dim, old_num_tokens = (None, shape_list(weight)[0]) if tf.rank(weight) == 1 else shape_list(weight)
size_diff = new_num_tokens - old_num_tokens
final_shape = [new_num_tokens] if first_dim is None else [first_dim, new_num_tokens]
# initialize new bias
if tf.math.greater(size_diff, 0):
padding_shape = [[0, size_diff]] if first_dim is None else [[0, 0], [0, size_diff]]
current_bias = tf.pad(weight.value(), tf.convert_to_tensor(padding_shape), constant_values=-1)
num_tokens_to_copy = min(old_num_tokens, new_num_tokens)
mask_shape = [num_tokens_to_copy] if first_dim is None else [1, num_tokens_to_copy]
bias_mask = tf.fill(tf.convert_to_tensor(mask_shape), True)
bias_mask = tf.pad(bias_mask, tf.convert_to_tensor(padding_shape), constant_values=False)
else:
slice_from = [0] if first_dim is None else [0, 0]
current_bias = tf.slice(
weight.value(), tf.convert_to_tensor(slice_from), tf.convert_to_tensor(final_shape)
)
bias_mask = tf.fill(tf.convert_to_tensor(final_shape), True)
new_bias = self.add_weight(
shape=final_shape,
initializer="zeros",
trainable=True,
name=weight.name.split(":")[0],
)
init_bias = tf.where(bias_mask, current_bias, new_bias.value())
new_bias.assign(init_bias)
new_lm_head_bias[attr] = new_bias
return new_lm_head_bias
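# Standalone sketch of the pad-and-mask pattern used above when growing a
# 1-D bias (tensor values are illustrative):
#
# >>> old = tf.constant([1.0, 2.0, 3.0])
# >>> padded = tf.pad(old, [[0, 2]], constant_values=-1)                 # [1., 2., 3., -1., -1.]
# >>> mask = tf.pad(tf.fill([3], True), [[0, 2]], constant_values=False)
# >>> tf.where(mask, padded, tf.zeros([5]))                              # old values kept, new slots zeroed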
def _get_resized_lm_head_decoder(self, old_lm_head_decoder, new_num_tokens):
"""
Build a resized decoder from the old ones. Increasing the size will add newly initialized vectors at the end.
Reducing the size will remove vectors from the end.
Args:
old_lm_head_decoder (:obj:`tf.Variable`):
Old lm head decoder to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the linear matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns None
Return:
:obj:`tf.Variable`: Pointer to the resized decoder or None if the output embeddings are different from the
input ones.
"""
new_lm_head_decoder = old_lm_head_decoder
is_input_output_equals = tf.reduce_any(
self._get_word_embedding_weight(self.get_input_embeddings()) == old_lm_head_decoder
)
if old_lm_head_decoder is not None and not is_input_output_equals:
old_embedding_dim = shape_list(old_lm_head_decoder)[1]
decoder_mask, current_decoder = init_copy_embeddings(old_lm_head_decoder, new_num_tokens)
new_lm_head_decoder = self.add_weight(
shape=(new_num_tokens, old_embedding_dim),
initializer="zeros",
trainable=True,
name=old_lm_head_decoder.name.split(":")[0],
)
init_decoder = tf.where(decoder_mask, current_decoder, new_lm_head_decoder.value())
new_lm_head_decoder.assign(init_decoder)
return new_lm_head_decoder
def _get_resized_embeddings(self, old_embeddings, new_num_tokens=None) -> tf.Variable:
"""
Build resized embedding weights from the provided token embedding weights. Increasing the size will add newly
initialized vectors at the end. Reducing the size will remove vectors from the end.
Args:
old_embeddings (:obj:`tf.Variable`):
Old embeddings to be resized.
new_num_tokens (:obj:`int`, `optional`):
New number of tokens in the embedding matrix.
Increasing the size will add newly initialized vectors at the end. Reducing the size will remove
vectors from the end. If not provided or :obj:`None`, just returns a pointer to the input tokens
:obj:`tf.Variable` module of the model without doing anything.
Return:
:obj:`tf.Variable`: Pointer to the resized Embedding Module or the old Embedding Module if
:obj:`new_num_tokens` is :obj:`None`
"""
old_embedding_dim = shape_list(old_embeddings)[1]
init_range = getattr(self.config, "initializer_range", 0.02)
embeddings_mask, current_embeddings = init_copy_embeddings(old_embeddings, new_num_tokens)
new_embeddings = self.add_weight(
name=old_embeddings.name.split(":")[0],
shape=[new_num_tokens, old_embedding_dim],
initializer=get_initializer(init_range),
dtype=tf.float32,
)
init_embeddings = tf.where(embeddings_mask, current_embeddings, new_embeddings.value())
new_embeddings.assign(init_embeddings)
return new_embeddings
def prune_heads(self, heads_to_prune):
"""
Prunes heads of the base model.
Arguments:
heads_to_prune (:obj:`Dict[int, List[int]]`):
Dictionary with keys being selected layer indices (:obj:`int`) and associated values being the list of
heads to prune in said layer (list of :obj:`int`). For instance {1: [0, 2], 2: [2, 3]} will prune heads
0 and 2 on layer 1 and heads 2 and 3 on layer 2.
"""
raise NotImplementedError
def save_pretrained(self, save_directory, saved_model=False, version=1, push_to_hub=False, **kwargs):
"""
Save a model and its configuration file to a directory, so that it can be re-loaded using the
:func:`~transformers.TFPreTrainedModel.from_pretrained` class method.
Arguments:
save_directory (:obj:`str`):
Directory to which to save. Will be created if it doesn't exist.
saved_model (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to also export the model in the TensorFlow SavedModel format.
version (:obj:`int`, `optional`, defaults to 1):
The version of the saved model. A saved model needs to be versioned in order to be properly loaded by
TensorFlow Serving as detailed in the official documentation
https://www.tensorflow.org/tfx/serving/serving_basic
push_to_hub (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to push your model to the Hugging Face model hub after saving it.
.. warning::
Using :obj:`push_to_hub=True` will synchronize the repository you are pushing to with
:obj:`save_directory`, which requires :obj:`save_directory` to be a local clone of the repo you are
pushing to if it's an existing folder. Pass along :obj:`temp_dir=True` to use a temporary directory
instead.
kwargs:
Additional keyword arguments passed along to the
:meth:`~transformers.file_utils.PushToHubMixin.push_to_hub` method.
"""
if os.path.isfile(save_directory):
logger.error(f"Provided path ({save_directory}) should be a directory, not a file")
return
if push_to_hub:
commit_message = kwargs.pop("commit_message", None)
repo = self._create_or_get_repo(save_directory, **kwargs)
os.makedirs(save_directory, exist_ok=True)
if saved_model:
saved_model_dir = os.path.join(save_directory, "saved_model", str(version))
self.save(saved_model_dir, include_optimizer=False, signatures=self.serving)
logger.info(f"Saved model created in {saved_model_dir}")
# Save configuration file
self.config.architectures = [self.__class__.__name__[2:]]
self.config.save_pretrained(save_directory)
# If we save using the predefined names, we can load using `from_pretrained`
output_model_file = os.path.join(save_directory, TF2_WEIGHTS_NAME)
self.save_weights(output_model_file)
logger.info(f"Model weights saved in {output_model_file}")
if push_to_hub:
url = self._push_to_hub(repo, commit_message=commit_message)
logger.info(f"Model pushed to the hub in this commit: {url}")
@classmethod
def from_pretrained(cls, pretrained_model_name_or_path, *model_args, **kwargs):
r"""
Instantiate a pretrained TF 2.0 model from a pre-trained model configuration.
The warning `Weights from XXX not initialized from pretrained model` means that the weights of XXX do not come
pretrained with the rest of the model. It is up to you to train those weights with a downstream fine-tuning
task.
The warning `Weights from XXX not used in YYY` means that the layer XXX is not used by YYY, therefore those
weights are discarded.
Parameters:
pretrained_model_name_or_path (:obj:`str`, `optional`):
Can be either:
- A string, the `model id` of a pretrained model hosted inside a model repo on huggingface.co.
Valid model ids can be located at the root-level, like ``bert-base-uncased``, or namespaced under
a user or organization name, like ``dbmdz/bert-base-german-cased``.
- A path to a `directory` containing model weights saved using
:func:`~transformers.TFPreTrainedModel.save_pretrained`, e.g., ``./my_model_directory/``.
- A path or url to a `PyTorch state_dict save file` (e.g., ``./pt_model/pytorch_model.bin``). In
this case, ``from_pt`` should be set to :obj:`True` and a configuration object should be provided
as ``config`` argument. This loading path is slower than converting the PyTorch model to a
TensorFlow model using the provided conversion scripts and then loading the TensorFlow model
afterwards.
- :obj:`None` if you are both providing the configuration and state dictionary (resp. with keyword
arguments ``config`` and ``state_dict``).
model_args (sequence of positional arguments, `optional`):
All remaining positional arguments will be passed to the underlying model's ``__init__`` method.
config (:obj:`Union[PretrainedConfig, str]`, `optional`):
Can be either:
- an instance of a class derived from :class:`~transformers.PretrainedConfig`,
- a string valid as input to :func:`~transformers.PretrainedConfig.from_pretrained`.
Configuration for the model to use instead of an automatically loaded configuration. Configuration can
be automatically loaded when:
- The model is a model provided by the library (loaded with the `model id` string of a pretrained
model).
- The model was saved using :func:`~transformers.TFPreTrainedModel.save_pretrained` and is reloaded
by supplying the save directory.
- The model is loaded by supplying a local directory as ``pretrained_model_name_or_path`` and a
configuration JSON file named `config.json` is found in the directory.
from_pt (:obj:`bool`, `optional`, defaults to :obj:`False`):
Load the model weights from a PyTorch state_dict save file (see docstring of
``pretrained_model_name_or_path`` argument).
ignore_mismatched_sizes (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to raise an error if some of the weights from the checkpoint do not have the same size
as the weights of the model (if for instance, you are instantiating a model with 10 labels from a
checkpoint with 3 labels).
cache_dir (:obj:`str`, `optional`):
Path to a directory in which a downloaded pretrained model configuration should be cached if the
standard cache should not be used.
force_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to force the (re-)download of the model weights and configuration files, overriding the
cached versions if they exist.
resume_download (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to delete incompletely received files. Will attempt to resume the download if such a
file exists.
proxies (:obj:`Dict[str, str]`, `optional`):
A dictionary of proxy servers to use by protocol or endpoint, e.g., :obj:`{'http': 'foo.bar:3128',
'http://hostname': 'foo.bar:4012'}`. The proxies are used on each request.
output_loading_info (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to also return a dictionary containing missing keys, unexpected keys and error messages.
local_files_only (:obj:`bool`, `optional`, defaults to :obj:`False`):
Whether or not to only look at local files (e.g., not try downloading the model).
use_auth_token (:obj:`str` or `bool`, `optional`):
The token to use as HTTP bearer authorization for remote files. If :obj:`True`, will use the token
generated when running :obj:`transformers-cli login` (stored in :obj:`~/.huggingface`).
revision (:obj:`str`, `optional`, defaults to :obj:`"main"`):
The specific model version to use. It can be a branch name, a tag name, or a commit id, since we use a
git-based system for storing models and other artifacts on huggingface.co, so ``revision`` can be any
identifier allowed by git.
mirror (:obj:`str`, `optional`):
Mirror source to accelerate downloads in China. If you are from China and have an accessibility
problem, you can set this option to resolve it. Note that we do not guarantee the timeliness or safety
of the mirror source. Please refer to the mirror site for more information.
kwargs (remaining dictionary of keyword arguments, `optional`):
Can be used to update the configuration object (after it is loaded) and to initialize the model (e.g.,
:obj:`output_attentions=True`). Behaves differently depending on whether a ``config`` is provided or
automatically loaded:
- If a configuration is provided with ``config``, ``**kwargs`` will be directly passed to the
underlying model's ``__init__`` method (we assume all relevant updates to the configuration have
already been done)
- If a configuration is not provided, ``kwargs`` will be first passed to the configuration class
initialization function (:func:`~transformers.PretrainedConfig.from_pretrained`). Each key of
``kwargs`` that corresponds to a configuration attribute will be used to override said attribute
with the supplied ``kwargs`` value. Remaining keys that do not correspond to any configuration
attribute will be passed to the underlying model's ``__init__`` function.
.. note::
Passing :obj:`use_auth_token=True` is required when you want to use a private model.
Examples::
>>> from transformers import BertConfig, TFBertModel
>>> # Download model and configuration from huggingface.co and cache.
>>> model = TFBertModel.from_pretrained('bert-base-uncased')
>>> # Model was saved using `save_pretrained('./test/saved_model/')` (for example purposes, not runnable).
>>> model = TFBertModel.from_pretrained('./test/saved_model/')
>>> # Update configuration during loading.
>>> model = TFBertModel.from_pretrained('bert-base-uncased', output_attentions=True)
>>> assert model.config.output_attentions == True
>>> # Loading from a Pytorch model file instead of a TensorFlow checkpoint (slower, for example purposes, not runnable).
>>> config = BertConfig.from_json_file('./pt_model/my_pt_model_config.json')
>>> model = TFBertModel.from_pretrained('./pt_model/my_pytorch_model.bin', from_pt=True, config=config)
"""
config = kwargs.pop("config", None)
cache_dir = kwargs.pop("cache_dir", None)
from_pt = kwargs.pop("from_pt", False)
ignore_mismatched_sizes = kwargs.pop("ignore_mismatched_sizes", False)
force_download = kwargs.pop("force_download", False)
resume_download = kwargs.pop("resume_download", False)
proxies = kwargs.pop("proxies", None)
output_loading_info = kwargs.pop("output_loading_info", False)
local_files_only = kwargs.pop("local_files_only", False)
use_auth_token = kwargs.pop("use_auth_token", None)
revision = kwargs.pop("revision", None)
mirror = kwargs.pop("mirror", None)
load_weight_prefix = kwargs.pop("load_weight_prefix", None)
from_pipeline = kwargs.pop("_from_pipeline", None)
from_auto_class = kwargs.pop("_from_auto", False)
user_agent = {"file_type": "model", "framework": "tensorflow", "from_auto_class": from_auto_class}
if from_pipeline is not None:
user_agent["using_pipeline"] = from_pipeline
if is_offline_mode() and not local_files_only:
logger.info("Offline mode: forcing local_files_only=True")
local_files_only = True
# Load config if we don't provide a configuration
if not isinstance(config, PretrainedConfig):
config_path = config if config is not None else pretrained_model_name_or_path
config, model_kwargs = cls.config_class.from_pretrained(
config_path,
*model_args,
cache_dir=cache_dir,
return_unused_kwargs=True,
force_download=force_download,
resume_download=resume_download,
proxies=proxies,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
revision=revision,
_from_auto=from_auto_class,
_from_pipeline=from_pipeline,
**kwargs,
)
else:
model_kwargs = kwargs
# Load model
if pretrained_model_name_or_path is not None:
if os.path.isdir(pretrained_model_name_or_path):
if from_pt and os.path.isfile(os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)):
# Load from a PyTorch checkpoint in priority if from_pt
archive_file = os.path.join(pretrained_model_name_or_path, WEIGHTS_NAME)
elif os.path.isfile(os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)):
# Load from a TF 2.0 checkpoint
archive_file = os.path.join(pretrained_model_name_or_path, TF2_WEIGHTS_NAME)
else:
raise EnvironmentError(
f"Error: no file named {[WEIGHTS_NAME, TF2_WEIGHTS_NAME]} found in directory "
f"{pretrained_model_name_or_path}, or `from_pt` was set to False although only a PyTorch checkpoint is present"
)
elif os.path.isfile(pretrained_model_name_or_path) or is_remote_url(pretrained_model_name_or_path):
archive_file = pretrained_model_name_or_path
elif os.path.isfile(pretrained_model_name_or_path + ".index"):
archive_file = pretrained_model_name_or_path + ".index"
else:
archive_file = hf_bucket_url(
pretrained_model_name_or_path,
filename=(WEIGHTS_NAME if from_pt else TF2_WEIGHTS_NAME),
revision=revision,
mirror=mirror,
)
try:
# Load from URL or cache if already cached
resolved_archive_file = cached_path(
archive_file,
cache_dir=cache_dir,
force_download=force_download,
proxies=proxies,
resume_download=resume_download,
local_files_only=local_files_only,
use_auth_token=use_auth_token,
user_agent=user_agent,
)
except EnvironmentError as err:
logger.error(err)
msg = (
f"Can't load weights for '{pretrained_model_name_or_path}'. Make sure that:\n\n"
f"- '{pretrained_model_name_or_path}' is a correct model identifier listed on 'https://huggingface.co/models'\n\n"
f"- or '{pretrained_model_name_or_path}' is the correct path to a directory containing a file named one of {TF2_WEIGHTS_NAME}, {WEIGHTS_NAME}.\n\n"
)
raise EnvironmentError(msg)
if resolved_archive_file == archive_file:
logger.info(f"loading weights file {archive_file}")
else:
logger.info(f"loading weights file {archive_file} from cache at {resolved_archive_file}")
else:
resolved_archive_file = None
config.name_or_path = pretrained_model_name_or_path
# composed models, *e.g.* TFRag, require special treatment when it comes to loading
# pre-trained weights.
if cls._requires_load_weight_prefix and model_kwargs.get("name") is not None:
model_kwargs["load_weight_prefix"] = load_weight_prefix + "/" + model_kwargs.get("name")
# Instantiate model.
model = cls(config, *model_args, **model_kwargs)
if from_pt:
from .modeling_tf_pytorch_utils import load_pytorch_checkpoint_in_tf2_model
# Load from a PyTorch checkpoint
return load_pytorch_checkpoint_in_tf2_model(model, resolved_archive_file, allow_missing_keys=True)
# we might need to extend the variable scope for composite models
if load_weight_prefix is not None:
with tf.compat.v1.variable_scope(load_weight_prefix):
model(model.dummy_inputs) # build the network with dummy inputs
else:
model(model.dummy_inputs) # build the network with dummy inputs
assert os.path.isfile(resolved_archive_file), f"Error retrieving file {resolved_archive_file}"
# 'by_name' allows us to do transfer learning by skipping/adding layers
# see https://github.com/tensorflow/tensorflow/blob/00fad90125b18b80fe054de1055770cfb8fe4ba3/tensorflow/python/keras/engine/network.py#L1339-L1357
try:
missing_keys, unexpected_keys, mismatched_keys = load_tf_weights(
model,
resolved_archive_file,
ignore_mismatched_sizes=ignore_mismatched_sizes,
_prefix=load_weight_prefix,
)
except OSError:
raise OSError(
"Unable to load weights from h5 file. "
"If you tried to load a TF 2.0 model from a PyTorch checkpoint, please set from_pt=True. "
)
model(model.dummy_inputs) # Make sure restore ops are run
if cls._keys_to_ignore_on_load_missing is not None:
for pat in cls._keys_to_ignore_on_load_missing:
missing_keys = [k for k in missing_keys if re.search(pat, k) is None]
if cls._keys_to_ignore_on_load_unexpected is not None:
for pat in cls._keys_to_ignore_on_load_unexpected:
unexpected_keys = [k for k in unexpected_keys if re.search(pat, k) is None]
if len(unexpected_keys) > 0:
logger.warning(
f"Some layers from the model checkpoint at {pretrained_model_name_or_path} were not used when "
f"initializing {model.__class__.__name__}: {unexpected_keys}\n"
f"- This IS expected if you are initializing {model.__class__.__name__} from the checkpoint of a model trained on another task "
f"or with another architecture (e.g. initializing a BertForSequenceClassification model from a BertForPreTraining model).\n"
f"- This IS NOT expected if you are initializing {model.__class__.__name__} from the checkpoint of a model that you expect "
f"to be exactly identical (initializing a BertForSequenceClassification model from a BertForSequenceClassification model)."
)
else:
logger.warning(f"All model checkpoint layers were used when initializing {model.__class__.__name__}.\n")
if len(missing_keys) > 0:
logger.warning(
f"Some layers of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized: {missing_keys}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
elif len(mismatched_keys) == 0:
logger.warning(
f"All the layers of {model.__class__.__name__} were initialized from the model checkpoint at {pretrained_model_name_or_path}.\n"
f"If your task is similar to the task the model of the checkpoint was trained on, "
f"you can already use {model.__class__.__name__} for predictions without further training."
)
if len(mismatched_keys) > 0:
mismatched_warning = "\n".join(
[
f"- {key}: found shape {shape1} in the checkpoint and {shape2} in the model instantiated"
for key, shape1, shape2 in mismatched_keys
]
)
logger.warning(
f"Some weights of {model.__class__.__name__} were not initialized from the model checkpoint at {pretrained_model_name_or_path} "
f"and are newly initialized because the shapes did not match:\n{mismatched_warning}\n"
f"You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference."
)
if output_loading_info:
loading_info = {
"missing_keys": missing_keys,
"unexpected_keys": unexpected_keys,
"mismatched_keys": mismatched_keys,
}
return model, loading_info
return model
# To update the docstring, we need to copy the method, otherwise we change the original docstring.
TFPreTrainedModel.push_to_hub = copy_func(TFPreTrainedModel.push_to_hub)
TFPreTrainedModel.push_to_hub.__doc__ = TFPreTrainedModel.push_to_hub.__doc__.format(
object="model", object_class="TFAutoModel", object_files="model checkpoint"
)
class TFConv1D(tf.keras.layers.Layer):
"""
1D-convolutional layer as defined by Radford et al. for OpenAI GPT (and also used in GPT-2).
Basically works like a linear layer but the weights are transposed.
Args:
nf (:obj:`int`):
The number of output features.
nx (:obj:`int`):
The number of input features.
initializer_range (:obj:`float`, `optional`, defaults to 0.02):
The standard deviation to use to initialize the weights.
kwargs:
Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
"""
def __init__(self, nf, nx, initializer_range=0.02, **kwargs):
super().__init__(**kwargs)
self.nf = nf
self.nx = nx
self.initializer_range = initializer_range
def build(self, input_shape):
self.weight = self.add_weight(
"weight", shape=[self.nx, self.nf], initializer=get_initializer(self.initializer_range)
)
self.bias = self.add_weight("bias", shape=[1, self.nf], initializer=tf.zeros_initializer())
def call(self, x):
bz, sl = shape_list(x)[:2]
x = tf.reshape(x, [-1, self.nx])
x = tf.matmul(x, self.weight) + self.bias
x = tf.reshape(x, [bz, sl, self.nf])
return x
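# Hedged usage sketch: TFConv1D behaves like a Dense layer with a transposed
# kernel (shapes are illustrative):
#
# >>> layer = TFConv1D(nf=768, nx=256)
# >>> layer(tf.random.uniform([2, 10, 256])).shape   # -> (2, 10, 768)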
class TFSharedEmbeddings(tf.keras.layers.Layer):
r"""
Construct shared token embeddings.
The weights of the embedding layer are usually shared with the weights of the linear decoder when doing language
modeling.
Args:
vocab_size (:obj:`int`):
The size of the vocabulary, e.g., the number of unique tokens.
hidden_size (:obj:`int`):
The size of the embedding vectors.
initializer_range (:obj:`float`, `optional`):
The standard deviation to use when initializing the weights. If no value is provided, it will default to
:math:`1/\sqrt{hidden\_size}`.
kwargs:
Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
"""
def __init__(self, vocab_size: int, hidden_size: int, initializer_range: Optional[float] = None, **kwargs):
super().__init__(**kwargs)
self.vocab_size = vocab_size
self.hidden_size = hidden_size
self.initializer_range = hidden_size ** -0.5 if initializer_range is None else initializer_range
def build(self, input_shape):
"""
Build the shared token embedding layer. Shared weights logic adapted from
https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24
"""
self.weight = self.add_weight(
"weight", shape=[self.vocab_size, self.hidden_size], initializer=get_initializer(self.initializer_range)
)
super().build(input_shape)
def get_config(self):
config = {
"vocab_size": self.vocab_size,
"hidden_size": self.hidden_size,
"initializer_range": self.initializer_range,
}
base_config = super().get_config()
return dict(list(base_config.items()) + list(config.items()))
def call(self, inputs: tf.Tensor, mode: str = "embedding") -> tf.Tensor:
"""
Get token embeddings of inputs or decode final hidden state.
Args:
inputs (:obj:`tf.Tensor`):
In embedding mode, should be an int64 tensor with shape :obj:`[batch_size, length]`.
In linear mode, should be a float tensor with shape :obj:`[batch_size, length, hidden_size]`.
mode (:obj:`str`, defaults to :obj:`"embedding"`):
A valid value is either :obj:`"embedding"` or :obj:`"linear"`, the first one indicates that the layer
should be used as an embedding layer, the second one that the layer should be used as a linear decoder.
Returns:
:obj:`tf.Tensor`: In embedding mode, the output is a float32 embedding tensor, with shape
:obj:`[batch_size, length, hidden_size]`.
In linear mode, the output is a float32 tensor with shape :obj:`[batch_size, length, vocab_size]`.
Raises:
ValueError: if :obj:`mode` is not valid.
Shared weights logic is adapted from `here
<https://github.com/tensorflow/models/blob/a009f4fb9d2fc4949e32192a944688925ef78659/official/transformer/v2/embedding_layer.py#L24>`__.
"""
if mode == "embedding":
return self._embedding(inputs)
elif mode == "linear":
return self._linear(inputs)
else:
raise ValueError(f"mode {mode} is not valid.")
def _embedding(self, input_ids):
"""Applies embedding based on inputs tensor."""
return tf.gather(self.weight, input_ids)
def _linear(self, inputs):
"""
Computes logits by running inputs through a linear layer.
Args:
inputs: A float32 tensor with shape [..., hidden_size]
Returns:
float32 tensor with shape [..., vocab_size].
"""
first_dims = shape_list(inputs)[:-1]
x = tf.reshape(inputs, [-1, self.hidden_size])
logits = tf.matmul(x, self.weight, transpose_b=True)
return tf.reshape(logits, first_dims + [self.vocab_size])
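# Hedged usage sketch of the two modes (sizes are illustrative):
#
# >>> emb = TFSharedEmbeddings(vocab_size=100, hidden_size=16)
# >>> hidden = emb(tf.constant([[1, 2, 3]]), mode="embedding")   # -> [1, 3, 16]
# >>> logits = emb(hidden, mode="linear")                        # -> [1, 3, 100]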
class TFSequenceSummary(tf.keras.layers.Layer):
"""
Compute a single vector summary of a sequence hidden states.
Args:
config (:class:`~transformers.PretrainedConfig`):
The config used by the model. Relevant arguments in the config class of the model are (refer to the actual
config class of your model for the default values it uses):
- **summary_type** (:obj:`str`) -- The method to use to make this summary. Accepted values are:
- :obj:`"last"` -- Take the last token hidden state (like XLNet)
- :obj:`"first"` -- Take the first token hidden state (like Bert)
- :obj:`"mean"` -- Take the mean of all tokens hidden states
- :obj:`"cls_index"` -- Supply a Tensor of classification token position (GPT/GPT-2)
- :obj:`"attn"` -- Not implemented now, use multi-head attention
- **summary_use_proj** (:obj:`bool`) -- Add a projection after the vector extraction.
- **summary_proj_to_labels** (:obj:`bool`) -- If :obj:`True`, the projection outputs to
:obj:`config.num_labels` classes (otherwise to :obj:`config.hidden_size`).
- **summary_activation** (:obj:`Optional[str]`) -- Set to :obj:`"tanh"` to add a tanh activation to the
output, another string or :obj:`None` will add no activation.
- **summary_first_dropout** (:obj:`float`) -- Optional dropout probability before the projection and
activation.
- **summary_last_dropout** (:obj:`float`) -- Optional dropout probability after the projection and
activation.
initializer_range (:obj:`float`, defaults to 0.02): The standard deviation to use to initialize the weights.
kwargs:
Additional keyword arguments passed along to the :obj:`__init__` of :obj:`tf.keras.layers.Layer`.
"""
def __init__(self, config: PretrainedConfig, initializer_range: float = 0.02, **kwargs):
super().__init__(**kwargs)
# fall back to "last" when the config does not define a summary type
self.summary_type = config.summary_type if hasattr(config, "summary_type") else "last"
if self.summary_type == "attn":
# We should use a standard multi-head attention module with absolute positional embedding for that.
# Cf. https://github.com/zihangdai/xlnet/blob/master/modeling.py#L253-L276
# We can probably just use the multi-head attention module of PyTorch >=1.1.0
raise NotImplementedError
self.has_summary = hasattr(config, "summary_use_proj") and config.summary_use_proj
if self.has_summary:
if hasattr(config, "summary_proj_to_labels") and config.summary_proj_to_labels and config.num_labels > 0:
num_classes = config.num_labels
else:
num_classes = config.hidden_size
self.summary = tf.keras.layers.Dense(
num_classes, kernel_initializer=get_initializer(initializer_range), name="summary"
)
self.has_activation = hasattr(config, "summary_activation") and config.summary_activation == "tanh"
if self.has_activation:
self.activation = tf.keras.activations.tanh
self.has_first_dropout = hasattr(config, "summary_first_dropout") and config.summary_first_dropout > 0
if self.has_first_dropout:
self.first_dropout = tf.keras.layers.Dropout(config.summary_first_dropout)
self.has_last_dropout = hasattr(config, "summary_last_dropout") and config.summary_last_dropout > 0
if self.has_last_dropout:
self.last_dropout = tf.keras.layers.Dropout(config.summary_last_dropout)
def call(self, inputs, cls_index=None, training=False):
if not isinstance(inputs, (dict, tuple, list)):
hidden_states = inputs
elif isinstance(inputs, (tuple, list)):
hidden_states = inputs[0]
cls_index = inputs[1] if len(inputs) > 1 else None
assert len(inputs) <= 2, "Too many inputs."
else:
hidden_states = inputs.get("hidden_states")
cls_index = inputs.get("cls_index", None)
if self.summary_type == "last":
output = hidden_states[:, -1]
elif self.summary_type == "first":
output = hidden_states[:, 0]
elif self.summary_type == "mean":
output = tf.reduce_mean(hidden_states, axis=1)
elif self.summary_type == "cls_index":
hidden_shape = shape_list(hidden_states) # e.g. [batch, num choices, seq length, hidden dims]
if cls_index is None:
cls_index = tf.fill(
hidden_shape[:-2], hidden_shape[-2] - 1
) # A tensor of shape [batch] or [batch, num choices] filled with the index of the last token
cls_shape = shape_list(cls_index)
if len(cls_shape) <= len(hidden_shape) - 2:
cls_index = tf.expand_dims(cls_index, axis=-1)
# else:
# cls_index = cls_index[..., tf.newaxis]
# cls_index = cls_index.expand((-1,) * (cls_index.dim()-1) + (hidden_states.size(-1),))
# shape of cls_index: (bsz, XX, 1, hidden_size) where XX are optional leading dim of hidden_states
output = tf.gather(hidden_states, cls_index, batch_dims=len(hidden_shape) - 2)
output = tf.squeeze(
output, axis=len(hidden_shape) - 2
) # shape of output: (batch, num choices, hidden_size)
elif self.summary_type == "attn":
raise NotImplementedError
if self.has_first_dropout:
output = self.first_dropout(output, training=training)
if self.has_summary:
output = self.summary(output)
if self.has_activation:
output = self.activation(output)
if self.has_last_dropout:
output = self.last_dropout(output, training=training)
return output
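# Hedged usage sketch, assuming `config` is a PretrainedConfig that sets
# summary_type="first" and the other summary_* attributes (contents illustrative):
#
# >>> summary = TFSequenceSummary(config)
# >>> summary(tf.random.uniform([2, 10, 768])).shape   # -> (2, 768), or (2, num_labels) with a projection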
def shape_list(tensor: tf.Tensor) -> List[int]:
"""
Deal with dynamic shapes in TensorFlow cleanly.
Args:
tensor (:obj:`tf.Tensor`): The tensor we want the shape of.
Returns:
:obj:`List[int]`: The shape of the tensor as a list.
"""
dynamic = tf.shape(tensor)
if tensor.shape == tf.TensorShape(None):
return dynamic
static = tensor.shape.as_list()
return [dynamic[i] if s is None else s for i, s in enumerate(static)]
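# Sketch of the static/dynamic mix: inside a tf.function traced with an unknown
# batch dimension, the first entry is a dynamic tensor while known dims stay Python ints:
#
# >>> @tf.function(input_signature=[tf.TensorSpec([None, 768], tf.float32)])
# ... def first_dim(x):
# ...     return shape_list(x)[0]   # dynamic, since the batch size is unknown at trace time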
def get_initializer(initializer_range: float = 0.02) -> tf.initializers.TruncatedNormal:
"""
Creates a :obj:`tf.initializers.TruncatedNormal` with the given range.
Args:
initializer_range (`float`, defaults to 0.02): Standard deviation of the truncated normal initializer.
Returns:
:obj:`tf.initializers.TruncatedNormal`: The truncated normal initializer.
"""
return tf.keras.initializers.TruncatedNormal(stddev=initializer_range)
class TFWrappedEmbeddings:
"""
This class wraps the TFSharedEmbeddings layer into a plain Python (non-Keras-layer) class to avoid problems with
weight restoring. It also makes sure that the layer is called from the correct scope to avoid problems with
saving/restoring the correct weights.
"""
def __init__(self, layer, abs_scope_name=None):
self._layer = layer
self._abs_scope_name = abs_scope_name
def call(self, inputs, mode="embedding"):
if self._abs_scope_name is None:
return self._layer.call(inputs, mode)
# if an abs scope name is given to the embedding variable, call variable from absolute scope
with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
with tf.name_scope(abs_scope_name.original_name_scope):
return self._layer.call(inputs, mode)
def __call__(self, inputs, mode="embedding"):
if self._abs_scope_name is None:
return self._layer(inputs, mode)
# if an abs scope name is given to the embedding variable, call variable from absolute scope
with tf.compat.v1.variable_scope(self._abs_scope_name, auxiliary_name_scope=False) as abs_scope_name:
with tf.name_scope(abs_scope_name.original_name_scope):
return self._layer(inputs, mode)
| [
"tensorflow.convert_to_tensor",
"numpy.asarray",
"tensorflow.python.keras.backend.int_shape",
"tensorflow.keras.utils.register_keras_serializable",
"tensorflow.python.keras.saving.hdf5_format.load_attributes_from_hdf5_group",
"tensorflow.rank",
"tensorflow.math.reduce_any",
"tensorflow.gather",
"tensorflow.name_scope",
"tensorflow.keras.initializers.TruncatedNormal",
"tensorflow.compat.v1.variable_scope",
"tensorflow.matmul",
"tensorflow.TensorShape",
"tensorflow.executing_eagerly",
"tensorflow.fill",
"tensorflow.shape",
"tensorflow.keras.losses.MeanSquaredError",
"tensorflow.zeros_initializer",
"tensorflow.python.keras.backend.batch_set_value",
"tensorflow.constant",
"tensorflow.reduce_mean",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.reshape",
"tensorflow.expand_dims",
"tensorflow.get_logger",
"tensorflow.math.greater",
"tensorflow.keras.layers.Dropout",
"tensorflow.TensorSpec"
] | src/transformers/modeling_tf_utils.py | [(50, 'tensorflow.get_logger', 'tf.get_logger', ([], {}), True, 'import tensorflow as tf\n'), (106, 'functools.wraps', 'functools.wraps', (['initializer'], {}), False, 'import functools\n'), (274, 'tensorflow.executing_eagerly', 'tf.executing_eagerly', ([], {}), True, 'import tensorflow as tf\n'), (556, 'tensorflow.python.keras.backend.batch_set_value', 'K.batch_set_value', (['weight_value_tuples'], {}), True, 'from tensorflow.python.keras import backend as K\n'), (583, 'tensorflow.math.greater', 'tf.math.greater', (['size_diff', '(0)'], {}), True, 'import tensorflow as tf\n'), (1667, 'tensorflow.shape', 'tf.shape', (['tensor'], {}), True, 'import tensorflow as tf\n'), (1687, 'tensorflow.keras.initializers.TruncatedNormal', 'tf.keras.initializers.TruncatedNormal', ([], {'stddev': 'initializer_range'}), True, 'import tensorflow as tf\n'), (155, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (171, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (191, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (196, 'tensorflow.math.reduce_any', 'tf.math.reduce_any', (['(labels == -1)'], {}), True, 'import tensorflow as tf\n'), (246, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (340, 'warnings.warn', 'warnings.warn', (['"""The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (348, 'warnings.warn', 'warnings.warn', (['"""The `decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (475, 'h5py.File', 'h5py.File', (['resolved_archive_file', '"""r"""'], {}), False, 'import h5py\n'), (767, 'warnings.warn', 'warnings.warn', (['"""The method get_output_layer_with_bias is deprecated. Please use `get_lm_head` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (779, 'warnings.warn', 'warnings.warn', (['"""The method get_prefix_bias_name is deprecated. 
Please use `get_bias` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (1060, 'os.path.isfile', 'os.path.isfile', (['save_directory'], {}), False, 'import os\n'), (1068, 'os.makedirs', 'os.makedirs', (['save_directory'], {'exist_ok': '(True)'}), False, 'import os\n'), (1080, 'os.path.join', 'os.path.join', (['save_directory', 'TF2_WEIGHTS_NAME'], {}), False, 'import os\n'), (1321, 'os.path.isfile', 'os.path.isfile', (['resolved_archive_file'], {}), False, 'import os\n'), (1435, 'tensorflow.reshape', 'tf.reshape', (['x', '[-1, self.nx]'], {}), True, 'import tensorflow as tf\n'), (1438, 'tensorflow.reshape', 'tf.reshape', (['x', '[bz, sl, self.nf]'], {}), True, 'import tensorflow as tf\n'), (1522, 'tensorflow.gather', 'tf.gather', (['self.weight', 'input_ids'], {}), True, 'import tensorflow as tf\n'), (1535, 'tensorflow.reshape', 'tf.reshape', (['inputs', '[-1, self.hidden_size]'], {}), True, 'import tensorflow as tf\n'), (1536, 'tensorflow.matmul', 'tf.matmul', (['x', 'self.weight'], {'transpose_b': '(True)'}), True, 'import tensorflow as tf\n'), (1538, 'tensorflow.reshape', 'tf.reshape', (['logits', '(first_dims + [self.vocab_size])'], {}), True, 'import tensorflow as tf\n'), (1669, 'tensorflow.TensorShape', 'tf.TensorShape', (['None'], {}), True, 'import tensorflow as tf\n'), (140, 'tensorflow.keras.utils.register_keras_serializable', 'tf.keras.utils.register_keras_serializable', ([], {}), True, 'import tensorflow as tf\n'), (159, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (161, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (197, 'warnings.warn', 'warnings.warn', (['"""Using `-1` to mask the loss for the token is deprecated. 
Please use `-100` instead."""'], {}), False, 'import warnings\n'), (202, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (214, 'tensorflow.keras.losses.MeanSquaredError', 'tf.keras.losses.MeanSquaredError', ([], {'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (216, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {'from_logits': '(True)', 'reduction': 'tf.keras.losses.Reduction.NONE'}), True, 'import tensorflow as tf\n'), (251, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (252, 'tensorflow.reshape', 'tf.reshape', (['logits', '(-1, 2)'], {}), True, 'import tensorflow as tf\n'), (253, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (332, 'inspect.signature', 'inspect.signature', (['func'], {}), False, 'import inspect\n'), (477, 'tensorflow.python.keras.saving.hdf5_format.load_attributes_from_hdf5_group', 'hdf5_format.load_attributes_from_hdf5_group', (['f', '"""layer_names"""'], {}), False, 'from tensorflow.python.keras.saving import hdf5_format\n'), (588, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[0, size_diff], [0, 0]]'], {}), True, 'import tensorflow as tf\n'), (591, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[num_tokens_to_copy, 1]'], {}), True, 'import tensorflow as tf\n'), (592, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[[0, size_diff], [0, 0]]'], {}), True, 'import tensorflow as tf\n'), (597, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[0, 0]'], {}), True, 'import tensorflow as tf\n'), (598, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[new_num_tokens, old_embedding_dim]'], {}), True, 'import tensorflow as tf\n'), (600, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['[new_num_tokens, 1]'], {}), True, 'import tensorflow as tf\n'), (641, 'tensorflow.constant', 'tf.constant', (['DUMMY_INPUTS'], {}), True, 'import tensorflow as tf\n'), (921, 'tensorflow.math.greater', 'tf.math.greater', (['size_diff', '(0)'], {}), True, 'import tensorflow as tf\n'), (1247, 'os.path.isdir', 'os.path.isdir', (['pretrained_model_name_or_path'], {}), False, 'import os\n'), (1436, 'tensorflow.matmul', 'tf.matmul', (['x', 'self.weight'], {}), True, 'import tensorflow as tf\n'), (1599, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.summary_first_dropout'], {}), True, 'import tensorflow as tf\n'), (1603, 'tensorflow.keras.layers.Dropout', 'tf.keras.layers.Dropout', (['config.summary_last_dropout'], {}), True, 'import tensorflow as tf\n'), (1706, 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['self._abs_scope_name'], {'auxiliary_name_scope': '(False)'}), True, 'import tensorflow as tf\n'), (1715, 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['self._abs_scope_name'], {'auxiliary_name_scope': '(False)'}), True, 'import tensorflow as tf\n'), (198, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (200, 'tensorflow.reshape', 'tf.reshape', (['labels', '(-1,)'], {}), True, 'import tensorflow as tf\n'), (387, 'warnings.warn', 'warnings.warn', (['"""The `inputs` argument is deprecated and will be removed in a future version, use `input_ids` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (395, 'warnings.warn', 'warnings.warn', (['"""The 
`decoder_cached_states` argument is deprecated and will be removed in a future version, use `past_key_values` instead."""', 'FutureWarning'], {}), False, 'import warnings\n'), (501, 'tensorflow.python.keras.saving.hdf5_format.load_attributes_from_hdf5_group', 'hdf5_format.load_attributes_from_hdf5_group', (['h5_layer_object', '"""weight_names"""'], {}), False, 'from tensorflow.python.keras.saving import hdf5_format\n'), (1316, 'tensorflow.compat.v1.variable_scope', 'tf.compat.v1.variable_scope', (['load_weight_prefix'], {}), True, 'import tensorflow as tf\n'), (1430, 'tensorflow.zeros_initializer', 'tf.zeros_initializer', ([], {}), True, 'import tensorflow as tf\n'), (1707, 'tensorflow.name_scope', 'tf.name_scope', (['abs_scope_name.original_name_scope'], {}), True, 'import tensorflow as tf\n'), (1716, 'tensorflow.name_scope', 'tf.name_scope', (['abs_scope_name.original_name_scope'], {}), True, 'import tensorflow as tf\n'), (508, 'numpy.asarray', 'np.asarray', (['h5_layer_object[weight_name]'], {}), True, 'import numpy as np\n'), (666, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None)', 'tf.int32'], {'name': '"""input_ids"""'}), True, 'import tensorflow as tf\n'), (667, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None)', 'tf.int32'], {'name': '"""attention_mask"""'}), True, 'import tensorflow as tf\n'), (668, 'tensorflow.TensorSpec', 'tf.TensorSpec', (['(None, None)', 'tf.int32'], {'name': '"""token_type_ids"""'}), True, 'import tensorflow as tf\n'), (916, 'tensorflow.rank', 'tf.rank', (['weight'], {}), True, 'import tensorflow as tf\n'), (923, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['padding_shape'], {}), True, 'import tensorflow as tf\n'), (926, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['mask_shape'], {}), True, 'import tensorflow as tf\n'), (927, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['padding_shape'], {}), True, 'import tensorflow as tf\n'), (931, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['slice_from'], {}), True, 'import tensorflow as tf\n'), (931, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['final_shape'], {}), True, 'import tensorflow as tf\n'), (933, 'tensorflow.convert_to_tensor', 'tf.convert_to_tensor', (['final_shape'], {}), True, 'import tensorflow as tf\n'), (1250, 'os.path.join', 'os.path.join', (['pretrained_model_name_or_path', 'WEIGHTS_NAME'], {}), False, 'import os\n'), (1259, 'os.path.isfile', 'os.path.isfile', (['pretrained_model_name_or_path'], {}), False, 'import os\n'), (1261, 'os.path.isfile', 'os.path.isfile', (["(pretrained_model_name_or_path + '.index')"], {}), False, 'import os\n'), (1621, 'tensorflow.reduce_mean', 'tf.reduce_mean', (['hidden_states'], {'axis': '(1)'}), True, 'import tensorflow as tf\n'), (1248, 'os.path.join', 'os.path.join', (['pretrained_model_name_or_path', 'WEIGHTS_NAME'], {}), False, 'import os\n'), (1251, 'os.path.join', 'os.path.join', (['pretrained_model_name_or_path', 'TF2_WEIGHTS_NAME'], {}), False, 'import os\n'), (1253, 'os.path.join', 'os.path.join', (['pretrained_model_name_or_path', 'TF2_WEIGHTS_NAME'], {}), False, 'import os\n'), (536, 'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['symbolic_weight'], {}), True, 'from tensorflow.python.keras import backend as K\n'), (1341, 're.search', 're.search', (['pat', 'k'], {}), False, 'import re\n'), (1345, 're.search', 're.search', (['pat', 'k'], {}), False, 'import re\n'), (1625, 'tensorflow.fill', 'tf.fill', (['hidden_shape[:-2]', '(hidden_shape[-2] - 1)'], {}), True, 
'import tensorflow as tf\n'), (1630, 'tensorflow.expand_dims', 'tf.expand_dims', (['cls_index'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n'), (540, 'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['symbolic_weight'], {}), True, 'from tensorflow.python.keras import backend as K\n'), (544, 'tensorflow.python.keras.backend.int_shape', 'K.int_shape', (['symbolic_weight'], {}), True, 'from tensorflow.python.keras import backend as K\n')] |
baleian/python-ml-keyword_classifier | 06de1c768934f8382829a91fa7b14f1cb1a78ab7 | import tensorflow as tf
import tensorflow.keras.layers as layers
from .transformer import VALID_CHARS
class InputLayer(layers.Layer):
def __init__(self, num_class, **kwargs):
super(InputLayer, self).__init__(**kwargs)
self.num_class = num_class
self.reshape_layer = layers.Reshape((-1, 3, num_class))
def call(self, x, **kwargs):
x = tf.cast(x, dtype=tf.int32)
x = tf.one_hot(x, self.num_class)
x = self.reshape_layer(x)
return x
def get_config(self):
config = super(InputLayer, self).get_config()
config.update({'num_class': self.num_class})
return config
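# Hedged shape sketch: with an input of [batch, seq_len] character indices
# (seq_len divisible by 3), the layer emits [batch, seq_len / 3, 3, num_class]
# one-hot patches (the num_class value below is illustrative):
#
# >>> layer = InputLayer(num_class=70)
# >>> layer(tf.zeros([8, 30], dtype=tf.int32)).shape   # -> (8, 10, 3, 70)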
class ClassifierLayer(layers.Layer):
def __init__(self, num_class, **kwargs):
super(ClassifierLayer, self).__init__(**kwargs)
self.num_class = num_class
self.conv1_layers = [
layers.Conv2D(kernel_size=2, strides=1, filters=32, padding='same', activation='relu'),
layers.Conv2D(kernel_size=2, strides=1, filters=64, padding='same', activation='relu'),
layers.Conv2D(kernel_size=2, strides=1, filters=128, padding='same', activation='relu'),
layers.GlobalMaxPool2D()
]
self.conv2_layers = [
layers.Conv2D(kernel_size=2, strides=2, filters=32, padding='same', activation='relu'),
layers.Conv2D(kernel_size=3, strides=2, filters=64, padding='same', activation='relu'),
layers.Conv2D(kernel_size=4, strides=2, filters=128, padding='same', activation='relu'),
layers.GlobalMaxPool2D()
]
self.conv3_layers = [
layers.Conv2D(kernel_size=4, strides=1, filters=32, padding='same', activation='relu'),
layers.Conv2D(kernel_size=5, strides=1, filters=64, padding='same', activation='relu'),
layers.Conv2D(kernel_size=6, strides=1, filters=128, padding='same', activation='relu'),
layers.GlobalMaxPool2D()
]
self.concat_layer = layers.Concatenate(axis=-1)
self.dense_layer = layers.Dense(256, activation='relu')
self.output_layer = layers.Dense(num_class, activation='softmax')
def call(self, x, **kwargs):
h1 = self._call_sequential(x, self.conv1_layers)
h2 = self._call_sequential(x, self.conv2_layers)
h3 = self._call_sequential(x, self.conv3_layers)
x = self.concat_layer([h1, h2, h3])
x = self.dense_layer(x)
x = self.output_layer(x)
return x
def _call_sequential(self, x, sub_layers):
# `sub_layers` avoids shadowing the `tensorflow.keras.layers` module alias
for layer in sub_layers:
x = layer(x)
return x
def get_config(self):
config = super(ClassifierLayer, self).get_config()
config.update({'num_class': self.num_class})
return config
def create_training_model(input_shape, num_class, **kwargs):
x = inputs = layers.Input(shape=input_shape, dtype=tf.int32)
x = InputLayer(len(VALID_CHARS))(x)
x = outputs = ClassifierLayer(num_class)(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs, **kwargs)
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
return model
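# Hedged usage sketch (shape, class count and data names are illustrative):
#
# >>> model = create_training_model(input_shape=(30,), num_class=5)
# >>> model.fit(train_x, train_y, validation_data=(val_x, val_y), epochs=10)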
def create_ensemble_model(input_shape, num_class, folds, **kwargs):
x = inputs = layers.Input(shape=input_shape, dtype=tf.int32, name='feature')
x = InputLayer(len(VALID_CHARS))(x)
classifier_layers = [ClassifierLayer(num_class, name=fold.name) for fold in folds]
x = [layer(x) for layer in classifier_layers]
x = outputs = layers.Average(name='predicted')(x)
model = tf.keras.Model(inputs=inputs, outputs=outputs, **kwargs)
model.compile(optimizer='adam',
loss='sparse_categorical_crossentropy',
metrics=['accuracy'])
for layer, fold in zip(classifier_layers, folds):
layer.set_weights(fold.get_weights())
return model
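# Hedged usage sketch: `folds` is assumed to be a sequence of trained
# ClassifierLayer-compatible objects exposing `.name` and `.get_weights()`;
# the ensemble averages their softmax outputs:
#
# >>> ensemble = create_ensemble_model((30,), num_class=5, folds=fold_models)
# >>> predictions = ensemble.predict(test_x)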
| [
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.layers.GlobalMaxPool2D",
"tensorflow.keras.layers.Average",
"tensorflow.keras.layers.Dense",
"tensorflow.cast",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.Model",
"tensorflow.one_hot",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.layers.Input"
] | baleian/ml/keyword_classifier/model.py | [(74, 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'input_shape', 'dtype': 'tf.int32'}), True, 'import tensorflow.keras.layers as layers\n'), (78, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), True, 'import tensorflow as tf\n'), (86, 'tensorflow.keras.layers.Input', 'layers.Input', ([], {'shape': 'input_shape', 'dtype': 'tf.int32', 'name': '"""feature"""'}), True, 'import tensorflow.keras.layers as layers\n'), (92, 'tensorflow.keras.Model', 'tf.keras.Model', ([], {'inputs': 'inputs', 'outputs': 'outputs'}), True, 'import tensorflow as tf\n'), (12, 'tensorflow.keras.layers.Reshape', 'layers.Reshape', (['(-1, 3, num_class)'], {}), True, 'import tensorflow.keras.layers as layers\n'), (15, 'tensorflow.cast', 'tf.cast', (['x'], {'dtype': 'tf.int32'}), True, 'import tensorflow as tf\n'), (16, 'tensorflow.one_hot', 'tf.one_hot', (['x', 'self.num_class'], {}), True, 'import tensorflow as tf\n'), (49, 'tensorflow.keras.layers.Concatenate', 'layers.Concatenate', ([], {'axis': '(-1)'}), True, 'import tensorflow.keras.layers as layers\n'), (50, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['(256)'], {'activation': '"""relu"""'}), True, 'import tensorflow.keras.layers as layers\n'), (51, 'tensorflow.keras.layers.Dense', 'layers.Dense', (['num_class'], {'activation': '"""softmax"""'}), True, 'import tensorflow.keras.layers as layers\n'), (90, 'tensorflow.keras.layers.Average', 'layers.Average', ([], {'name': '"""predicted"""'}), True, 'import tensorflow.keras.layers as layers\n'), (32, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'kernel_size': '(2)', 'strides': '(1)', 'filters': '(32)', 'padding': '"""same"""', 'activation': '"""relu"""'}), True, 'import tensorflow.keras.layers as layers\n'), (33, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'kernel_size': '(2)', 'strides': '(1)', 'filters': '(64)', 'padding': '"""same"""', 'activation': '"""relu"""'}), True, 'import tensorflow.keras.layers as layers\n'), (34, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'kernel_size': '(2)', 'strides': '(1)', 'filters': '(128)', 'padding': '"""same"""', 'activation': '"""relu"""'}), True, 'import tensorflow.keras.layers as layers\n'), (35, 'tensorflow.keras.layers.GlobalMaxPool2D', 'layers.GlobalMaxPool2D', ([], {}), True, 'import tensorflow.keras.layers as layers\n'), (38, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'kernel_size': '(2)', 'strides': '(2)', 'filters': '(32)', 'padding': '"""same"""', 'activation': '"""relu"""'}), True, 'import tensorflow.keras.layers as layers\n'), (39, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'kernel_size': '(3)', 'strides': '(2)', 'filters': '(64)', 'padding': '"""same"""', 'activation': '"""relu"""'}), True, 'import tensorflow.keras.layers as layers\n'), (40, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'kernel_size': '(4)', 'strides': '(2)', 'filters': '(128)', 'padding': '"""same"""', 'activation': '"""relu"""'}), True, 'import tensorflow.keras.layers as layers\n'), (41, 'tensorflow.keras.layers.GlobalMaxPool2D', 'layers.GlobalMaxPool2D', ([], {}), True, 'import tensorflow.keras.layers as layers\n'), (44, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'kernel_size': '(4)', 'strides': '(1)', 'filters': '(32)', 'padding': '"""same"""', 'activation': '"""relu"""'}), True, 'import tensorflow.keras.layers as layers\n'), (45, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], 
{'kernel_size': '(5)', 'strides': '(1)', 'filters': '(64)', 'padding': '"""same"""', 'activation': '"""relu"""'}), True, 'import tensorflow.keras.layers as layers\n'), (46, 'tensorflow.keras.layers.Conv2D', 'layers.Conv2D', ([], {'kernel_size': '(6)', 'strides': '(1)', 'filters': '(128)', 'padding': '"""same"""', 'activation': '"""relu"""'}), True, 'import tensorflow.keras.layers as layers\n'), (47, 'tensorflow.keras.layers.GlobalMaxPool2D', 'layers.GlobalMaxPool2D', ([], {}), True, 'import tensorflow.keras.layers as layers\n')] |
404-Brain-Not-Found/Bird-Watcher | 2a9e2033faa17c4a28ae1be5d1145d556f2bc7b8 | import numpy as np
from tensorflow.keras import backend as k
from image_utils import non_max_suppression
def xywh2minmax(xy, wh):
xy_min = xy - wh / 2
xy_max = xy + wh / 2
return xy_min, xy_max
def iou(pred_mins, pred_maxes, true_mins, true_maxes):
intersect_mins = k.maximum(pred_mins, true_mins)
intersect_maxes = k.minimum(pred_maxes, true_maxes)
intersect_wh = k.maximum(intersect_maxes - intersect_mins, 0.)
intersect_areas = intersect_wh[..., 0] * intersect_wh[..., 1]
pred_wh = pred_maxes - pred_mins
true_wh = true_maxes - true_mins
pred_areas = pred_wh[..., 0] * pred_wh[..., 1]
true_areas = true_wh[..., 0] * true_wh[..., 1]
union_areas = pred_areas + true_areas - intersect_areas
iou_scores = intersect_areas / union_areas
return iou_scores
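# Worked example (illustrative values, not from the original code):
# pred box (0, 0)-(2, 2) vs. true box (1, 1)-(3, 3):
#   intersect_wh = max((2, 2) - (1, 1), 0) = (1, 1) -> intersect_area = 1
#   union = 2*2 + 2*2 - 1 = 7, so IoU = 1/7 ~= 0.143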
def yolo_head(feats):
# Dynamic implementation of conv dims for fully convolutional model.
conv_dims = k.shape(feats)[1:3] # assuming channels last
    # In YOLO the height index is the innermost iteration.
conv_height_index = k.arange(0, stop=conv_dims[0])
conv_width_index = k.arange(0, stop=conv_dims[1])
conv_height_index = k.tile(conv_height_index, [conv_dims[1]])
    # TODO: repeat_elements and tf.split don't support dynamic splits.
# conv_width_index = K.repeat_elements(conv_width_index, conv_dims[1], axis=0)
conv_width_index = k.tile(
k.expand_dims(conv_width_index, 0), [conv_dims[0], 1])
conv_width_index = k.flatten(k.transpose(conv_width_index))
conv_index = k.transpose(k.stack([conv_height_index, conv_width_index]))
conv_index = k.reshape(conv_index, [1, conv_dims[0], conv_dims[1], 1, 2])
conv_index = k.cast(conv_index, k.dtype(feats))
conv_dims = k.cast(k.reshape(conv_dims, [1, 1, 1, 1, 2]), k.dtype(feats))
box_xy = (feats[..., :2] + conv_index) / conv_dims * 448
box_wh = feats[..., 2:4] * 448
return box_xy, box_wh
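# Index-construction walk-through (illustrative, for a 2x2 grid):
#   conv_height_index -> [0, 1, 0, 1], conv_width_index -> [0, 0, 1, 1]
#   conv_index then pairs each cell with its (row, col) offset: [0,0], [1,0], [0,1], [1,1]
# so adding conv_index to the raw xy activations converts cell-relative offsets
# into absolute coordinates on the 448x448 input resolution this head assumes.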
def build_yolo_loss(n_classes=20, n_boxes=2, grid_w=7, grid_h=7):
def yolo_loss(y_true, y_pred):
label_class = y_true[..., :n_classes] # ? * 7 * 7 * 20
label_box = y_true[..., n_classes:n_classes + 4] # ? * 7 * 7 * 4
response_mask = y_true[..., n_classes + 4] # ? * 7 * 7
response_mask = k.expand_dims(response_mask) # ? * 7 * 7 * 1
predict_class = y_pred[..., :n_classes] # ? * 7 * 7 * 20
predict_trust = y_pred[..., n_classes:n_classes + 2] # ? * 7 * 7 * 2
predict_box = y_pred[..., n_classes + 2:] # ? * 7 * 7 * 8
_label_box = k.reshape(label_box, [-1, grid_h, grid_w, 1, 4])
_predict_box = k.reshape(predict_box, [-1, grid_h, grid_w, n_boxes, 4])
label_xy, label_wh = yolo_head(_label_box) # ? * 7 * 7 * 1 * 2, ? * 7 * 7 * 1 * 2
label_xy = k.expand_dims(label_xy, 3) # ? * 7 * 7 * 1 * 1 * 2
label_wh = k.expand_dims(label_wh, 3) # ? * 7 * 7 * 1 * 1 * 2
label_xy_min, label_xy_max = xywh2minmax(label_xy, label_wh) # ? * 7 * 7 * 1 * 1 * 2, ? * 7 * 7 * 1 * 1 * 2
predict_xy, predict_wh = yolo_head(_predict_box) # ? * 7 * 7 * 2 * 2, ? * 7 * 7 * 2 * 2
predict_xy = k.expand_dims(predict_xy, 4) # ? * 7 * 7 * 2 * 1 * 2
predict_wh = k.expand_dims(predict_wh, 4) # ? * 7 * 7 * 2 * 1 * 2
predict_xy_min, predict_xy_max = xywh2minmax(predict_xy,
predict_wh) # ? * 7 * 7 * 2 * 1 * 2, ? * 7 * 7 * 2 * 1 * 2
iou_scores = iou(predict_xy_min, predict_xy_max, label_xy_min, label_xy_max) # ? * 7 * 7 * 2 * 1
best_ious = k.max(iou_scores, axis=4) # ? * 7 * 7 * 2
best_box = k.max(best_ious, axis=3, keepdims=True) # ? * 7 * 7 * 1
box_mask = k.cast(best_ious >= best_box, k.dtype(best_ious)) # ? * 7 * 7 * 2
no_object_loss = 0.5 * (1 - box_mask * response_mask) * k.square(0 - predict_trust)
object_loss = box_mask * response_mask * k.square(1 - predict_trust)
confidence_loss = no_object_loss + object_loss
confidence_loss = k.sum(confidence_loss)
class_loss = response_mask * k.square(label_class - predict_class)
class_loss = k.sum(class_loss)
        _label_box = k.reshape(label_box, [-1, grid_h, grid_w, 1, 4])
        _predict_box = k.reshape(predict_box, [-1, grid_h, grid_w, n_boxes, 4])
label_xy, label_wh = yolo_head(_label_box) # ? * 7 * 7 * 1 * 2, ? * 7 * 7 * 1 * 2
predict_xy, predict_wh = yolo_head(_predict_box) # ? * 7 * 7 * 2 * 2, ? * 7 * 7 * 2 * 2
box_mask = k.expand_dims(box_mask)
response_mask = k.expand_dims(response_mask)
box_loss = 5 * box_mask * response_mask * k.square((label_xy - predict_xy) / 448)
box_loss += 5 * box_mask * response_mask * k.square((k.sqrt(label_wh) - k.sqrt(predict_wh)) / 448)
box_loss = k.sum(box_loss)
loss = confidence_loss + class_loss + box_loss
return loss
return yolo_loss
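# Usage sketch (illustrative; assumes a Keras model whose head emits a
# grid_h x grid_w x (n_classes + n_boxes * 5) tensor matching the layout above):
#   model.compile(optimizer="adam", loss=build_yolo_loss(n_classes=20, n_boxes=2))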
def decode_yolo_output(image, output, labels, n_boxes=2, box_conf_threshold=0.5):
decoded = []
grid_h, grid_w = output.shape[:2]
img_h, img_w = image.shape[:2]
for row in range(grid_h):
for col in range(grid_w):
class_matrix = output[row][col][:len(labels)]
boxes_matrix = output[row][col][len(labels):]
label_index = int(np.argmax(class_matrix))
info = {'label': labels[label_index], "label_conf": class_matrix[label_index]}
boxes = []
for b in range(n_boxes):
cen_x, cen_y, width, height, conf = boxes_matrix[b * 5: (b + 1) * 5]
if box_conf_threshold <= conf:
width *= img_w
height *= img_h
x = ((cen_x + col) / grid_w) * img_w
y = ((cen_y + row) / grid_h) * img_h
box = {"conf": conf, "xmin": x, "ymin": y, "xmax": width + x, "ymax": height + y}
boxes.append(box)
            if boxes:
                box = non_max_suppression(boxes, 0.3)[0]
info["xmin"] = box["xmin"]
info["ymin"] = box["ymin"]
info["xmax"] = box["xmax"]
info["ymax"] = box["ymax"]
if len(info) > 2:
decoded.append(info)
return decoded | [
"tensorflow.keras.backend.maximum",
"tensorflow.keras.backend.tile",
"tensorflow.keras.backend.max",
"tensorflow.keras.backend.sum",
"tensorflow.keras.backend.sqrt",
"tensorflow.keras.backend.transpose",
"tensorflow.keras.backend.reshape",
"tensorflow.keras.backend.arange",
"tensorflow.keras.backend.shape",
"tensorflow.keras.backend.expand_dims",
"tensorflow.keras.backend.minimum",
"tensorflow.keras.backend.stack",
"tensorflow.keras.backend.square",
"numpy.argmax",
"tensorflow.keras.backend.dtype"
] | yolo_utils.py | [(14, 'tensorflow.keras.backend.maximum', 'k.maximum', (['pred_mins', 'true_mins'], {}), True, 'from tensorflow.keras import backend as k\n'), (15, 'tensorflow.keras.backend.minimum', 'k.minimum', (['pred_maxes', 'true_maxes'], {}), True, 'from tensorflow.keras import backend as k\n'), (16, 'tensorflow.keras.backend.maximum', 'k.maximum', (['(intersect_maxes - intersect_mins)', '(0.0)'], {}), True, 'from tensorflow.keras import backend as k\n'), (34, 'tensorflow.keras.backend.arange', 'k.arange', (['(0)'], {'stop': 'conv_dims[0]'}), True, 'from tensorflow.keras import backend as k\n'), (35, 'tensorflow.keras.backend.arange', 'k.arange', (['(0)'], {'stop': 'conv_dims[1]'}), True, 'from tensorflow.keras import backend as k\n'), (36, 'tensorflow.keras.backend.tile', 'k.tile', (['conv_height_index', '[conv_dims[1]]'], {}), True, 'from tensorflow.keras import backend as k\n'), (44, 'tensorflow.keras.backend.reshape', 'k.reshape', (['conv_index', '[1, conv_dims[0], conv_dims[1], 1, 2]'], {}), True, 'from tensorflow.keras import backend as k\n'), (32, 'tensorflow.keras.backend.shape', 'k.shape', (['feats'], {}), True, 'from tensorflow.keras import backend as k\n'), (41, 'tensorflow.keras.backend.expand_dims', 'k.expand_dims', (['conv_width_index', '(0)'], {}), True, 'from tensorflow.keras import backend as k\n'), (42, 'tensorflow.keras.backend.transpose', 'k.transpose', (['conv_width_index'], {}), True, 'from tensorflow.keras import backend as k\n'), (43, 'tensorflow.keras.backend.stack', 'k.stack', (['[conv_height_index, conv_width_index]'], {}), True, 'from tensorflow.keras import backend as k\n'), (45, 'tensorflow.keras.backend.dtype', 'k.dtype', (['feats'], {}), True, 'from tensorflow.keras import backend as k\n'), (47, 'tensorflow.keras.backend.reshape', 'k.reshape', (['conv_dims', '[1, 1, 1, 1, 2]'], {}), True, 'from tensorflow.keras import backend as k\n'), (47, 'tensorflow.keras.backend.dtype', 'k.dtype', (['feats'], {}), True, 'from tensorflow.keras import backend as k\n'), (60, 'tensorflow.keras.backend.expand_dims', 'k.expand_dims', (['response_mask'], {}), True, 'from tensorflow.keras import backend as k\n'), (66, 'tensorflow.keras.backend.reshape', 'k.reshape', (['label_box', '[-1, grid_h, grid_w, 1, 4]'], {}), True, 'from tensorflow.keras import backend as k\n'), (67, 'tensorflow.keras.backend.reshape', 'k.reshape', (['predict_box', '[-1, grid_h, grid_w, n_boxes, 4]'], {}), True, 'from tensorflow.keras import backend as k\n'), (70, 'tensorflow.keras.backend.expand_dims', 'k.expand_dims', (['label_xy', '(3)'], {}), True, 'from tensorflow.keras import backend as k\n'), (71, 'tensorflow.keras.backend.expand_dims', 'k.expand_dims', (['label_wh', '(3)'], {}), True, 'from tensorflow.keras import backend as k\n'), (75, 'tensorflow.keras.backend.expand_dims', 'k.expand_dims', (['predict_xy', '(4)'], {}), True, 'from tensorflow.keras import backend as k\n'), (76, 'tensorflow.keras.backend.expand_dims', 'k.expand_dims', (['predict_wh', '(4)'], {}), True, 'from tensorflow.keras import backend as k\n'), (81, 'tensorflow.keras.backend.max', 'k.max', (['iou_scores'], {'axis': '(4)'}), True, 'from tensorflow.keras import backend as k\n'), (82, 'tensorflow.keras.backend.max', 'k.max', (['best_ious'], {'axis': '(3)', 'keepdims': '(True)'}), True, 'from tensorflow.keras import backend as k\n'), (89, 'tensorflow.keras.backend.sum', 'k.sum', (['confidence_loss'], {}), True, 'from tensorflow.keras import backend as k\n'), (92, 'tensorflow.keras.backend.sum', 'k.sum', (['class_loss'], 
{}), True, 'from tensorflow.keras import backend as k\n'), (94, 'tensorflow.keras.backend.reshape', 'k.reshape', (['label_box', '[-1, 7, 7, 1, 4]'], {}), True, 'from tensorflow.keras import backend as k\n'), (95, 'tensorflow.keras.backend.reshape', 'k.reshape', (['predict_box', '[-1, 7, 7, 2, 4]'], {}), True, 'from tensorflow.keras import backend as k\n'), (100, 'tensorflow.keras.backend.expand_dims', 'k.expand_dims', (['box_mask'], {}), True, 'from tensorflow.keras import backend as k\n'), (101, 'tensorflow.keras.backend.expand_dims', 'k.expand_dims', (['response_mask'], {}), True, 'from tensorflow.keras import backend as k\n'), (105, 'tensorflow.keras.backend.sum', 'k.sum', (['box_loss'], {}), True, 'from tensorflow.keras import backend as k\n'), (84, 'tensorflow.keras.backend.dtype', 'k.dtype', (['best_ious'], {}), True, 'from tensorflow.keras import backend as k\n'), (86, 'tensorflow.keras.backend.square', 'k.square', (['(0 - predict_trust)'], {}), True, 'from tensorflow.keras import backend as k\n'), (87, 'tensorflow.keras.backend.square', 'k.square', (['(1 - predict_trust)'], {}), True, 'from tensorflow.keras import backend as k\n'), (91, 'tensorflow.keras.backend.square', 'k.square', (['(label_class - predict_class)'], {}), True, 'from tensorflow.keras import backend as k\n'), (103, 'tensorflow.keras.backend.square', 'k.square', (['((label_xy - predict_xy) / 448)'], {}), True, 'from tensorflow.keras import backend as k\n'), (126, 'numpy.argmax', 'np.argmax', (['class_matrix'], {}), True, 'import numpy as np\n'), (144, 'image_utils.non_max_suppression', 'non_max_suppression', (['boxes', '(0.3)'], {}), False, 'from image_utils import non_max_suppression\n'), (104, 'tensorflow.keras.backend.sqrt', 'k.sqrt', (['label_wh'], {}), True, 'from tensorflow.keras import backend as k\n'), (104, 'tensorflow.keras.backend.sqrt', 'k.sqrt', (['predict_wh'], {}), True, 'from tensorflow.keras import backend as k\n')] |
CodeProcessor/DeepLab-Training-Pipeline | e6ae556c252703817142828ba28ea51e8cce60e7 | # -*- coding: utf-8 -*-
""" Deeplabv3+ model for Keras.
This model is based on TF repo:
https://github.com/tensorflow/models/tree/master/research/deeplab
On Pascal VOC, original model gets to 84.56% mIOU
MobileNetv2 backbone is based on this repo:
https://github.com/JonathanCMitchell/mobilenet_v2_keras
# Reference
- [Encoder-Decoder with Atrous Separable Convolution
for Semantic Image Segmentation](https://arxiv.org/pdf/1802.02611.pdf)
- [Xception: Deep Learning with Depthwise Separable Convolutions]
(https://arxiv.org/abs/1610.02357)
- [Inverted Residuals and Linear Bottlenecks: Mobile Networks for
Classification, Detection and Segmentation](https://arxiv.org/abs/1801.04381)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras.applications.imagenet_utils import preprocess_input
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Add
from tensorflow.keras.layers import BatchNormalization
from tensorflow.keras.layers import Concatenate
from tensorflow.keras.layers import Conv2D
from tensorflow.keras.layers import DepthwiseConv2D
from tensorflow.keras.layers import Dropout
from tensorflow.keras.layers import GlobalAveragePooling2D
from tensorflow.keras.layers import Input
from tensorflow.keras.layers import Reshape
from tensorflow.keras.layers import ZeroPadding2D
from tensorflow.keras.models import Model
from tensorflow.python.keras.utils.data_utils import get_file
from tensorflow.python.keras.utils.layer_utils import get_source_inputs
WEIGHTS_PATH_X = "https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.1/deeplabv3_xception_tf_dim_ordering_tf_kernels.h5"
WEIGHTS_PATH_MOBILE = "https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.1/deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5"
WEIGHTS_PATH_X_CS = "https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.2/deeplabv3_xception_tf_dim_ordering_tf_kernels_cityscapes.h5"
WEIGHTS_PATH_MOBILE_CS = "https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.2/deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels_cityscapes.h5"
def SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):
""" SepConv with BN between depthwise & pointwise. Optionally add activation after BN
Implements right "same" padding for even kernel sizes
Args:
x: input tensor
filters: num of filters in pointwise convolution
prefix: prefix before name
stride: stride at depthwise conv
kernel_size: kernel size for depthwise convolution
rate: atrous rate for depthwise convolution
        depth_activation: flag to use activation between depthwise & pointwise convs
epsilon: epsilon to use in BN layer
"""
if stride == 1:
depth_padding = 'same'
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
x = ZeroPadding2D((pad_beg, pad_end))(x)
depth_padding = 'valid'
if not depth_activation:
x = Activation(tf.nn.relu)(x)
x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),
padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)
x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)
if depth_activation:
x = Activation(tf.nn.relu)(x)
x = Conv2D(filters, (1, 1), padding='same',
use_bias=False, name=prefix + '_pointwise')(x)
x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)
if depth_activation:
x = Activation(tf.nn.relu)(x)
return x
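# Usage sketch (illustrative): a stride-2 separable block with activations
# between the depthwise and pointwise convolutions:
#   x = SepConv_BN(x, 256, 'example_block', stride=2, depth_activation=True)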
def _conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1):
"""Implements right 'same' padding for even kernel sizes
Without this there is a 1 pixel drift when stride = 2
Args:
        x: input tensor
        filters: num of filters in the convolution
        prefix: name prefix for the layer
        stride: stride of the convolution
        kernel_size: kernel size of the convolution
        rate: atrous (dilation) rate of the convolution
    """
if stride == 1:
return Conv2D(filters,
(kernel_size, kernel_size),
strides=(stride, stride),
padding='same', use_bias=False,
dilation_rate=(rate, rate),
name=prefix)(x)
else:
kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)
pad_total = kernel_size_effective - 1
pad_beg = pad_total // 2
pad_end = pad_total - pad_beg
x = ZeroPadding2D((pad_beg, pad_end))(x)
return Conv2D(filters,
(kernel_size, kernel_size),
strides=(stride, stride),
padding='valid', use_bias=False,
dilation_rate=(rate, rate),
name=prefix)(x)
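# Padding walk-through (illustrative): kernel_size=3 with rate=2 gives an
# effective kernel of 3 + (3 - 1) * (2 - 1) = 5, so pad_total = 4 and the
# input is padded (2, 2) before the 'valid' conv, reproducing 'same' output
# sizes without the 1-pixel drift at stride 2.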
def _xception_block(inputs, depth_list, prefix, skip_connection_type, stride,
rate=1, depth_activation=False, return_skip=False):
""" Basic building block of modified Xception network
Args:
inputs: input tensor
depth_list: number of filters in each SepConv layer. len(depth_list) == 3
prefix: prefix before name
skip_connection_type: one of {'conv','sum','none'}
stride: stride at last depthwise conv
rate: atrous rate for depthwise convolution
depth_activation: flag to use activation between depthwise & pointwise convs
return_skip: flag to return additional tensor after 2 SepConvs for decoder
"""
residual = inputs
for i in range(3):
residual = SepConv_BN(residual,
depth_list[i],
prefix + '_separable_conv{}'.format(i + 1),
stride=stride if i == 2 else 1,
rate=rate,
depth_activation=depth_activation)
if i == 1:
skip = residual
if skip_connection_type == 'conv':
shortcut = _conv2d_same(inputs, depth_list[-1], prefix + '_shortcut',
kernel_size=1,
stride=stride)
shortcut = BatchNormalization(name=prefix + '_shortcut_BN')(shortcut)
outputs = layers.add([residual, shortcut])
elif skip_connection_type == 'sum':
outputs = layers.add([residual, inputs])
elif skip_connection_type == 'none':
outputs = residual
if return_skip:
return outputs, skip
else:
return outputs
def _make_divisible(v, divisor, min_value=None):
if min_value is None:
min_value = divisor
new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
# Make sure that round down does not go down by more than 10%.
if new_v < 0.9 * v:
new_v += divisor
return new_v
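# Worked examples (illustrative): _make_divisible(32 * 0.75, 8) -> 24, and
# _make_divisible(10, 8) -> 16, because rounding down to 8 would drop the
# channel count by more than 10%.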
def _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1):
    in_channels = inputs.shape[-1]  # static channel count (channels-last layout)
pointwise_conv_filters = int(filters * alpha)
pointwise_filters = _make_divisible(pointwise_conv_filters, 8)
x = inputs
prefix = 'expanded_conv_{}_'.format(block_id)
if block_id:
# Expand
x = Conv2D(expansion * in_channels, kernel_size=1, padding='same',
use_bias=False, activation=None,
name=prefix + 'expand')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name=prefix + 'expand_BN')(x)
x = Activation(tf.nn.relu6, name=prefix + 'expand_relu')(x)
else:
prefix = 'expanded_conv_'
# Depthwise
x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,
use_bias=False, padding='same', dilation_rate=(rate, rate),
name=prefix + 'depthwise')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name=prefix + 'depthwise_BN')(x)
x = Activation(tf.nn.relu6, name=prefix + 'depthwise_relu')(x)
# Project
x = Conv2D(pointwise_filters,
kernel_size=1, padding='same', use_bias=False, activation=None,
name=prefix + 'project')(x)
x = BatchNormalization(epsilon=1e-3, momentum=0.999,
name=prefix + 'project_BN')(x)
if skip_connection:
return Add(name=prefix + 'add')([inputs, x])
# if in_channels == pointwise_filters and stride == 1:
# return Add(name='res_connect_' + str(block_id))([inputs, x])
return x
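# Shape sketch (illustrative): with a 64-channel input, expansion=6, alpha=1.0
# and filters=24, the block expands to 6 * 64 = 384 channels, applies the 3x3
# depthwise conv, then projects down to _make_divisible(24 * 1.0, 8) = 24 channels;
# the residual Add is taken only when the caller sets skip_connection.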
def Deeplabv3(weights='pascal_voc', input_tensor=None, input_shape=(512, 512, 3), classes=21, backbone='mobilenetv2',
OS=16, alpha=1., activation=None):
""" Instantiates the Deeplabv3+ architecture
Optionally loads weights pre-trained
on PASCAL VOC or Cityscapes. This model is available for TensorFlow only.
# Arguments
weights: one of 'pascal_voc' (pre-trained on pascal voc),
'cityscapes' (pre-trained on cityscape) or None (random initialization)
input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)
to use as image input for the model.
input_shape: shape of input image. format HxWxC
            The PASCAL VOC model was trained on (512, 512, 3) images; None is allowed for height/width.
        classes: number of desired classes. PASCAL VOC has 21 classes, Cityscapes has 19.
            If the number of classes does not match the weights used, the last layer is initialized randomly.
backbone: backbone to use. one of {'xception','mobilenetv2'}
activation: optional activation to add to the top of the network.
One of 'softmax', 'sigmoid' or None
OS: determines input_shape/feature_extractor_output ratio. One of {8,16}.
Used only for xception backbone.
alpha: controls the width of the MobileNetV2 network. This is known as the
width multiplier in the MobileNetV2 paper.
- If `alpha` < 1.0, proportionally decreases the number
of filters in each layer.
- If `alpha` > 1.0, proportionally increases the number
of filters in each layer.
- If `alpha` = 1, default number of filters from the paper
are used at each layer.
            Used only for mobilenetv2 backbone. Pretrained weights are only available for alpha=1.
# Returns
A Keras model instance.
# Raises
RuntimeError: If attempting to run this model with a
backend that does not support separable convolutions.
ValueError: in case of invalid argument for `weights` or `backbone`
"""
if not (weights in {'pascal_voc', 'cityscapes', None}):
raise ValueError('The `weights` argument should be either '
'`None` (random initialization), `pascal_voc`, or `cityscapes` '
'(pre-trained on PASCAL VOC)')
if not (backbone in {'xception', 'mobilenetv2'}):
raise ValueError('The `backbone` argument should be either '
'`xception` or `mobilenetv2` ')
if input_tensor is None:
img_input = Input(shape=input_shape)
else:
img_input = input_tensor
if backbone == 'xception':
if OS == 8:
entry_block3_stride = 1
middle_block_rate = 2 # ! Not mentioned in paper, but required
exit_block_rates = (2, 4)
atrous_rates = (12, 24, 36)
else:
entry_block3_stride = 2
middle_block_rate = 1
exit_block_rates = (1, 2)
atrous_rates = (6, 12, 18)
x = Conv2D(32, (3, 3), strides=(2, 2),
name='entry_flow_conv1_1', use_bias=False, padding='same')(img_input)
x = BatchNormalization(name='entry_flow_conv1_1_BN')(x)
x = Activation(tf.nn.relu)(x)
x = _conv2d_same(x, 64, 'entry_flow_conv1_2', kernel_size=3, stride=1)
x = BatchNormalization(name='entry_flow_conv1_2_BN')(x)
x = Activation(tf.nn.relu)(x)
x = _xception_block(x, [128, 128, 128], 'entry_flow_block1',
skip_connection_type='conv', stride=2,
depth_activation=False)
x, skip1 = _xception_block(x, [256, 256, 256], 'entry_flow_block2',
skip_connection_type='conv', stride=2,
depth_activation=False, return_skip=True)
x = _xception_block(x, [728, 728, 728], 'entry_flow_block3',
skip_connection_type='conv', stride=entry_block3_stride,
depth_activation=False)
for i in range(16):
x = _xception_block(x, [728, 728, 728], 'middle_flow_unit_{}'.format(i + 1),
skip_connection_type='sum', stride=1, rate=middle_block_rate,
depth_activation=False)
x = _xception_block(x, [728, 1024, 1024], 'exit_flow_block1',
skip_connection_type='conv', stride=1, rate=exit_block_rates[0],
depth_activation=False)
x = _xception_block(x, [1536, 1536, 2048], 'exit_flow_block2',
skip_connection_type='none', stride=1, rate=exit_block_rates[1],
depth_activation=True)
else:
OS = 8
first_block_filters = _make_divisible(32 * alpha, 8)
x = Conv2D(first_block_filters,
kernel_size=3,
strides=(2, 2), padding='same', use_bias=False,
name='Conv' if input_shape[2] == 3 else 'Conv_')(img_input)
x = BatchNormalization(
epsilon=1e-3, momentum=0.999, name='Conv_BN')(x)
x = Activation(tf.nn.relu6, name='Conv_Relu6')(x)
x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,
expansion=1, block_id=0, skip_connection=False)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,
expansion=6, block_id=1, skip_connection=False)
x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,
expansion=6, block_id=2, skip_connection=True)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,
expansion=6, block_id=3, skip_connection=False)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=4, skip_connection=True)
x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,
expansion=6, block_id=5, skip_connection=True)
# stride in block 6 changed from 2 -> 1, so we need to use rate = 2
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, # 1!
expansion=6, block_id=6, skip_connection=False)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=7, skip_connection=True)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=8, skip_connection=True)
x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=9, skip_connection=True)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=10, skip_connection=False)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=11, skip_connection=True)
x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,
expansion=6, block_id=12, skip_connection=True)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=2, # 1!
expansion=6, block_id=13, skip_connection=False)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,
expansion=6, block_id=14, skip_connection=True)
x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,
expansion=6, block_id=15, skip_connection=True)
x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, rate=4,
expansion=6, block_id=16, skip_connection=False)
# end of feature extractor
# branching for Atrous Spatial Pyramid Pooling
# Image Feature branch
shape_before = tf.shape(x)
b4 = GlobalAveragePooling2D()(x)
b4_shape = tf.keras.backend.int_shape(b4)
# from (b_size, channels)->(b_size, 1, 1, channels)
b4 = Reshape((1, 1, b4_shape[1]))(b4)
b4 = Conv2D(256, (1, 1), padding='same',
use_bias=False, name='image_pooling')(b4)
b4 = BatchNormalization(name='image_pooling_BN', epsilon=1e-5)(b4)
b4 = Activation(tf.nn.relu)(b4)
    # upsample back to the pre-pooling spatial size
size_before = tf.keras.backend.int_shape(x)
b4 = tf.keras.layers.experimental.preprocessing.Resizing(
*size_before[1:3], interpolation="bilinear"
)(b4)
# simple 1x1
b0 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='aspp0')(x)
b0 = BatchNormalization(name='aspp0_BN', epsilon=1e-5)(b0)
b0 = Activation(tf.nn.relu, name='aspp0_activation')(b0)
    # MobileNetV2 uses only two ASPP branches (image pooling + the 1x1 conv)
if backbone == 'xception':
# rate = 6 (12)
b1 = SepConv_BN(x, 256, 'aspp1',
rate=atrous_rates[0], depth_activation=True, epsilon=1e-5)
# rate = 12 (24)
b2 = SepConv_BN(x, 256, 'aspp2',
rate=atrous_rates[1], depth_activation=True, epsilon=1e-5)
# rate = 18 (36)
b3 = SepConv_BN(x, 256, 'aspp3',
rate=atrous_rates[2], depth_activation=True, epsilon=1e-5)
# concatenate ASPP branches & project
x = Concatenate()([b4, b0, b1, b2, b3])
else:
x = Concatenate()([b4, b0])
x = Conv2D(256, (1, 1), padding='same',
use_bias=False, name='concat_projection')(x)
x = BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(x)
x = Activation(tf.nn.relu)(x)
x = Dropout(0.1)(x)
# DeepLab v.3+ decoder
if backbone == 'xception':
# Feature projection
# x4 (x2) block
skip_size = tf.keras.backend.int_shape(skip1)
x = tf.keras.layers.experimental.preprocessing.Resizing(
*skip_size[1:3], interpolation="bilinear"
)(x)
dec_skip1 = Conv2D(48, (1, 1), padding='same',
use_bias=False, name='feature_projection0')(skip1)
dec_skip1 = BatchNormalization(
name='feature_projection0_BN', epsilon=1e-5)(dec_skip1)
dec_skip1 = Activation(tf.nn.relu)(dec_skip1)
x = Concatenate()([x, dec_skip1])
x = SepConv_BN(x, 256, 'decoder_conv0',
depth_activation=True, epsilon=1e-5)
x = SepConv_BN(x, 256, 'decoder_conv1',
depth_activation=True, epsilon=1e-5)
    # you can use it with an arbitrary number of classes
if (weights == 'pascal_voc' and classes == 21) or (weights == 'cityscapes' and classes == 19):
last_layer_name = 'logits_semantic'
else:
last_layer_name = 'custom_logits_semantic'
x = Conv2D(classes, (1, 1), padding='same', name=last_layer_name)(x)
size_before3 = tf.keras.backend.int_shape(img_input)
x = tf.keras.layers.experimental.preprocessing.Resizing(
*size_before3[1:3], interpolation="bilinear"
)(x)
# Ensure that the model takes into account
# any potential predecessors of `input_tensor`.
if input_tensor is not None:
inputs = get_source_inputs(input_tensor)
else:
inputs = img_input
if activation in {'softmax', 'sigmoid'}:
x = tf.keras.layers.Activation(activation)(x)
model = Model(inputs, x, name='deeplabv3plus')
# load weights
if weights == 'pascal_voc':
if backbone == 'xception':
weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH_X,
cache_subdir='models')
else:
weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5',
WEIGHTS_PATH_MOBILE,
cache_subdir='models')
model.load_weights(weights_path, by_name=True)
elif weights == 'cityscapes':
if backbone == 'xception':
weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels_cityscapes.h5',
WEIGHTS_PATH_X_CS,
cache_subdir='models')
else:
weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels_cityscapes.h5',
WEIGHTS_PATH_MOBILE_CS,
cache_subdir='models')
model.load_weights(weights_path, by_name=True)
return model
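# Usage sketch (illustrative; pretrained weights download from the URLs above):
#   model = Deeplabv3(weights='pascal_voc', input_shape=(512, 512, 3),
#                     classes=21, backbone='mobilenetv2', activation='softmax')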
def preprocess_input(x):
    """Preprocesses a numpy array encoding a batch of images.
    # Arguments
        x: a 4D numpy array consisting of RGB values within [0, 255].
    # Returns
        Input array scaled to [-1., 1.]
    """
    # Call the Keras helper through its full path: a bare `preprocess_input`
    # here would recurse into this wrapper, since the def shadows the import.
    return tf.keras.applications.imagenet_utils.preprocess_input(x, mode='tf')
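# Worked example (illustrative): mode='tf' rescales as x / 127.5 - 1, so pixel
# values 0, 127.5 and 255 map to -1.0, 0.0 and 1.0 respectively.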
| [
"tensorflow.keras.layers.ZeroPadding2D",
"tensorflow.keras.layers.Concatenate",
"tensorflow.keras.backend.int_shape",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.Add",
"tensorflow.python.keras.utils.data_utils.get_file",
"tensorflow.keras.layers.DepthwiseConv2D",
"tensorflow.keras.models.Model",
"tensorflow.shape",
"tensorflow.python.keras.utils.layer_utils.get_source_inputs",
"tensorflow.keras.layers.add",
"tensorflow.keras.layers.Reshape",
"tensorflow.keras.applications.imagenet_utils.preprocess_input",
"tensorflow.keras.layers.Activation",
"tensorflow.keras.layers.GlobalAveragePooling2D",
"tensorflow.keras.layers.BatchNormalization",
"tensorflow.keras.layers.Dropout",
"tensorflow.keras.layers.experimental.preprocessing.Resizing",
"tensorflow.keras.layers.Input"
] | deeplab/modelv2.py | [(360, 'tensorflow.shape', 'tf.shape', (['x'], {}), True, 'import tensorflow as tf\n'), (362, 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', (['b4'], {}), True, 'import tensorflow as tf\n'), (370, 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', (['x'], {}), True, 'import tensorflow as tf\n'), (428, 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', (['img_input'], {}), True, 'import tensorflow as tf\n'), (442, 'tensorflow.keras.models.Model', 'Model', (['inputs', 'x'], {'name': '"""deeplabv3plus"""'}), False, 'from tensorflow.keras.models import Model\n'), (476, 'tensorflow.keras.applications.imagenet_utils.preprocess_input', 'preprocess_input', (['x'], {'mode': '"""tf"""'}), False, 'from tensorflow.keras.applications.imagenet_utils import preprocess_input\n'), (72, 'tensorflow.keras.layers.DepthwiseConv2D', 'DepthwiseConv2D', (['(kernel_size, kernel_size)'], {'strides': '(stride, stride)', 'dilation_rate': '(rate, rate)', 'padding': 'depth_padding', 'use_bias': '(False)', 'name': "(prefix + '_depthwise')"}), False, 'from tensorflow.keras.layers import DepthwiseConv2D\n'), (74, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': "(prefix + '_depthwise_BN')", 'epsilon': 'epsilon'}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (77, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters', '(1, 1)'], {'padding': '"""same"""', 'use_bias': '(False)', 'name': "(prefix + '_pointwise')"}), False, 'from tensorflow.keras.layers import Conv2D\n'), (79, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': "(prefix + '_pointwise_BN')", 'epsilon': 'epsilon'}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (146, 'tensorflow.keras.layers.add', 'layers.add', (['[residual, shortcut]'], {}), False, 'from tensorflow.keras import layers\n'), (185, 'tensorflow.keras.layers.DepthwiseConv2D', 'DepthwiseConv2D', ([], {'kernel_size': '(3)', 'strides': 'stride', 'activation': 'None', 'use_bias': '(False)', 'padding': '"""same"""', 'dilation_rate': '(rate, rate)', 'name': "(prefix + 'depthwise')"}), False, 'from tensorflow.keras.layers import DepthwiseConv2D\n'), (188, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)', 'name': "(prefix + 'depthwise_BN')"}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (191, 'tensorflow.keras.layers.Activation', 'Activation', (['tf.nn.relu6'], {'name': "(prefix + 'depthwise_relu')"}), False, 'from tensorflow.keras.layers import Activation\n'), (194, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['pointwise_filters'], {'kernel_size': '(1)', 'padding': '"""same"""', 'use_bias': '(False)', 'activation': 'None', 'name': "(prefix + 'project')"}), False, 'from tensorflow.keras.layers import Conv2D\n'), (197, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)', 'name': "(prefix + 'project_BN')"}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (255, 'tensorflow.keras.layers.Input', 'Input', ([], {'shape': 'input_shape'}), False, 'from tensorflow.keras.layers import Input\n'), (361, 'tensorflow.keras.layers.GlobalAveragePooling2D', 'GlobalAveragePooling2D', ([], {}), False, 'from tensorflow.keras.layers import GlobalAveragePooling2D\n'), (364, 'tensorflow.keras.layers.Reshape', 'Reshape', (['(1, 1, b4_shape[1])'], {}), False, 'from 
tensorflow.keras.layers import Reshape\n'), (365, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(1, 1)'], {'padding': '"""same"""', 'use_bias': '(False)', 'name': '"""image_pooling"""'}), False, 'from tensorflow.keras.layers import Conv2D\n'), (367, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""image_pooling_BN"""', 'epsilon': '(1e-05)'}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (368, 'tensorflow.keras.layers.Activation', 'Activation', (['tf.nn.relu'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (371, 'tensorflow.keras.layers.experimental.preprocessing.Resizing', 'tf.keras.layers.experimental.preprocessing.Resizing', (['*size_before[1:3]'], {'interpolation': '"""bilinear"""'}), True, 'import tensorflow as tf\n'), (375, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(1, 1)'], {'padding': '"""same"""', 'use_bias': '(False)', 'name': '"""aspp0"""'}), False, 'from tensorflow.keras.layers import Conv2D\n'), (376, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""aspp0_BN"""', 'epsilon': '(1e-05)'}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (377, 'tensorflow.keras.layers.Activation', 'Activation', (['tf.nn.relu'], {'name': '"""aspp0_activation"""'}), False, 'from tensorflow.keras.layers import Activation\n'), (396, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(256)', '(1, 1)'], {'padding': '"""same"""', 'use_bias': '(False)', 'name': '"""concat_projection"""'}), False, 'from tensorflow.keras.layers import Conv2D\n'), (398, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""concat_projection_BN"""', 'epsilon': '(1e-05)'}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (399, 'tensorflow.keras.layers.Activation', 'Activation', (['tf.nn.relu'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (400, 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.1)'], {}), False, 'from tensorflow.keras.layers import Dropout\n'), (406, 'tensorflow.keras.backend.int_shape', 'tf.keras.backend.int_shape', (['skip1'], {}), True, 'import tensorflow as tf\n'), (427, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['classes', '(1, 1)'], {'padding': '"""same"""', 'name': 'last_layer_name'}), False, 'from tensorflow.keras.layers import Conv2D\n'), (429, 'tensorflow.keras.layers.experimental.preprocessing.Resizing', 'tf.keras.layers.experimental.preprocessing.Resizing', (['*size_before3[1:3]'], {'interpolation': '"""bilinear"""'}), True, 'import tensorflow as tf\n'), (435, 'tensorflow.python.keras.utils.layer_utils.get_source_inputs', 'get_source_inputs', (['input_tensor'], {}), False, 'from tensorflow.python.keras.utils.layer_utils import get_source_inputs\n'), (67, 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(pad_beg, pad_end)'], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D\n'), (71, 'tensorflow.keras.layers.Activation', 'Activation', (['tf.nn.relu'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (76, 'tensorflow.keras.layers.Activation', 'Activation', (['tf.nn.relu'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (81, 'tensorflow.keras.layers.Activation', 'Activation', (['tf.nn.relu'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (98, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters', '(kernel_size, kernel_size)'], {'strides': '(stride, stride)', 'padding': '"""same"""', 'use_bias': 
'(False)', 'dilation_rate': '(rate, rate)', 'name': 'prefix'}), False, 'from tensorflow.keras.layers import Conv2D\n'), (109, 'tensorflow.keras.layers.ZeroPadding2D', 'ZeroPadding2D', (['(pad_beg, pad_end)'], {}), False, 'from tensorflow.keras.layers import ZeroPadding2D\n'), (110, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['filters', '(kernel_size, kernel_size)'], {'strides': '(stride, stride)', 'padding': '"""valid"""', 'use_bias': '(False)', 'dilation_rate': '(rate, rate)', 'name': 'prefix'}), False, 'from tensorflow.keras.layers import Conv2D\n'), (145, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': "(prefix + '_shortcut_BN')"}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (148, 'tensorflow.keras.layers.add', 'layers.add', (['[residual, inputs]'], {}), False, 'from tensorflow.keras import layers\n'), (176, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(expansion * in_channels)'], {'kernel_size': '(1)', 'padding': '"""same"""', 'use_bias': '(False)', 'activation': 'None', 'name': "(prefix + 'expand')"}), False, 'from tensorflow.keras.layers import Conv2D\n'), (179, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)', 'name': "(prefix + 'expand_BN')"}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (181, 'tensorflow.keras.layers.Activation', 'Activation', (['tf.nn.relu6'], {'name': "(prefix + 'expand_relu')"}), False, 'from tensorflow.keras.layers import Activation\n'), (201, 'tensorflow.keras.layers.Add', 'Add', ([], {'name': "(prefix + 'add')"}), False, 'from tensorflow.keras.layers import Add\n'), (271, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'strides': '(2, 2)', 'name': '"""entry_flow_conv1_1"""', 'use_bias': '(False)', 'padding': '"""same"""'}), False, 'from tensorflow.keras.layers import Conv2D\n'), (273, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""entry_flow_conv1_1_BN"""'}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (274, 'tensorflow.keras.layers.Activation', 'Activation', (['tf.nn.relu'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (277, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""entry_flow_conv1_2_BN"""'}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (278, 'tensorflow.keras.layers.Activation', 'Activation', (['tf.nn.relu'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (305, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['first_block_filters'], {'kernel_size': '(3)', 'strides': '(2, 2)', 'padding': '"""same"""', 'use_bias': '(False)', 'name': "('Conv' if input_shape[2] == 3 else 'Conv_')"}), False, 'from tensorflow.keras.layers import Conv2D\n'), (309, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'epsilon': '(0.001)', 'momentum': '(0.999)', 'name': '"""Conv_BN"""'}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (311, 'tensorflow.keras.layers.Activation', 'Activation', (['tf.nn.relu6'], {'name': '"""Conv_Relu6"""'}), False, 'from tensorflow.keras.layers import Activation\n'), (392, 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), False, 'from tensorflow.keras.layers import Concatenate\n'), (394, 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), False, 'from tensorflow.keras.layers import Concatenate\n'), (407, 
'tensorflow.keras.layers.experimental.preprocessing.Resizing', 'tf.keras.layers.experimental.preprocessing.Resizing', (['*skip_size[1:3]'], {'interpolation': '"""bilinear"""'}), True, 'import tensorflow as tf\n'), (410, 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(48)', '(1, 1)'], {'padding': '"""same"""', 'use_bias': '(False)', 'name': '"""feature_projection0"""'}), False, 'from tensorflow.keras.layers import Conv2D\n'), (412, 'tensorflow.keras.layers.BatchNormalization', 'BatchNormalization', ([], {'name': '"""feature_projection0_BN"""', 'epsilon': '(1e-05)'}), False, 'from tensorflow.keras.layers import BatchNormalization\n'), (414, 'tensorflow.keras.layers.Activation', 'Activation', (['tf.nn.relu'], {}), False, 'from tensorflow.keras.layers import Activation\n'), (415, 'tensorflow.keras.layers.Concatenate', 'Concatenate', ([], {}), False, 'from tensorflow.keras.layers import Concatenate\n'), (440, 'tensorflow.keras.layers.Activation', 'tf.keras.layers.Activation', (['activation'], {}), True, 'import tensorflow as tf\n'), (448, 'tensorflow.python.keras.utils.data_utils.get_file', 'get_file', (['"""deeplabv3_xception_tf_dim_ordering_tf_kernels.h5"""', 'WEIGHTS_PATH_X'], {'cache_subdir': '"""models"""'}), False, 'from tensorflow.python.keras.utils.data_utils import get_file\n'), (452, 'tensorflow.python.keras.utils.data_utils.get_file', 'get_file', (['"""deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5"""', 'WEIGHTS_PATH_MOBILE'], {'cache_subdir': '"""models"""'}), False, 'from tensorflow.python.keras.utils.data_utils import get_file\n'), (458, 'tensorflow.python.keras.utils.data_utils.get_file', 'get_file', (['"""deeplabv3_xception_tf_dim_ordering_tf_kernels_cityscapes.h5"""', 'WEIGHTS_PATH_X_CS'], {'cache_subdir': '"""models"""'}), False, 'from tensorflow.python.keras.utils.data_utils import get_file\n'), (462, 'tensorflow.python.keras.utils.data_utils.get_file', 'get_file', (['"""deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels_cityscapes.h5"""', 'WEIGHTS_PATH_MOBILE_CS'], {'cache_subdir': '"""models"""'}), False, 'from tensorflow.python.keras.utils.data_utils import get_file\n')] |
youchangxin/DeepLabV3Plus | 2ee1495b7140a4dc3494e9d4b3557640e380d7b6 | # -*- coding: utf-8 -*-
import os
import tensorflow as tf
import shutil
from deeplabv3plus import model
from dataset import Dataset
from config import cfg
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
log = cfg.TRAIN.LOGDIR
EPOCHS = cfg.TRAIN.EPOCHS
save_every_n_epoch = cfg.TRAIN.SAVE_EPOCH
if os.path.exists(log):
    shutil.rmtree(log)
if __name__ == '__main__':
# GPU settings
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
TrainSet = Dataset('train')
    model = model(depthwise=True, backbone='mobilenetv2')  # rebinds the imported `model` factory to the built instance
if os.listdir('./saved_weights'):
latest_weight = tf.train.latest_checkpoint('./saved_weights')
# latest_weight = r"./saved_model/epoch-14"
model.load_weights(latest_weight)
# define loss and optimizer
loss_object = tf.keras.losses.SparseCategoricalCrossentropy()
optimizer = tf.keras.optimizers.Adam(lr=1e-5)
train_loss = tf.keras.metrics.Mean(name='train_loss')
train_accuracy = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')
miou = tf.keras.metrics.MeanIoU(num_classes=21, name='miou')
    summary_writer = tf.summary.create_file_writer(logdir='tensorboard')  # instantiate the summary writer
tf.summary.trace_on(profiler=True)
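    # Sketch (illustrative, not in the original script): pairing trace_on with a
    # trace_export call writes the collected graph/profile for TensorBoard, e.g.
    #   with summary_writer.as_default():
    #       tf.summary.trace_export(name="train_graph", step=0, profiler_outdir="tensorboard")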
# @tf.function
def train_step(image_batch, label_batch):
with tf.GradientTape() as tape:
predictions = model(image_batch)
            loss = loss_object(y_true=label_batch, y_pred=predictions)
gradients = tape.gradient(loss, model.trainable_variables)
optimizer.apply_gradients(grads_and_vars=zip(gradients, model.trainable_variables))
train_loss.update_state(values=loss)
train_accuracy.update_state(y_true=label_batch, y_pred=predictions)
miou.update_state(y_true=label_batch, y_pred=tf.argmax(predictions, axis=-1))
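    # Shape sketch (illustrative): with batch size B the model emits (B, H, W, 21)
    # class scores; SparseCategoricalCrossentropy compares them against integer
    # masks of shape (B, H, W), and tf.argmax(..., axis=-1) collapses the scores
    # to per-pixel class ids for the MeanIoU update.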
# start training
step = 0
for epoch in range(EPOCHS):
for img, labels in TrainSet:
train_step(img, labels)
print("Epoch: {}/{}, step:{}, loss: {:.5f}, accuracy: {:.5f}, miou: {:.5f}".format(epoch + 1,
EPOCHS,
step,
train_loss.result().numpy(),
train_accuracy.result().numpy(),
miou.result().numpy()))
with summary_writer.as_default():
tf.summary.scalar("loss", train_loss.result().numpy(), step=step)
step += 1
if (epoch+1) % save_every_n_epoch == 0:
model.save_weights(filepath='./saved_model' + "/epoch-{}".format(epoch), save_format='tf')
    tf.saved_model.save(model, 'deeplabv3plus_saved_model')  # SavedModel export writes a directory, not an .h5 file
'''
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-6)
model.compile(loss='categorical_crossentropy', optimizer=optimizer,
metrics=[tf.keras.metrics.CategoricalAccuracy(name='accuracy'),
tf.keras.metrics.MeanIoU(num_classes=21, name="meanIoU")],
experimental_run_tf_function=False
)
model.fit_generator(TrainSet, steps_per_epoch=416, epochs=10)
model.save_weights('./deeplabv3plus')
'''
| [
"tensorflow.summary.trace_on",
"tensorflow.train.latest_checkpoint",
"tensorflow.config.experimental.set_memory_growth",
"tensorflow.keras.losses.SparseCategoricalCrossentropy",
"tensorflow.config.experimental.list_physical_devices",
"tensorflow.summary.create_file_writer",
"tensorflow.saved_model.save",
"tensorflow.GradientTape",
"tensorflow.keras.optimizers.Adam",
"tensorflow.keras.metrics.SparseCategoricalAccuracy",
"tensorflow.argmax",
"tensorflow.keras.metrics.Mean",
"tensorflow.keras.metrics.MeanIoU"
] | train.py | [(16, 'os.path.exists', 'os.path.exists', (['log'], {}), False, 'import os\n'), (16, 'shutil.rmtree', 'shutil.rmtree', (['log'], {}), False, 'import shutil\n'), (20, 'tensorflow.config.experimental.list_physical_devices', 'tf.config.experimental.list_physical_devices', (['"""GPU"""'], {}), True, 'import tensorflow as tf\n'), (25, 'dataset.Dataset', 'Dataset', (['"""train"""'], {}), False, 'from dataset import Dataset\n'), (28, 'os.listdir', 'os.listdir', (['"""./saved_weights"""'], {}), False, 'import os\n'), (34, 'tensorflow.keras.losses.SparseCategoricalCrossentropy', 'tf.keras.losses.SparseCategoricalCrossentropy', ([], {}), True, 'import tensorflow as tf\n'), (35, 'tensorflow.keras.optimizers.Adam', 'tf.keras.optimizers.Adam', ([], {'lr': '(1e-05)'}), True, 'import tensorflow as tf\n'), (37, 'tensorflow.keras.metrics.Mean', 'tf.keras.metrics.Mean', ([], {'name': '"""train_loss"""'}), True, 'import tensorflow as tf\n'), (38, 'tensorflow.keras.metrics.SparseCategoricalAccuracy', 'tf.keras.metrics.SparseCategoricalAccuracy', ([], {'name': '"""train_accuracy"""'}), True, 'import tensorflow as tf\n'), (39, 'tensorflow.keras.metrics.MeanIoU', 'tf.keras.metrics.MeanIoU', ([], {'num_classes': '(21)', 'name': '"""miou"""'}), True, 'import tensorflow as tf\n'), (41, 'tensorflow.summary.create_file_writer', 'tf.summary.create_file_writer', ([], {'logdir': '"""tensorboard"""'}), True, 'import tensorflow as tf\n'), (42, 'tensorflow.summary.trace_on', 'tf.summary.trace_on', ([], {'profiler': '(True)'}), True, 'import tensorflow as tf\n'), (74, 'tensorflow.saved_model.save', 'tf.saved_model.save', (['model', '"""FCN8s.h5"""'], {}), True, 'import tensorflow as tf\n'), (29, 'tensorflow.train.latest_checkpoint', 'tf.train.latest_checkpoint', (['"""./saved_weights"""'], {}), True, 'import tensorflow as tf\n'), (23, 'tensorflow.config.experimental.set_memory_growth', 'tf.config.experimental.set_memory_growth', (['gpu', '(True)'], {}), True, 'import tensorflow as tf\n'), (46, 'tensorflow.GradientTape', 'tf.GradientTape', ([], {}), True, 'import tensorflow as tf\n'), (54, 'tensorflow.argmax', 'tf.argmax', (['predictions'], {'axis': '(-1)'}), True, 'import tensorflow as tf\n')] |