seed (string, 59-2.16k chars) | seed_api (string, 14-101 chars) | index (int64, 0-523)
---|---|---
from tensorflow.python.ops import array_ops
def _create_definition_if_needed(self):
  """Creates the function definition if it's not created yet."""
  if self._definition is not None:
    return
  # Create the func_def object.
  temp_graph = _FuncGraph()
  with temp_graph.as_default():
    # List of placeholders for the function_def.
    inputs = []
    for (argname, argtype) in self._args:
      argholder = array_ops.placeholder(argtype, name=argname)
      inputs.append(argholder)
    # Call func and gather the output tensors.
    with vs.variable_scope("", custom_getter=temp_graph.getvar):
      outputs = self._func(*inputs)
    # If func only returned one value, make it a tuple.
    if not isinstance(outputs, (list, tuple)):
      outputs = (outputs,)
    if any([_ is None for _ in outputs]):
      raise ValueError("Function can not return None.")
    # Ensures each output is a Tensor.
    outputs = [ops.convert_to_tensor(_) for _ in outputs]
| tensorflow.python.ops.array_ops.placeholder | 500 |
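A minimal sketch of the `array_ops.placeholder` call used above, assuming TF 1.x graph mode (`tf.placeholder` is the public alias; shape and names are illustrative):

```python
import tensorflow as tf

# Placeholder fed at session-run time, as in the function body above.
x = tf.placeholder(tf.float32, shape=[None, 3], name="x")
y = tf.reduce_sum(x, axis=1)
with tf.Session() as sess:
    print(sess.run(y, feed_dict={x: [[1., 2., 3.]]}))  # [6.]
```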
import tensorflow as tf
# Nearest Neighbor calculation using L1 Distance
# Calculate L1 Distance
# reduction_indices is the pre-1.0 alias of axis.
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))),
                         reduction_indices=1)
| tensorflow.negative | 501 |
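A hedged, self-contained sketch of the same L1 nearest-neighbor distance, with `xtr`/`xte` supplied as placeholders (the 784-dim shape is an illustrative assumption):

```python
import tensorflow as tf

xtr = tf.placeholder(tf.float32, [None, 784])  # training set, one row per example
xte = tf.placeholder(tf.float32, [784])        # a single test example
# tf.add(xtr, tf.negative(xte)) broadcasts the negated test row against every training row.
distance = tf.reduce_sum(tf.abs(tf.add(xtr, tf.negative(xte))), axis=1)
pred = tf.argmin(distance, axis=0)  # index of the nearest neighbor
```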
from tensorflow.python.summary import summary
if isinstance(features, dict):
  return features
return {"": features}

def _get_optimizer(optimizer):
  if callable(optimizer):
    return optimizer()
  else:
    return optimizer

def _add_hidden_layer_summary(value, tag):
  summary.scalar("%s_fraction_of_zero_values" % tag, nn.zero_fraction(value))
  summary.histogram("%s_activation" % tag, value)

def _dnn_model_fn(features, labels, mode, params, config=None):
  """Deep Neural Net model_fn.

  Args:
    features: `Tensor` or dict of `Tensor` (depends on data passed to `fit`).
    labels: `Tensor` of shape [batch_size, 1] or [batch_size] labels of
      dtype `int32` or `int64` in the range `[0, n_classes)`.
    mode: Defines whether this is training, evaluation or prediction.
      See `ModeKeys`.
    params: A dict of hyperparameters.
      The following hyperparameters are expected:
| tensorflow.python.summary.summary.histogram | 502 |
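A minimal sketch of `summary.histogram`/`summary.scalar` as used by `_add_hidden_layer_summary`, assuming TF 1.x (tag names and the log directory are illustrative):

```python
import tensorflow as tf

value = tf.random_normal([128, 64])
tf.summary.scalar("hiddenlayer_0_fraction_of_zero_values", tf.nn.zero_fraction(value))
tf.summary.histogram("hiddenlayer_0_activation", value)
merged = tf.summary.merge_all()
with tf.Session() as sess:
    writer = tf.summary.FileWriter("/tmp/summaries", sess.graph)
    writer.add_summary(sess.run(merged), global_step=0)  # view in TensorBoard
    writer.close()
```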
from tensorflow.contrib.layers.python.layers import feature_column
# The given hash_bucket_size results in variables larger than the
# default min_slice_size attribute, so the variables are partitioned.
sparse_feature = feature_column.sparse_column_with_hash_bucket(
    'language', hash_bucket_size=2e7)
embedding_feature = feature_column.embedding_column(
    sparse_feature, dimension=1)
tf_config = {
| tensorflow.contrib.layers.python.layers.feature_column.embedding_column | 503 |
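A small sketch of `embedding_column` end to end, assuming TF 1.x contrib (bucket count, dimension, and the toy sparse input are illustrative):

```python
import tensorflow as tf
from tensorflow.contrib import layers

sparse_col = layers.sparse_column_with_hash_bucket('language', hash_bucket_size=100)
embedding_col = layers.embedding_column(sparse_col, dimension=4)
features = {'language': tf.SparseTensor(indices=[[0, 0], [1, 0]],
                                        values=['en', 'fr'],
                                        dense_shape=[2, 1])}
net = layers.input_from_feature_columns(features, [embedding_col])  # shape [2, 4]
```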
import os

from tensorflow.contrib.learn.python.learn.datasets import base

train_path = os.path.join(data_dir, 'dbpedia_csv', 'train.csv')
test_path = os.path.join(data_dir, 'dbpedia_csv', 'test.csv')
if size == 'small':
  # Reduce the size of the original data by a factor of 1000.
  base.shrink_csv(train_path, 1000)
  base.shrink_csv(test_path, 1000)
  train_path = train_path.replace('train.csv', 'train_small.csv')
  test_path = test_path.replace('test.csv', 'test_small.csv')
else:
  module_path = os.path.dirname(__file__)
| tensorflow.contrib.learn.python.learn.datasets.base.shrink_csv | 504 |
import tensorflow as tf
assert self.mean is not None
assert self.mean_sq is not None
out = tf.nn.relu(self._normalize(x, self.mean, self.mean_sq, "reference"))
self.reference_output = out

def __call__(self, x, update=False):
  with tf.variable_scope(self.name) as scope:
    if not update:
      new_coeff = 1. / (self.batch_size + 1.)
      old_coeff = 1. - new_coeff
      new_mean = tf.reduce_mean(x, [1, 2], keep_dims=True)
      new_mean_sq = tf.reduce_mean(tf.square(x), [1, 2], keep_dims=True)
| tensorflow.variable_scope | 505 |
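A minimal sketch of `tf.variable_scope` with variable sharing, assuming TF 1.x (layer and variable names are illustrative):

```python
import tensorflow as tf

def dense(x):
    w = tf.get_variable("w", shape=[3, 3], initializer=tf.random_normal_initializer())
    return tf.matmul(x, w)

x = tf.placeholder(tf.float32, [None, 3])
with tf.variable_scope("layer"):
    y1 = dense(x)
with tf.variable_scope("layer", reuse=True):  # reuses the same "layer/w" variable
    y2 = dense(x)
```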
from tensorflow.python.ops import math_ops
@distribution_util.AppendDocstring(_poisson_sample_note)
def _log_cdf(self, x):
  return math_ops.log(self.cdf(x))

@distribution_util.AppendDocstring(_poisson_sample_note)
def _cdf(self, x):
  x = self._assert_valid_sample(x, check_integer=False)
  return math_ops.igammac(math_ops.floor(x + 1), self.rate)

def _log_normalization(self):
  return self.rate

def _log_unnormalized_prob(self, x):
  x = self._assert_valid_sample(x, check_integer=True)
  return x * math_ops.log(self.rate) - math_ops.lgamma(x + 1)
| tensorflow.python.ops.math_ops.floor | 506 |
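A quick check of the identity used by `_cdf` above: the Poisson CDF equals the regularized upper incomplete gamma function `igammac(floor(x) + 1, rate)`. A hedged TF 1.x sketch comparing it against a direct sum of pmf terms:

```python
import tensorflow as tf

rate = tf.constant(3.0)
x = tf.constant(4.0)
cdf_via_igammac = tf.igammac(tf.floor(x + 1), rate)
# Direct sum of Poisson pmf terms for k = 0..4.
k = tf.range(0., 5.)
pmf = tf.exp(k * tf.log(rate) - tf.lgamma(k + 1) - rate)
with tf.Session() as sess:
    print(sess.run([cdf_via_igammac, tf.reduce_sum(pmf)]))  # both ~0.815
```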
import tensorflow.contrib.graph_editor as ge
  l = [(op.name if hasattr(op, "name") else str(op)) for op in ops]
  if sort_outputs:
    return sorted(l)
  return l
else:
  return ops.name if hasattr(ops, "name") else str(ops)

def my_add_control_inputs(wait_to_do_ops, inputs_to_do_before):
  for op in wait_to_do_ops:
    ci = [i for i in inputs_to_do_before
          if op.control_inputs is None or i not in op.control_inputs]
    ge.add_control_inputs(op, ci)
| tensorflow.contrib.graph_editor.add_control_inputs | 507 |
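A minimal sketch of `ge.add_control_inputs`, forcing one op to run before another, assuming TF 1.x contrib (the ops involved are illustrative):

```python
import tensorflow as tf
import tensorflow.contrib.graph_editor as ge

a = tf.Variable(0, name="a")
update_a = tf.assign_add(a, 1)
b = tf.identity(a, name="b")
# Make reading b wait until update_a has run.
ge.add_control_inputs(b.op, [update_a.op])
```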
from tensorflow.contrib import layers
def _dnn_logits(self, features):
  net = layers.input_from_feature_columns(
      features,
      self._get_dnn_feature_columns(),
      weight_collections=[self._dnn_weight_collection])
  for layer_id, num_hidden_units in enumerate(self._dnn_hidden_units):
    net = layers.legacy_fully_connected(
        net,
        num_hidden_units,
        activation_fn=self._dnn_activation_fn,
        weight_collections=[self._dnn_weight_collection],
        bias_collections=[self._dnn_weight_collection],
        name="hiddenlayer_%d" % layer_id)
| tensorflow.contrib.layers.legacy_fully_connected | 508 |
from tensorflow.contrib.rnn.python.ops import lstm_ops
test_configs = self._GetTestConfig()
for config_name, config in test_configs.items():
  num_layers = config["num_layers"]
  num_units = config["num_units"]
  batch_size = config["batch_size"]
  seq_length = config["seq_length"]
  with ops.Graph().as_default(), ops.device("/device:GPU:0"):
    inputs = seq_length * [
        array_ops.zeros([batch_size, num_units], dtypes.float32)
    ]
    cell = lambda: lstm_ops.LSTMBlockCell(num_units=num_units)  # pylint: disable=cell-var-from-loop
    multi_cell = rnn_cell.MultiRNNCell(
        [cell() for _ in range(num_layers)])
    outputs, final_state = core_rnn.static_rnn(
        multi_cell, inputs, dtype=dtypes.float32)
    trainable_variables = ops.get_collection(
        ops.GraphKeys.TRAINABLE_VARIABLES)
    gradients = gradients_impl.gradients([outputs, final_state],
                                         trainable_variables)
    training_op = control_flow_ops.group(*gradients)
| tensorflow.contrib.rnn.python.ops.lstm_ops.LSTMBlockCell | 509 |
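A compact sketch of `LSTMBlockCell` inside a static RNN, assuming TF 1.x contrib (shapes are illustrative):

```python
import tensorflow as tf
from tensorflow.contrib.rnn import LSTMBlockCell

batch_size, seq_length, num_units = 4, 5, 8
inputs = [tf.zeros([batch_size, num_units]) for _ in range(seq_length)]
cell = LSTMBlockCell(num_units=num_units)  # fused, faster drop-in for BasicLSTMCell
outputs, final_state = tf.nn.static_rnn(cell, inputs, dtype=tf.float32)
```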
import tensorflow as tf
  Args:
    shape_a: a list containing the shape of the first tensor.
    shape_b: a list containing the shape of the second tensor.

  Returns:
    Either a tf.no_op() when the shapes are all static, or a
    tf.assert_equal() op when the shapes are dynamic.

  Raises:
    ValueError: When the shapes are both static and unequal.
  """
  if (all(isinstance(dim, int) for dim in shape_a) and
      all(isinstance(dim, int) for dim in shape_b)):
    if shape_a != shape_b:
      raise ValueError('Unequal shapes {}, {}'.format(shape_a, shape_b))
    else:
      return tf.no_op()
  else:
    return tf.assert_equal(shape_a, shape_b)

def assert_shape_equal_along_first_dimension(shape_a, shape_b):
  """Asserts that shape_a and shape_b are the same along the 0th dimension.

  If the shapes are static, raises a ValueError when the shapes
  mismatch.
  If the shapes are dynamic, raises a tf InvalidArgumentError when the shapes
  mismatch.

  Args:
| tensorflow.no_op | 510 |
import tensorflow as tf
  if name == "input_1":
    return self.b.shape
  if name == "input_2":
    return self.c.shape

def test_tensor_rearrange():
  tensor_rearrange = TensorRearrange(seed=713)
  in_node_a = tensor_rearrange.get_placeholder("input_0")
  in_node_b = tensor_rearrange.get_placeholder("input_1")
  in_node_c = tensor_rearrange.get_placeholder("input_2")
  stitched = tf.dynamic_stitch([[1, 10], [[0, 7, 9], [5, 8, 3]], [[6], [4], [2]]],
                               [in_node_a, in_node_b, in_node_c])  # should be 11,5,4
  list_of_parts = tf.dynamic_partition(
      tf.transpose(stitched, perm=[1, 2, 0]),
      [[0, 1, 2, 3], [1, 0, 2, 3], [2, 3, 1, 0], [2, 1, 0, 3], [0, 1, 2, 3]],
      num_partitions=4)  # after permute becomes 5,4,11; return all partitions 5,11
  node_a = tf.div(list_of_parts[0], list_of_parts[1])
  node_b = tf.divide(list_of_parts[2], list_of_parts[3])
  trace_node = tf.trace(node_a) + node_b  # there is a broadcast here
  out_node = (tf.cast(tf.count_nonzero(trace_node), dtype=tf.float32)
              + tf.Variable(tf.random_normal(shape=(2, 3))))
  placeholders = [in_node_a, in_node_b, in_node_c]
| tensorflow.dynamic_stitch | 511 |
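A minimal sketch of `tf.dynamic_stitch` on its own: each index tensor says where the matching data tensor's entries land in the merged result (TF 1.x):

```python
import tensorflow as tf

indices = [tf.constant([0, 2]), tf.constant([1, 3])]
data = [tf.constant([10., 30.]), tf.constant([20., 40.])]
stitched = tf.dynamic_stitch(indices, data)  # result[indices[m][i]] = data[m][i]
with tf.Session() as sess:
    print(sess.run(stitched))  # [10. 20. 30. 40.]
```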
from tensorflow.contrib import layers
    ValueError: If both linear_feature_columns and dnn_feature_columns are
      empty at the same time.
  """
  target_column = layers.regression_target(
      weight_column_name=weight_column_name,
      target_dimension=target_dimension)
| tensorflow.contrib.layers.regression_target | 512 |
import tensorflow as tf
    true_fn=lambda: tf.map_fn(_decode_png_mask, masks, dtype=tf.float32),
    false_fn=lambda: tf.zeros([0, height, width], dtype=tf.float32))
| tensorflow.zeros | 513 |
from tensorflow.contrib.distributions.python.ops import distribution_util
  Args:
    x: `Tensor`.
    name: `String`. The name to give this op.

  Returns:
    x: `Tensor`. Input transposed/reshaped to `B_+E_+S_`.
    sample_shape: `Tensor` (1D, `int32`).
  """
  with self._name_scope(name, values=[x]):
    x = ops.convert_to_tensor(x, name="x")
    sample_shape, batch_shape, event_shape = self.get_shape(x)
    event_shape = distribution_util.pick_vector(
        self._event_ndims_is_0, (1,), event_shape)
    batch_shape = distribution_util.pick_vector(
        self._batch_ndims_is_0, (1,), batch_shape)
    new_shape = array_ops.concat(0, ((-1,), batch_shape, event_shape))
    x = array_ops.reshape(x, shape=new_shape)
    x = distribution_util.rotate_transpose(x, shift=-1)
    return x, sample_shape

def undo_make_batch_of_event_sample_matrices(
    self, x, sample_shape, name="undo_make_batch_of_event_sample_matrices"):
  """Reshapes/transposes `Distribution` `Tensor` from B_+E_+S_ to S+B+E.
| tensorflow.contrib.distributions.python.ops.distribution_util.pick_vector | 514 |
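A small sketch of `distribution_util.pick_vector`, which returns one of two vectors depending on a scalar boolean predicate (TF 1.x contrib; values are illustrative):

```python
import tensorflow as tf
from tensorflow.contrib.distributions.python.ops import distribution_util

cond = tf.constant(True)
v = distribution_util.pick_vector(cond,
                                  tf.constant([1, 2]),
                                  tf.constant([3, 4]))
with tf.Session() as sess:
    print(sess.run(v))  # [1 2]
```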
import tensorflow as tf
import numpy as np
import pytest

@pytest.mark.parametrize('white', [True, False])
def test_diags(session_tf, white, mu, sqrt_diag, K):
    """
    The covariance of q(x) can be Cholesky matrices or diagonal matrices.
    Here we make sure the behaviours overlap.
    """
    # The chols are diagonal matrices, with the same entries as the diag representation.
    chol_from_diag = tf.stack(
        [tf.diag(sqrt_diag[:, i]) for i in range(Datum.N)])  # N x M x M
    # run
    kl_diag = gauss_kl(mu, sqrt_diag, K if white else None)
    kl_dense = gauss_kl(mu, chol_from_diag, K if white else None)
    np.testing.assert_allclose(kl_diag.eval(), kl_dense.eval())
| tensorflow.diag | 515 |
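A minimal sketch of `tf.diag`, which builds a diagonal matrix from a vector (TF 1.x; later renamed `tf.linalg.diag`):

```python
import tensorflow as tf

v = tf.constant([1., 2., 3.])
m = tf.diag(v)  # 3x3 matrix with v on the diagonal, zeros elsewhere
with tf.Session() as sess:
    print(sess.run(m))
```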
import tensorflow as tf
return tf.cond(
    tf.logical_or(tf.greater(pad_h1, 0), tf.greater(pad_w1, 0)),
| tensorflow.greater | 516 |
import tensorflow as tf
        validate_indices=False), [-1]),
    model=model, num_nodes=num_nodes,
    pos_weight=pos_weight,
    norm=norm)

# Initialize session
sess = tf.Session()
sess.run(tf.global_variables_initializer())
cost_val = []
acc_val = []

def get_roc_score(edges_pos, edges_neg, emb=None):
| tensorflow.global_variables_initializer | 517 |
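A minimal sketch of the standard TF 1.x initialization pattern used above: variables hold no value until the initializer op runs.

```python
import tensorflow as tf

w = tf.Variable(tf.zeros([2, 2]), name="w")
init = tf.global_variables_initializer()  # one op that initializes all global variables
with tf.Session() as sess:
    sess.run(init)       # must run before any variable is read
    print(sess.run(w))
```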
from tensorflow.contrib.distributions.python.ops import distribution_util
@distribution_util.AppendDocstring(
    """Note: when `rate` is an integer, there are actually two modes: `rate`
    and `rate - 1`. In this case we return the larger, i.e., `rate`.""")
def _mode(self):
  return math_ops.floor(self.rate)

def _assert_valid_sample(self, x, check_integer=True):
  if not self.validate_args:
    return x
  dependencies = [check_ops.assert_non_negative(x)]
  if check_integer:
    dependencies += [distribution_util.assert_integer_form(
        x, message="x has non-integer components.")]
  return control_flow_ops.with_dependencies(dependencies, x)
| tensorflow.contrib.distributions.python.ops.distribution_util.assert_integer_form | 518 |
from tensorflow.python.ops import math_ops
def _next_array_size(required_size, growth_factor=1.5):
  """Calculate the next size for reallocating a dynamic array.

  Args:
    required_size: number or tf.Tensor specifying required array capacity.
    growth_factor: optional number or tf.Tensor specifying the growth factor
      between subsequent allocations.

  Returns:
    tf.Tensor with dtype=int32 giving the next array size.
  """
  exponent = math_ops.ceil(
      math_ops.log(math_ops.cast(required_size, dtypes.float32))
      / math_ops.log(math_ops.cast(growth_factor, dtypes.float32)))
  return math_ops.cast(math_ops.ceil(growth_factor ** exponent), dtypes.int32)

def streaming_concat(values,
                     axis=0,
                     max_size=None,
                     metrics_collections=None,
                     updates_collections=None,
                     name=None):
  """Concatenate values along an axis across batches.

  The function `streaming_concat` creates two local variables, `array` and
  `size`, that are used to store concatenated values. Internally, `array` is
  used as storage for a dynamic array (if `max_size` is `None`), which ensures
  that updates can be run in amortized constant time.
| tensorflow.python.ops.math_ops.ceil | 519 |
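A worked check of `_next_array_size`: it returns the smallest power of `growth_factor` (rounded up) that is at least `required_size`. For `required_size=10`, `growth_factor=1.5`: `exponent = ceil(log(10)/log(1.5)) = 6` and `ceil(1.5**6) = ceil(11.39) = 12`. A hedged plain-Python equivalent:

```python
import math

def next_array_size(required_size, growth_factor=1.5):
    # Smallest integer exponent e with growth_factor**e >= required_size.
    exponent = math.ceil(math.log(required_size) / math.log(growth_factor))
    return math.ceil(growth_factor ** exponent)

assert next_array_size(10) == 12  # ceil(1.5**6) = ceil(11.39)
assert next_array_size(11) == 12
assert next_array_size(12) == 18  # 12 > 1.5**6, so bump to ceil(1.5**7) = 18
```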
import tensorflow as tf
"""Define a single cell with variational dropout"""
def get_a_cell(state_size,input_prob,state_prob,num_input):
if cell_type == 'LSTM':
if activation == 'linear':
lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, activation = tf.identity, state_is_tuple=True)
cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)
elif activation == 'relu':
lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, activation = tf.nn.relu, state_is_tuple=True)
cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)
else: #tanh by default
lstm=tf.nn.rnn_cell.LSTMCell(num_units=state_size, state_is_tuple=True)
cell_drop=tf.contrib.rnn.DropoutWrapper(lstm,variational_recurrent=True,dtype=tf.float32, input_size=num_input,input_keep_prob=input_prob,state_keep_prob=state_prob)
elif cell_type == 'GRU':
| tensorflow.contrib.rnn.DropoutWrapper | 520 |
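A minimal sketch of `tf.contrib.rnn.DropoutWrapper` with variational recurrent dropout, assuming TF 1.x (as in the snippet above, `input_size` must be given when `variational_recurrent=True` and inputs are dropped; sizes are illustrative):

```python
import tensorflow as tf

num_input, state_size = 16, 32
lstm = tf.nn.rnn_cell.LSTMCell(num_units=state_size, state_is_tuple=True)
cell = tf.contrib.rnn.DropoutWrapper(
    lstm,
    input_keep_prob=0.9,
    state_keep_prob=0.9,
    variational_recurrent=True,  # reuse one dropout mask across all time steps
    input_size=num_input,        # input depth, needed to build the input mask
    dtype=tf.float32)
```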
import tensorflow as tf
self.block4_2 = self.res_block_3_layers(self.block4_1, [512, 512, 2048], "res5b")  # 7*7
self.block4_3 = self.res_block_3_layers(self.block4_2, [512, 512, 2048], "res5c")  # 7*7
# upsample layers begin
self.deconv_1 = self.deconv_bn_relu(
    self.block4_3, name='deconv_1', kernel_size=3, output_channels=1024,
    initializer=tf.contrib.layers.variance_scaling_initializer(),
    stride=2, bn=True, training=self.is_training)  # 14*14
self.deconv_2 = self.deconv_bn_relu(
    self.deconv_1, name='deconv_2', kernel_size=3, output_channels=512,
    initializer=tf.contrib.layers.variance_scaling_initializer(),
    stride=2, bn=True, training=self.is_training)  # 28*28
self.deconv_3 = self.deconv_bn_relu(
    self.deconv_2, name='deconv_3', kernel_size=3, output_channels=256,
    initializer=tf.contrib.layers.variance_scaling_initializer(),
    stride=2, bn=True, training=self.is_training)  # 56*56
self.deconv_4 = self.deconv_bn_relu(
    self.deconv_3, name='deconv_4', kernel_size=3, output_channels=128,
    initializer=tf.contrib.layers.variance_scaling_initializer(),
    stride=2, bn=True, training=self.is_training)  # 112*112
self.deconv_5 = self.deconv_bn_relu(
    self.deconv_4, name='deconv_5', kernel_size=3, output_channels=64,
    initializer=tf.contrib.layers.variance_scaling_initializer(),
    stride=2, bn=True, training=self.is_training)  # 224*224
# self.final_layer = self.conv_layer(bottom=self.deconv_5, kernal_size=1, in_channels=64, out_channels=3, stride=1, name='final_layer')
self.final_layer = self.conv_bn_relu(
    bottom=self.deconv_5, name='final_layer', kernel_size=1, output_channels=3,
    initializer=tf.contrib.layers.variance_scaling_initializer(),
    bn=False, training=self.is_training, relu=False)
# self.pool5 = self.avg_pool(self.block4_3, 7, 1, "pool5")
# self.fc0 = self.fc_layer(self.pool5, 2048, 1024, "fc0")
# self.relu1 = tf.nn.relu(self.fc0)
# if train_mode is not None:
| tensorflow.contrib.layers.variance_scaling_initializer | 521 |
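A minimal sketch of `tf.contrib.layers.variance_scaling_initializer` (TF 1.x; the defaults `factor=2.0, mode='FAN_IN'` give He-style initialization, commonly paired with ReLU):

```python
import tensorflow as tf

init = tf.contrib.layers.variance_scaling_initializer(factor=2.0, mode='FAN_IN',
                                                      uniform=False)
# 3x3 conv kernel, 64 input channels, 128 output channels (illustrative shape).
w = tf.get_variable("w", shape=[3, 3, 64, 128], initializer=init)
```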
from tensorflow.contrib.framework import deprecated
def _at_k_name(name, k=None, class_id=None):
  if k is not None:
    name = '%s_at_%d' % (name, k)
  else:
    name = '%s_at_k' % (name)
  if class_id is not None:
    name = '%s_class%d' % (name, class_id)
  return name

@deprecated('2016-11-08', 'Please use `streaming_sparse_recall_at_k`, '
            'and reshape labels from [batch_size] to [batch_size, 1].')
@deprecated_args(IGNORE_MASK_DATE, IGNORE_MASK_INSTRUCTIONS, 'ignore_mask')
def streaming_recall_at_k(predictions, labels, k, ignore_mask=None,
                          weights=None, metrics_collections=None,
                          updates_collections=None, name=None):
  """Computes the recall@k of the predictions with respect to dense labels.

  The `streaming_recall_at_k` function creates two local variables, `total` and
  `count`, that are used to compute the recall@k frequency. This frequency is
  ultimately returned as `recall_at_<k>`: an idempotent operation that simply
  divides `total` by `count`.
| tensorflow.contrib.framework.deprecated | 522 |
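A minimal sketch of the `deprecated` decorator: the wrapped function still works but logs a deprecation warning and has the notice appended to its docstring (TF 1.x contrib; date and instructions are illustrative):

```python
from tensorflow.contrib.framework import deprecated

@deprecated('2016-11-08', 'Use new_fn instead.')
def old_fn(x):
    return x + 1

old_fn(1)  # returns 2, logging a deprecation warning on first call
```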
from tensorflow.python.framework import dtypes
# _OverloadedFunction. We need to instantiate it with the
# right input types.
output_types = [
    dtypes.DType(_.type)
    for _ in defined.definition.signature.output_arg
]
| tensorflow.python.framework.dtypes.DType | 523 |
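A small sketch of `dtypes.DType`, which wraps a `types_pb2.DataType` enum value as above (TF 1.x internals; the enum value 1 is `DT_FLOAT`):

```python
from tensorflow.python.framework import dtypes

dt = dtypes.DType(1)           # types_pb2.DT_FLOAT
print(dt.name)                 # 'float32'
print(dt == dtypes.float32)    # True
```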