Module keras.api.keras.backend
Public API for tf.keras.backend namespace.
Expand source code
# This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.keras.backend namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from keras.backend import _v1_name_scope as name_scope
from keras.backend import abs
from keras.backend import all
from keras.backend import any
from keras.backend import arange
from keras.backend import argmax
from keras.backend import argmin
from keras.backend import backend
from keras.backend import batch_dot
from keras.backend import batch_flatten
from keras.backend import batch_get_value
from keras.backend import batch_normalization
from keras.backend import batch_set_value
from keras.backend import bias_add
from keras.backend import binary_crossentropy
from keras.backend import cast
from keras.backend import cast_to_floatx
from keras.backend import categorical_crossentropy
from keras.backend import clear_session
from keras.backend import clip
from keras.backend import concatenate
from keras.backend import constant
from keras.backend import conv1d
from keras.backend import conv2d
from keras.backend import conv2d_transpose
from keras.backend import conv3d
from keras.backend import cos
from keras.backend import count_params
from keras.backend import ctc_batch_cost
from keras.backend import ctc_decode
from keras.backend import ctc_label_dense_to_sparse
from keras.backend import cumprod
from keras.backend import cumsum
from keras.backend import depthwise_conv2d
from keras.backend import dot
from keras.backend import dropout
from keras.backend import dtype
from keras.backend import elu
from keras.backend import equal
from keras.backend import eval
from keras.backend import exp
from keras.backend import expand_dims
from keras.backend import eye
from keras.backend import flatten
from keras.backend import foldl
from keras.backend import foldr
from keras.backend import function
from keras.backend import gather
from keras.backend import get_session
from keras.backend import get_uid
from keras.backend import get_value
from keras.backend import gradients
from keras.backend import greater
from keras.backend import greater_equal
from keras.backend import hard_sigmoid
from keras.backend import in_test_phase
from keras.backend import in_top_k
from keras.backend import in_train_phase
from keras.backend import int_shape
from keras.backend import is_keras_tensor
from keras.backend import is_sparse
from keras.backend import l2_normalize
from keras.backend import learning_phase
from keras.backend import learning_phase_scope
from keras.backend import less
from keras.backend import less_equal
from keras.backend import local_conv1d
from keras.backend import local_conv2d
from keras.backend import log
from keras.backend import manual_variable_initialization
from keras.backend import map_fn
from keras.backend import max
from keras.backend import maximum
from keras.backend import mean
from keras.backend import min
from keras.backend import minimum
from keras.backend import moving_average_update
from keras.backend import ndim
from keras.backend import normalize_batch_in_training
from keras.backend import not_equal
from keras.backend import one_hot
from keras.backend import ones
from keras.backend import ones_like
from keras.backend import permute_dimensions
from keras.backend import placeholder
from keras.backend import pool2d
from keras.backend import pool3d
from keras.backend import pow
from keras.backend import print_tensor
from keras.backend import prod
from keras.backend import random_bernoulli
from keras.backend import random_binomial
from keras.backend import random_normal
from keras.backend import random_normal_variable
from keras.backend import random_uniform
from keras.backend import random_uniform_variable
from keras.backend import relu
from keras.backend import repeat
from keras.backend import repeat_elements
from keras.backend import reset_uids
from keras.backend import reshape
from keras.backend import resize_images
from keras.backend import resize_volumes
from keras.backend import reverse
from keras.backend import rnn
from keras.backend import round
from keras.backend import separable_conv2d
from keras.backend import set_learning_phase
from keras.backend import set_session
from keras.backend import set_value
from keras.backend import shape
from keras.backend import sigmoid
from keras.backend import sign
from keras.backend import sin
from keras.backend import softmax
from keras.backend import softplus
from keras.backend import softsign
from keras.backend import sparse_categorical_crossentropy
from keras.backend import spatial_2d_padding
from keras.backend import spatial_3d_padding
from keras.backend import sqrt
from keras.backend import square
from keras.backend import squeeze
from keras.backend import stack
from keras.backend import std
from keras.backend import stop_gradient
from keras.backend import sum
from keras.backend import switch
from keras.backend import tanh
from keras.backend import temporal_padding
from keras.backend import tile
from keras.backend import to_dense
from keras.backend import transpose
from keras.backend import truncated_normal
from keras.backend import update
from keras.backend import update_add
from keras.backend import update_sub
from keras.backend import var
from keras.backend import variable
from keras.backend import zeros
from keras.backend import zeros_like
from keras.backend_config import epsilon
from keras.backend_config import floatx
from keras.backend_config import image_data_format
from keras.backend_config import set_epsilon
from keras.backend_config import set_floatx
from keras.backend_config import set_image_data_format
del _print_function
from tensorflow.python.util import module_wrapper as _module_wrapper
if not isinstance(_sys.modules[__name__], _module_wrapper.TFModuleWrapper):
  _sys.modules[__name__] = _module_wrapper.TFModuleWrapper(
      _sys.modules[__name__], "keras.backend", public_apis=None,
      deprecation=True, has_lite=False)
Functions
def abs(x)
-
Element-wise absolute value.
Args
x
- Tensor or variable.
Returns
A tensor.
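Example (a usage sketch added for illustration, not part of the generated docs; the exact array repr may vary by NumPy/TensorFlow version):
>>> x = tf.constant([-1.5, 0.0, 2.0])
>>> tf.keras.backend.eval(tf.keras.backend.abs(x))
array([1.5, 0. , 2. ], dtype=float32)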
Expand source code
@keras_export('keras.backend.abs')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def abs(x):
  """Element-wise absolute value.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.abs(x)
def all(x, axis=None, keepdims=False)
-
Bitwise reduction (logical AND).
Args
x
- Tensor or variable.
axis
- axis along which to perform the reduction.
keepdims
- whether to drop or broadcast the reduction axes.
Returns
A uint8 tensor (0s and 1s).
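Example (an added illustrative sketch, not from the generated docs):
>>> x = tf.constant([[True, False], [True, True]])
>>> tf.keras.backend.eval(tf.keras.backend.all(x, axis=1))
array([False,  True])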
Expand source code
@keras_export('keras.backend.all')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def all(x, axis=None, keepdims=False):
  """Bitwise reduction (logical AND).

  Args:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.
      keepdims: whether the drop or broadcast the reduction axes.

  Returns:
      A uint8 tensor (0s and 1s).
  """
  x = tf.cast(x, tf.bool)
  return tf.reduce_all(x, axis, keepdims)
def any(x, axis=None, keepdims=False)
-
Bitwise reduction (logical OR).
Args
x
- Tensor or variable.
axis
- axis along which to perform the reduction.
keepdims
- whether to drop or broadcast the reduction axes.
Returns
A uint8 tensor (0s and 1s).
Expand source code
@keras_export('keras.backend.any')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def any(x, axis=None, keepdims=False):
  """Bitwise reduction (logical OR).

  Args:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.
      keepdims: whether the drop or broadcast the reduction axes.

  Returns:
      A uint8 tensor (0s and 1s).
  """
  x = tf.cast(x, tf.bool)
  return tf.reduce_any(x, axis, keepdims)
def arange(start, stop=None, step=1, dtype='int32')
-
Creates a 1D tensor containing a sequence of integers.
The function arguments use the same convention as Theano's arange: if only one argument is provided, it is in fact the "stop" argument and "start" is 0.
The default type of the returned tensor is 'int32' to match TensorFlow's default.
Args
start
- Start value.
stop
- Stop value.
step
- Difference between two successive values.
dtype
- Integer dtype to use.
Returns
An integer tensor.
Example
>>> tf.keras.backend.arange(start=0, stop=10, step=1.5)
<tf.Tensor: shape=(7,), dtype=float32, numpy=array([0. , 1.5, 3. , 4.5, 6. , 7.5, 9. ], dtype=float32)>
Expand source code
@keras_export('keras.backend.arange')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def arange(start, stop=None, step=1, dtype='int32'):
  """Creates a 1D tensor containing a sequence of integers.

  The function arguments use the same convention as
  Theano's arange: if only one argument is provided,
  it is in fact the "stop" argument and "start" is 0.

  The default type of the returned tensor is `'int32'` to
  match TensorFlow's default.

  Args:
      start: Start value.
      stop: Stop value.
      step: Difference between two successive values.
      dtype: Integer dtype to use.

  Returns:
      An integer tensor.

  Example:

      >>> tf.keras.backend.arange(start=0, stop=10, step=1.5)
      <tf.Tensor: shape=(7,), dtype=float32,
          numpy=array([0. , 1.5, 3. , 4.5, 6. , 7.5, 9. ], dtype=float32)>
  """
  # Match the behavior of numpy and Theano by returning an empty sequence.
  if stop is None and start < 0:
    start = 0
  result = tf.range(start, limit=stop, delta=step, name='arange')
  if dtype != 'int32':
    result = cast(result, dtype)
  return result
def argmax(x, axis=-1)
-
Returns the index of the maximum value along an axis.
Args
x
- Tensor or variable.
axis
- axis along which to perform the reduction.
Returns
A tensor.
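Example (an added illustrative sketch, not from the generated docs):
>>> x = tf.constant([[1., 3., 2.], [5., 0., 4.]])
>>> tf.keras.backend.eval(tf.keras.backend.argmax(x, axis=-1))
array([1, 0])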
Expand source code
@keras_export('keras.backend.argmax')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def argmax(x, axis=-1):
  """Returns the index of the maximum value along an axis.

  Args:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.

  Returns:
      A tensor.
  """
  return tf.argmax(x, axis)
def argmin(x, axis=-1)
-
Returns the index of the minimum value along an axis.
Args
x
- Tensor or variable.
axis
- axis along which to perform the reduction.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.argmin')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def argmin(x, axis=-1):
  """Returns the index of the minimum value along an axis.

  Args:
      x: Tensor or variable.
      axis: axis along which to perform the reduction.

  Returns:
      A tensor.
  """
  return tf.argmin(x, axis)
def backend()
-
Publicly accessible method for determining the current backend.
Only exists for API compatibility with multi-backend Keras.
Returns
The string "tensorflow".
Expand source code
@keras_export('keras.backend.backend')
@doc_controls.do_not_generate_docs
def backend():
  """Publicly accessible method for determining the current backend.

  Only exists for API compatibility with multi-backend Keras.

  Returns:
      The string "tensorflow".
  """
  return 'tensorflow'
def batch_dot(x, y, axes=None)
-
Batchwise dot product.
batch_dot is used to compute the dot product of x and y when x and y are data in batch, i.e. in a shape of (batch_size, :). batch_dot results in a tensor or variable with fewer dimensions than the input. If the number of dimensions is reduced to 1, we use expand_dims to make sure that ndim is at least 2.
Args
x
- Keras tensor or variable with ndim >= 2.
y
- Keras tensor or variable with ndim >= 2.
axes
- Tuple or list of integers with target dimensions, or single integer. The sizes of x.shape[axes[0]] and y.shape[axes[1]] should be equal.
Returns
A tensor with shape equal to the concatenation of x's shape (less the dimension that was summed over) and y's shape (less the batch dimension and the dimension that was summed over). If the final rank is 1, we reshape it to (batch_size, 1).
Examples
>>> x_batch = tf.keras.backend.ones(shape=(32, 20, 1))
>>> y_batch = tf.keras.backend.ones(shape=(32, 30, 20))
>>> xy_batch_dot = tf.keras.backend.batch_dot(x_batch, y_batch, axes=(1, 2))
>>> tf.keras.backend.int_shape(xy_batch_dot)
(32, 1, 30)
Shape inference: Let x's shape be (100, 20) and y's shape be (100, 30, 20). If axes is (1, 2), to find the output shape of the resultant tensor, loop through each dimension in x's shape and y's shape:
* x.shape[0] : 100 : append to output shape
* x.shape[1] : 20 : do not append to output shape, dimension 1 of x has been summed over. (dot_axes[0] = 1)
* y.shape[0] : 100 : do not append to output shape, always ignore first dimension of y
* y.shape[1] : 30 : append to output shape
* y.shape[2] : 20 : do not append to output shape, dimension 2 of y has been summed over. (dot_axes[1] = 2)
output_shape = (100, 30)
Expand source code
@keras_export('keras.backend.batch_dot')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def batch_dot(x, y, axes=None):
  """Batchwise dot product.

  `batch_dot` is used to compute dot product of `x` and `y` when
  `x` and `y` are data in batch, i.e. in a shape of `(batch_size, :)`.
  `batch_dot` results in a tensor or variable with less dimensions
  than the input. If the number of dimensions is reduced to 1,
  we use `expand_dims` to make sure that ndim is at least 2.

  Args:
      x: Keras tensor or variable with `ndim >= 2`.
      y: Keras tensor or variable with `ndim >= 2`.
      axes: Tuple or list of integers with target dimensions, or single
          integer. The sizes of `x.shape[axes[0]]` and `y.shape[axes[1]]`
          should be equal.

  Returns:
      A tensor with shape equal to the concatenation of `x`'s shape
      (less the dimension that was summed over) and `y`'s shape
      (less the batch dimension and the dimension that was summed over).
      If the final rank is 1, we reshape it to `(batch_size, 1)`.

  Examples:

  >>> x_batch = tf.keras.backend.ones(shape=(32, 20, 1))
  >>> y_batch = tf.keras.backend.ones(shape=(32, 30, 20))
  >>> xy_batch_dot = tf.keras.backend.batch_dot(x_batch, y_batch, axes=(1, 2))
  >>> tf.keras.backend.int_shape(xy_batch_dot)
  (32, 1, 30)

  Shape inference:
    Let `x`'s shape be `(100, 20)` and `y`'s shape be `(100, 30, 20)`.
    If `axes` is (1, 2), to find the output shape of resultant tensor,
    loop through each dimension in `x`'s shape and `y`'s shape:
    * `x.shape[0]` : 100 : append to output shape
    * `x.shape[1]` : 20 : do not append to output shape,
      dimension 1 of `x` has been summed over. (`dot_axes[0]` = 1)
    * `y.shape[0]` : 100 : do not append to output shape,
      always ignore first dimension of `y`
    * `y.shape[1]` : 30 : append to output shape
    * `y.shape[2]` : 20 : do not append to output shape,
      dimension 2 of `y` has been summed over. (`dot_axes[1]` = 2)
    `output_shape` = `(100, 30)`
  """
  x_shape = int_shape(x)
  y_shape = int_shape(y)

  x_ndim = len(x_shape)
  y_ndim = len(y_shape)

  if x_ndim < 2 or y_ndim < 2:
    raise ValueError('Cannot do batch_dot on inputs '
                     'with rank < 2. '
                     'Received inputs with shapes ' +
                     str(x_shape) + ' and ' +
                     str(y_shape) + '.')

  x_batch_size = x_shape[0]
  y_batch_size = y_shape[0]

  if x_batch_size is not None and y_batch_size is not None:
    if x_batch_size != y_batch_size:
      raise ValueError('Cannot do batch_dot on inputs '
                       'with different batch sizes. '
                       'Received inputs with shapes ' +
                       str(x_shape) + ' and ' +
                       str(y_shape) + '.')
  if isinstance(axes, int):
    axes = [axes, axes]

  if axes is None:
    if y_ndim == 2:
      axes = [x_ndim - 1, y_ndim - 1]
    else:
      axes = [x_ndim - 1, y_ndim - 2]

  if py_any(isinstance(a, (list, tuple)) for a in axes):
    raise ValueError('Multiple target dimensions are not supported. ' +
                     'Expected: None, int, (int, int), ' +
                     'Provided: ' + str(axes))

  # if tuple, convert to list.
  axes = list(axes)

  # convert negative indices.
  if axes[0] < 0:
    axes[0] += x_ndim
  if axes[1] < 0:
    axes[1] += y_ndim

  # sanity checks
  if 0 in axes:
    raise ValueError('Cannot perform batch_dot over axis 0. '
                     'If your inputs are not batched, '
                     'add a dummy batch dimension to your '
                     'inputs using K.expand_dims(x, 0)')
  a0, a1 = axes
  d1 = x_shape[a0]
  d2 = y_shape[a1]

  if d1 is not None and d2 is not None and d1 != d2:
    raise ValueError('Cannot do batch_dot on inputs with shapes ' +
                     str(x_shape) + ' and ' + str(y_shape) +
                     ' with axes=' + str(axes) + '. x.shape[%d] != '
                     'y.shape[%d] (%d != %d).' % (axes[0], axes[1], d1, d2))

  # backup ndims. Need them later.
  orig_x_ndim = x_ndim
  orig_y_ndim = y_ndim

  # if rank is 2, expand to 3.
  if x_ndim == 2:
    x = tf.expand_dims(x, 1)
    a0 += 1
    x_ndim += 1
  if y_ndim == 2:
    y = tf.expand_dims(y, 2)
    y_ndim += 1

  # bring x's dimension to be reduced to last axis.
  if a0 != x_ndim - 1:
    pattern = list(range(x_ndim))
    for i in range(a0, x_ndim - 1):
      pattern[i] = pattern[i + 1]
    pattern[-1] = a0
    x = tf.compat.v1.transpose(x, pattern)

  # bring y's dimension to be reduced to axis 1.
  if a1 != 1:
    pattern = list(range(y_ndim))
    for i in range(a1, 1, -1):
      pattern[i] = pattern[i - 1]
    pattern[1] = a1
    y = tf.compat.v1.transpose(y, pattern)

  # normalize both inputs to rank 3.
  if x_ndim > 3:
    # squash middle dimensions of x.
    x_shape = shape(x)
    x_mid_dims = x_shape[1:-1]
    x_squashed_shape = tf.stack([x_shape[0], -1, x_shape[-1]])
    x = tf.reshape(x, x_squashed_shape)
    x_squashed = True
  else:
    x_squashed = False

  if y_ndim > 3:
    # squash trailing dimensions of y.
    y_shape = shape(y)
    y_trail_dims = y_shape[2:]
    y_squashed_shape = tf.stack([y_shape[0], y_shape[1], -1])
    y = tf.reshape(y, y_squashed_shape)
    y_squashed = True
  else:
    y_squashed = False

  result = tf.matmul(x, y)

  # if inputs were squashed, we have to reshape the matmul output.
  output_shape = tf.shape(result)
  do_reshape = False

  if x_squashed:
    output_shape = tf.concat(
        [output_shape[:1], x_mid_dims, output_shape[-1:]], 0)
    do_reshape = True

  if y_squashed:
    output_shape = tf.concat([output_shape[:-1], y_trail_dims], 0)
    do_reshape = True

  if do_reshape:
    result = tf.reshape(result, output_shape)

  # if the inputs were originally rank 2, we remove the added 1 dim.
  if orig_x_ndim == 2:
    result = tf.squeeze(result, 1)
  elif orig_y_ndim == 2:
    result = tf.squeeze(result, -1)

  return result
def batch_flatten(x)
-
Turns an nD tensor into a 2D tensor with the same 0th dimension.
In other words, it flattens each data sample of a batch.
Args
x
- A tensor or variable.
Returns
A tensor.
Examples
Flattening a 4D tensor to 2D by collapsing the non-batch dimensions.
>>> x_batch = tf.keras.backend.ones(shape=(2, 3, 4, 5))
>>> x_batch_flatten = batch_flatten(x_batch)
>>> tf.keras.backend.int_shape(x_batch_flatten)
(2, 60)
Expand source code
@keras_export('keras.backend.batch_flatten')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def batch_flatten(x):
  """Turn a nD tensor into a 2D tensor with same 0th dimension.

  In other words, it flattens each data samples of a batch.

  Args:
      x: A tensor or variable.

  Returns:
      A tensor.

  Examples:
    Flattening a 3D tensor to 2D by collapsing the last dimension.

  >>> x_batch = tf.keras.backend.ones(shape=(2, 3, 4, 5))
  >>> x_batch_flatten = batch_flatten(x_batch)
  >>> tf.keras.backend.int_shape(x_batch_flatten)
  (2, 60)
  """
  x = tf.reshape(x, tf.stack([-1, prod(shape(x)[1:])]))
  return x
def batch_get_value(tensors)
-
Returns the value of more than one tensor variable.
Args
tensors
- list of ops to run.
Returns
A list of Numpy arrays.
Raises
RuntimeError
- If this method is called inside defun.
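Example (an added usage sketch, not from the generated docs):
>>> v1 = tf.keras.backend.variable([1., 2.])
>>> v2 = tf.keras.backend.variable([3.])
>>> tf.keras.backend.batch_get_value([v1, v2])
[array([1., 2.], dtype=float32), array([3.], dtype=float32)]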
Expand source code
@keras_export('keras.backend.batch_get_value')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def batch_get_value(tensors):
  """Returns the value of more than one tensor variable.

  Args:
      tensors: list of ops to run.

  Returns:
      A list of Numpy arrays.

  Raises:
      RuntimeError: If this method is called inside defun.
  """
  if tf.executing_eagerly():
    return [x.numpy() for x in tensors]
  elif tf.inside_function():  # pylint: disable=protected-access
    raise RuntimeError('Cannot get value inside Tensorflow graph function.')
  if tensors:
    return get_session(tensors).run(tensors)
  else:
    return []
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=0.001)
-
Applies batch normalization on x given mean, var, beta and gamma.
I.e. returns:
output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta
Args
x
- Input tensor or variable.
mean
- Mean of batch.
var
- Variance of batch.
beta
- Tensor with which to center the input.
gamma
- Tensor by which to scale the input.
axis
- Integer, the axis that should be normalized. (typically the features axis).
epsilon
- Fuzz factor.
Returns
A tensor.
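Example (an added usage sketch, not from the generated docs; shapes and statistics are illustrative):
>>> x = tf.constant([[1., 2.], [3., 4.]])
>>> mean, var = tf.constant([2., 3.]), tf.constant([1., 1.])
>>> beta, gamma = tf.zeros([2]), tf.ones([2])
>>> y = tf.keras.backend.batch_normalization(x, mean, var, beta, gamma)
>>> tf.keras.backend.int_shape(y)
(2, 2)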
Expand source code
@keras_export('keras.backend.batch_normalization')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def batch_normalization(x, mean, var, beta, gamma, axis=-1, epsilon=1e-3):
  """Applies batch normalization on x given mean, var, beta and gamma.

  I.e. returns:
  `output = (x - mean) / (sqrt(var) + epsilon) * gamma + beta`

  Args:
      x: Input tensor or variable.
      mean: Mean of batch.
      var: Variance of batch.
      beta: Tensor with which to center the input.
      gamma: Tensor by which to scale the input.
      axis: Integer, the axis that should be normalized.
          (typically the features axis).
      epsilon: Fuzz factor.

  Returns:
      A tensor.
  """
  if ndim(x) == 4:
    # The CPU implementation of `fused_batch_norm` only supports NHWC
    if axis == 1 or axis == -3:
      tf_data_format = 'NCHW'
    elif axis == 3 or axis == -1:
      tf_data_format = 'NHWC'
    else:
      tf_data_format = None

    if (tf_data_format == 'NHWC' or
        tf_data_format == 'NCHW' and _has_nchw_support()):
      # The mean / var / beta / gamma tensors may be broadcasted
      # so they may have extra axes of size 1, which should be squeezed.
      if ndim(mean) > 1:
        mean = tf.reshape(mean, [-1])
      if ndim(var) > 1:
        var = tf.reshape(var, [-1])
      if beta is None:
        beta = zeros_like(mean)
      elif ndim(beta) > 1:
        beta = tf.reshape(beta, [-1])
      if gamma is None:
        gamma = ones_like(mean)
      elif ndim(gamma) > 1:
        gamma = tf.reshape(gamma, [-1])
      y, _, _ = tf.compat.v1.nn.fused_batch_norm(
          x,
          gamma,
          beta,
          epsilon=epsilon,
          mean=mean,
          variance=var,
          data_format=tf_data_format,
          is_training=False)
      return y
  return tf.nn.batch_normalization(x, mean, var, beta, gamma, epsilon)
def batch_set_value(tuples)
-
Sets the values of many tensor variables at once.
Args
tuples
- a list of tuples (tensor, value). value should be a Numpy array.
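Example (an added usage sketch, not from the generated docs):
>>> v = tf.keras.backend.variable([0., 0.])
>>> tf.keras.backend.batch_set_value([(v, np.array([1., 2.]))])
>>> tf.keras.backend.get_value(v)
array([1., 2.], dtype=float32)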
Expand source code
@keras_export('keras.backend.batch_set_value')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def batch_set_value(tuples):
  """Sets the values of many tensor variables at once.

  Args:
      tuples: a list of tuples `(tensor, value)`.
          `value` should be a Numpy array.
  """
  if tf.compat.v1.executing_eagerly_outside_functions():
    for x, value in tuples:
      x.assign(np.asarray(value, dtype=dtype_numpy(x)))
  else:
    with get_graph().as_default():
      if tuples:
        assign_ops = []
        feed_dict = {}
        for x, value in tuples:
          value = np.asarray(value, dtype=dtype_numpy(x))
          tf_dtype = tf.as_dtype(x.dtype.name.split('_')[0])
          if hasattr(x, '_assign_placeholder'):
            assign_placeholder = x._assign_placeholder
            assign_op = x._assign_op
          else:
            # In order to support assigning weights to resizable variables in
            # Keras, we make a placeholder with the correct number of
            # dimensions but with None in each dimension. This way, we can
            # assign weights of any size (as long as they have the correct
            # dimensionality).
            placeholder_shape = tf.TensorShape([None] * value.ndim)
            assign_placeholder = tf.compat.v1.placeholder(
                tf_dtype, shape=placeholder_shape)
            assign_op = x.assign(assign_placeholder)
            x._assign_placeholder = assign_placeholder
            x._assign_op = assign_op
          assign_ops.append(assign_op)
          feed_dict[assign_placeholder] = value
        get_session().run(assign_ops, feed_dict=feed_dict)
def bias_add(x, bias, data_format=None)
-
Adds a bias vector to a tensor.
Args
x
- Tensor or variable.
bias
- Bias tensor to add.
data_format
- string,
"channels_last"
or"channels_first"
.
Returns
Output tensor.
Raises
ValueError
- In one of the two cases below:
1. invalid data_format argument.
2. invalid bias shape. The bias should be either a vector or a tensor with ndim(x) - 1 dimensions.
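Example (an added usage sketch, not from the generated docs):
>>> x = tf.zeros((2, 3))
>>> b = tf.constant([1., 2., 3.])
>>> tf.keras.backend.eval(tf.keras.backend.bias_add(x, b))
array([[1., 2., 3.],
       [1., 2., 3.]], dtype=float32)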
Expand source code
@keras_export('keras.backend.bias_add')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def bias_add(x, bias, data_format=None):
  """Adds a bias vector to a tensor.

  Args:
      x: Tensor or variable.
      bias: Bias tensor to add.
      data_format: string, `"channels_last"` or `"channels_first"`.

  Returns:
      Output tensor.

  Raises:
      ValueError: In one of the two cases below:
          1. invalid `data_format` argument.
          2. invalid bias shape. the bias should be either a vector or
             a tensor with ndim(x) - 1 dimension
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))
  bias_shape = int_shape(bias)
  if len(bias_shape) != 1 and len(bias_shape) != ndim(x) - 1:
    raise ValueError(
        'Unexpected bias dimensions %d, expect to be 1 or %d dimensions' %
        (len(bias_shape), ndim(x) - 1))

  if len(bias_shape) == 1:
    if data_format == 'channels_first':
      return tf.nn.bias_add(x, bias, data_format='NCHW')
    return tf.nn.bias_add(x, bias, data_format='NHWC')
  if ndim(x) in (3, 4, 5):
    if data_format == 'channels_first':
      bias_reshape_axis = (1, bias_shape[-1]) + bias_shape[:-1]
      return x + reshape(bias, bias_reshape_axis)
    return x + reshape(bias, (1,) + bias_shape)
  return tf.nn.bias_add(x, bias)
def binary_crossentropy(target, output, from_logits=False)
-
Binary crossentropy between an output tensor and a target tensor.
Args
target
- A tensor with the same shape as output.
output
- A tensor.
from_logits
- Whether output is expected to be a logits tensor. By default, we consider that output encodes a probability distribution.
Returns
A tensor.
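Example (an added illustrative sketch, not from the generated docs; both losses equal -log(0.9), rounded to 5 decimals):
>>> target = tf.constant([1., 0.])
>>> output = tf.constant([0.9, 0.1])
>>> loss = tf.keras.backend.binary_crossentropy(target, output)
>>> print(np.around(tf.keras.backend.eval(loss), 5))
[0.10536 0.10536]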
Expand source code
@keras_export('keras.backend.binary_crossentropy')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def binary_crossentropy(target, output, from_logits=False):
  """Binary crossentropy between an output tensor and a target tensor.

  Args:
      target: A tensor with the same shape as `output`.
      output: A tensor.
      from_logits: Whether `output` is expected to be a logits tensor.
          By default, we consider that `output`
          encodes a probability distribution.

  Returns:
      A tensor.
  """
  target = tf.convert_to_tensor(target)
  output = tf.convert_to_tensor(output)

  # Use logits whenever they are available. `softmax` and `sigmoid`
  # activations cache logits on the `output` Tensor.
  if hasattr(output, '_keras_logits'):
    output = output._keras_logits  # pylint: disable=protected-access
    if from_logits:
      warnings.warn(
          '"`binary_crossentropy` received `from_logits=True`, but the '
          '`output` argument was produced by a sigmoid or softmax activation '
          'and thus does not represent logits. Was this intended?"')
    from_logits = True

  if from_logits:
    return tf.nn.sigmoid_cross_entropy_with_logits(
        labels=target, logits=output)

  if (not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable)) and
      output.op.type == 'Sigmoid') and not hasattr(output, '_keras_history'):
    # When sigmoid activation function is used for output operation, we
    # use logits from the sigmoid function directly to compute loss in order
    # to prevent collapsing zero when training.
    assert len(output.op.inputs) == 1
    output = output.op.inputs[0]
    return tf.nn.sigmoid_cross_entropy_with_logits(
        labels=target, logits=output)

  epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
  output = tf.clip_by_value(output, epsilon_, 1. - epsilon_)

  # Compute cross entropy from probabilities.
  bce = target * tf.math.log(output + epsilon())
  bce += (1 - target) * tf.math.log(1 - output + epsilon())
  return -bce
def cast(x, dtype)
-
Casts a tensor to a different dtype and returns it.
You can cast a Keras variable but it still returns a Keras tensor.
Args
x
- Keras tensor (or variable).
dtype
- String, either ('float16', 'float32', or 'float64').
Returns
Keras tensor with dtype dtype.
Examples
Cast a float32 variable to a float64 tensor
>>> input = tf.keras.backend.ones(shape=(1,3))
>>> print(input)
<tf.Variable 'Variable:0' shape=(1, 3) dtype=float32, numpy=array([[1., 1., 1.]], dtype=float32)>
>>> cast_input = tf.keras.backend.cast(input, dtype='float64')
>>> print(cast_input)
tf.Tensor([[1. 1. 1.]], shape=(1, 3), dtype=float64)
Expand source code
@keras_export('keras.backend.cast')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def cast(x, dtype):
  """Casts a tensor to a different dtype and returns it.

  You can cast a Keras variable but it still returns a Keras tensor.

  Args:
      x: Keras tensor (or variable).
      dtype: String, either (`'float16'`, `'float32'`, or `'float64'`).

  Returns:
      Keras tensor with dtype `dtype`.

  Examples:
      Cast a float32 variable to a float64 tensor

  >>> input = tf.keras.backend.ones(shape=(1,3))
  >>> print(input)
  <tf.Variable 'Variable:0' shape=(1, 3) dtype=float32,
  numpy=array([[1., 1., 1.]], dtype=float32)>
  >>> cast_input = tf.keras.backend.cast(input, dtype='float64')
  >>> print(cast_input)
  tf.Tensor([[1. 1. 1.]], shape=(1, 3), dtype=float64)
  """
  return tf.cast(x, dtype)
def cast_to_floatx(x)
-
Cast a Numpy array to the default Keras float type.
Args
x
- Numpy array or TensorFlow tensor.
Returns
The same array (Numpy array if x was a Numpy array, or TensorFlow tensor if x was a tensor), cast to its new type.
Example
>>> tf.keras.backend.floatx()
'float32'
>>> arr = np.array([1.0, 2.0], dtype='float64')
>>> arr.dtype
dtype('float64')
>>> new_arr = cast_to_floatx(arr)
>>> new_arr
array([1., 2.], dtype=float32)
>>> new_arr.dtype
dtype('float32')
Expand source code
@keras_export('keras.backend.cast_to_floatx')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def cast_to_floatx(x):
  """Cast a Numpy array to the default Keras float type.

  Args:
      x: Numpy array or TensorFlow tensor.

  Returns:
      The same array (Numpy array if `x` was a Numpy array, or TensorFlow
      tensor if `x` was a tensor), cast to its new type.

  Example:

  >>> tf.keras.backend.floatx()
  'float32'
  >>> arr = np.array([1.0, 2.0], dtype='float64')
  >>> arr.dtype
  dtype('float64')
  >>> new_arr = cast_to_floatx(arr)
  >>> new_arr
  array([1., 2.], dtype=float32)
  >>> new_arr.dtype
  dtype('float32')
  """
  if isinstance(x, (tf.Tensor, tf.Variable, tf.SparseTensor)):
    return tf.cast(x, dtype=floatx())
  return np.asarray(x, dtype=floatx())
def categorical_crossentropy(target, output, from_logits=False, axis=-1)
-
Categorical crossentropy between an output tensor and a target tensor.
Args
target
- A tensor of the same shape as output.
output
- A tensor resulting from a softmax (unless from_logits is True, in which case output is expected to be the logits).
from_logits
- Boolean, whether output is the result of a softmax, or is a tensor of logits.
axis
- Int specifying the channels axis. axis=-1 corresponds to data format channels_last, and axis=1 corresponds to data format channels_first.
Returns
Output tensor.
Raises
ValueError
- if axis is neither -1 nor one of the axes of output.
Example:
>>> a = tf.constant([1., 0., 0., 0., 1., 0., 0., 0., 1.], shape=[3,3])
>>> print(a)
tf.Tensor(
[[1. 0. 0.]
 [0. 1. 0.]
 [0. 0. 1.]], shape=(3, 3), dtype=float32)
>>> b = tf.constant([.9, .05, .05, .05, .89, .06, .05, .01, .94], shape=[3,3])
>>> print(b)
tf.Tensor(
[[0.9  0.05 0.05]
 [0.05 0.89 0.06]
 [0.05 0.01 0.94]], shape=(3, 3), dtype=float32)
>>> loss = tf.keras.backend.categorical_crossentropy(a, b)
>>> print(np.around(loss, 5))
[0.10536 0.11653 0.06188]
>>> loss = tf.keras.backend.categorical_crossentropy(a, a)
>>> print(np.around(loss, 5))
[0. 0. 0.]
Expand source code
@keras_export('keras.backend.categorical_crossentropy')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def categorical_crossentropy(target, output, from_logits=False, axis=-1):
  """Categorical crossentropy between an output tensor and a target tensor.

  Args:
      target: A tensor of the same shape as `output`.
      output: A tensor resulting from a softmax
          (unless `from_logits` is True, in which
          case `output` is expected to be the logits).
      from_logits: Boolean, whether `output` is the
          result of a softmax, or is a tensor of logits.
      axis: Int specifying the channels axis. `axis=-1` corresponds to data
          format `channels_last`, and `axis=1` corresponds to data format
          `channels_first`.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `axis` is neither -1 nor one of the axes of `output`.

  Example:

  >>> a = tf.constant([1., 0., 0., 0., 1., 0., 0., 0., 1.], shape=[3,3])
  >>> print(a)
  tf.Tensor(
  [[1. 0. 0.]
   [0. 1. 0.]
   [0. 0. 1.]], shape=(3, 3), dtype=float32)
  >>> b = tf.constant([.9, .05, .05, .05, .89, .06, .05, .01, .94], shape=[3,3])
  >>> print(b)
  tf.Tensor(
  [[0.9  0.05 0.05]
   [0.05 0.89 0.06]
   [0.05 0.01 0.94]], shape=(3, 3), dtype=float32)
  >>> loss = tf.keras.backend.categorical_crossentropy(a, b)
  >>> print(np.around(loss, 5))
  [0.10536 0.11653 0.06188]
  >>> loss = tf.keras.backend.categorical_crossentropy(a, a)
  >>> print(np.around(loss, 5))
  [0. 0. 0.]
  """
  target = tf.convert_to_tensor(target)
  output = tf.convert_to_tensor(output)
  target.shape.assert_is_compatible_with(output.shape)

  # Use logits whenever they are available. `softmax` and `sigmoid`
  # activations cache logits on the `output` Tensor.
  if hasattr(output, '_keras_logits'):
    output = output._keras_logits  # pylint: disable=protected-access
    if from_logits:
      warnings.warn(
          '"`categorical_crossentropy` received `from_logits=True`, but '
          'the `output` argument was produced by a sigmoid or softmax '
          'activation and thus does not represent logits. '
          'Was this intended?"')
    from_logits = True

  if from_logits:
    return tf.nn.softmax_cross_entropy_with_logits(
        labels=target, logits=output, axis=axis)

  if (not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable)) and
      output.op.type == 'Softmax') and not hasattr(output, '_keras_history'):
    # When softmax activation function is used for output operation, we
    # use logits from the softmax function directly to compute loss in order
    # to prevent collapsing zero when training.
    # See b/117284466
    assert len(output.op.inputs) == 1
    output = output.op.inputs[0]
    return tf.nn.softmax_cross_entropy_with_logits(
        labels=target, logits=output, axis=axis)

  # scale preds so that the class probas of each sample sum to 1
  output = output / tf.reduce_sum(output, axis, True)
  # Compute cross entropy from probabilities.
  epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
  output = tf.clip_by_value(output, epsilon_, 1. - epsilon_)
  return -tf.reduce_sum(target * tf.math.log(output), axis)
def clear_session()
-
Resets all state generated by Keras.
Keras manages a global state, which it uses to implement the Functional model-building API and to uniquify autogenerated layer names.
If you are creating many models in a loop, this global state will consume an increasing amount of memory over time, and you may want to clear it. Calling clear_session() releases the global state: this helps avoid clutter from old models and layers, especially when memory is limited.
Example 1: calling clear_session() when creating models in a loop
for _ in range(100):
  # Without `clear_session()`, each iteration of this loop will
  # slightly increase the size of the global state managed by Keras
  model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(10)])

for _ in range(100):
  # With `clear_session()` called at the beginning,
  # Keras starts with a blank state at each iteration
  # and memory consumption is constant over time.
  tf.keras.backend.clear_session()
  model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(10)])
Example 2: resetting the layer name generation counter
>>> import tensorflow as tf
>>> layers = [tf.keras.layers.Dense(10) for _ in range(10)]
>>> new_layer = tf.keras.layers.Dense(10)
>>> print(new_layer.name)
dense_10
>>> tf.keras.backend.set_learning_phase(1)
>>> print(tf.keras.backend.learning_phase())
1
>>> tf.keras.backend.clear_session()
>>> new_layer = tf.keras.layers.Dense(10)
>>> print(new_layer.name)
dense
Expand source code
@keras_export('keras.backend.clear_session')
def clear_session():
  """Resets all state generated by Keras.

  Keras manages a global state, which it uses to implement the Functional
  model-building API and to uniquify autogenerated layer names.

  If you are creating many models in a loop, this global state will consume
  an increasing amount of memory over time, and you may want to clear it.
  Calling `clear_session()` releases the global state: this helps avoid
  clutter from old models and layers, especially when memory is limited.

  Example 1: calling `clear_session()` when creating models in a loop

  ```python
  for _ in range(100):
    # Without `clear_session()`, each iteration of this loop will
    # slightly increase the size of the global state managed by Keras
    model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(10)])

  for _ in range(100):
    # With `clear_session()` called at the beginning,
    # Keras starts with a blank state at each iteration
    # and memory consumption is constant over time.
    tf.keras.backend.clear_session()
    model = tf.keras.Sequential([tf.keras.layers.Dense(10) for _ in range(10)])
  ```

  Example 2: resetting the layer name generation counter

  >>> import tensorflow as tf
  >>> layers = [tf.keras.layers.Dense(10) for _ in range(10)]
  >>> new_layer = tf.keras.layers.Dense(10)
  >>> print(new_layer.name)
  dense_10
  >>> tf.keras.backend.set_learning_phase(1)
  >>> print(tf.keras.backend.learning_phase())
  1
  >>> tf.keras.backend.clear_session()
  >>> new_layer = tf.keras.layers.Dense(10)
  >>> print(new_layer.name)
  dense
  """
  global _SESSION
  global _GRAPH_LEARNING_PHASES  # pylint: disable=global-variable-not-assigned
  global _GRAPH_VARIABLES  # pylint: disable=global-variable-not-assigned
  global _GRAPH_TF_OPTIMIZERS  # pylint: disable=global-variable-not-assigned
  global _GRAPH
  _GRAPH.graph = None
  tf.compat.v1.reset_default_graph()
  reset_uids()
  _SESSION.session = None
  graph = get_graph()
  with graph.as_default():
    _DUMMY_EAGER_GRAPH.learning_phase_is_set = False
    _GRAPH_LEARNING_PHASES.clear()
    # Create the learning phase placeholder in graph using the default factory
    _GRAPH_LEARNING_PHASES.setdefault(graph)
    _GRAPH_VARIABLES.pop(graph, None)
    _GRAPH_TF_OPTIMIZERS.pop(graph, None)
  if tf.executing_eagerly():
    # Clear pending nodes in eager executors, kernel caches and
    # step_containers.
    context.context().clear_kernel_cache()
def clip(x, min_value, max_value)
-
Element-wise value clipping.
Args
x
- Tensor or variable.
min_value
- Python float, integer, or tensor.
max_value
- Python float, integer, or tensor.
Returns
A tensor.
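Example (an added illustrative sketch, not from the generated docs):
>>> x = tf.constant([-2.0, 0.5, 3.0])
>>> tf.keras.backend.eval(tf.keras.backend.clip(x, 0.0, 1.0))
array([0. , 0.5, 1. ], dtype=float32)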
Expand source code
@keras_export('keras.backend.clip')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def clip(x, min_value, max_value):
  """Element-wise value clipping.

  Args:
      x: Tensor or variable.
      min_value: Python float, integer, or tensor.
      max_value: Python float, integer, or tensor.

  Returns:
      A tensor.
  """
  if (isinstance(min_value, (int, float)) and
      isinstance(max_value, (int, float))):
    if max_value < min_value:
      max_value = min_value
  if min_value is None:
    min_value = -np.inf
  if max_value is None:
    max_value = np.inf
  return tf.clip_by_value(x, min_value, max_value)
def concatenate(tensors, axis=-1)
-
Concatenates a list of tensors alongside the specified axis.
Args
tensors
- list of tensors to concatenate.
axis
- concatenation axis.
Returns
A tensor.
Example
>>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
>>> b = tf.constant([[10, 20, 30], [40, 50, 60], [70, 80, 90]])
>>> tf.keras.backend.concatenate((a, b), axis=-1)
<tf.Tensor: shape=(3, 6), dtype=int32, numpy=
array([[ 1,  2,  3, 10, 20, 30],
       [ 4,  5,  6, 40, 50, 60],
       [ 7,  8,  9, 70, 80, 90]], dtype=int32)>
Expand source code
@keras_export('keras.backend.concatenate')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def concatenate(tensors, axis=-1):
  """Concatenates a list of tensors alongside the specified axis.

  Args:
      tensors: list of tensors to concatenate.
      axis: concatenation axis.

  Returns:
      A tensor.

  Example:

  >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9]])
  >>> b = tf.constant([[10, 20, 30], [40, 50, 60], [70, 80, 90]])
  >>> tf.keras.backend.concatenate((a, b), axis=-1)
  <tf.Tensor: shape=(3, 6), dtype=int32, numpy=
  array([[ 1,  2,  3, 10, 20, 30],
         [ 4,  5,  6, 40, 50, 60],
         [ 7,  8,  9, 70, 80, 90]], dtype=int32)>
  """
  if axis < 0:
    rank = ndim(tensors[0])
    if rank:
      axis %= rank
    else:
      axis = 0

  if py_all(is_sparse(x) for x in tensors):
    return tf.compat.v1.sparse_concat(axis, tensors)
  elif py_all(isinstance(x, tf.RaggedTensor) for x in tensors):
    return tf.concat(tensors, axis)
  else:
    return tf.concat([to_dense(x) for x in tensors], axis)
def constant(value, dtype=None, shape=None, name=None)
-
Creates a constant tensor.
Args
value
- A constant value (or list).
dtype
- The type of the elements of the resulting tensor.
shape
- Optional dimensions of resulting tensor.
name
- Optional name for the tensor.
Returns
A Constant Tensor.
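Example (an added usage sketch, not from the generated docs; dtype defaults to floatx(), typically float32):
>>> c = tf.keras.backend.constant([[1, 2], [3, 4]])
>>> tf.keras.backend.eval(c)
array([[1., 2.],
       [3., 4.]], dtype=float32)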
Expand source code
@keras_export('keras.backend.constant')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def constant(value, dtype=None, shape=None, name=None):
  """Creates a constant tensor.

  Args:
      value: A constant value (or list)
      dtype: The type of the elements of the resulting tensor.
      shape: Optional dimensions of resulting tensor.
      name: Optional name for the tensor.

  Returns:
      A Constant Tensor.
  """
  if dtype is None:
    dtype = floatx()

  return tf.constant(value, dtype=dtype, shape=shape, name=name)
def conv1d(x, kernel, strides=1, padding='valid', data_format=None, dilation_rate=1)
-
1D convolution.
Args
x
- Tensor or variable.
kernel
- kernel tensor.
strides
- stride integer.
padding
- string, "same", "causal" or "valid".
data_format
- string, one of "channels_last", "channels_first".
dilation_rate
- integer dilation rate.
Returns
A tensor, result of 1D convolution.
Raises
ValueError
- if data_format is neither channels_last nor channels_first.
Expand source code
@keras_export('keras.backend.conv1d')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def conv1d(x,
           kernel,
           strides=1,
           padding='valid',
           data_format=None,
           dilation_rate=1):
  """1D convolution.

  Args:
      x: Tensor or variable.
      kernel: kernel tensor.
      strides: stride integer.
      padding: string, `"same"`, `"causal"` or `"valid"`.
      data_format: string, one of "channels_last", "channels_first".
      dilation_rate: integer dilate rate.

  Returns:
      A tensor, result of 1D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
      `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  kernel_shape = kernel.shape.as_list()
  if padding == 'causal':
    # causal (dilated) convolution:
    left_pad = dilation_rate * (kernel_shape[0] - 1)
    x = temporal_padding(x, (left_pad, 0))
    padding = 'valid'
  padding = _preprocess_padding(padding)

  x, tf_data_format = _preprocess_conv1d_input(x, data_format)
  x = tf.compat.v1.nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=padding,
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NWC':
    x = tf.compat.v1.transpose(x, (0, 2, 1))  # NWC -> NCW
  return x
def conv2d(x, kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1))
-
2D convolution.
Args
x
- Tensor or variable.
kernel
- kernel tensor.
strides
- strides tuple.
padding
- string, "same" or "valid".
data_format
- "channels_last" or "channels_first".
dilation_rate
- tuple of 2 integers.
Returns
A tensor, result of 2D convolution.
Raises
ValueError
- if data_format is neither channels_last nor channels_first.
Expand source code
@keras_export('keras.backend.conv2d')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def conv2d(x,
           kernel,
           strides=(1, 1),
           padding='valid',
           data_format=None,
           dilation_rate=(1, 1)):
  """2D convolution.

  Args:
      x: Tensor or variable.
      kernel: kernel tensor.
      strides: strides tuple.
      padding: string, `"same"` or `"valid"`.
      data_format: `"channels_last"` or `"channels_first"`.
      dilation_rate: tuple of 2 integers.

  Returns:
      A tensor, result of 2D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
      `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  x, tf_data_format = _preprocess_conv2d_input(x, data_format)
  padding = _preprocess_padding(padding)
  x = tf.compat.v1.nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=padding,
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    x = tf.compat.v1.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
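Example (an added usage sketch, not from the generated docs; shapes are illustrative, channels_last layout assumed):
>>> x = tf.random.normal((1, 5, 5, 3))        # one 5x5 input with 3 channels
>>> kernel = tf.random.normal((3, 3, 3, 8))   # 3x3 kernel, 3 in / 8 out channels
>>> y = tf.keras.backend.conv2d(x, kernel, strides=(1, 1), padding='same')
>>> tf.keras.backend.int_shape(y)
(1, 5, 5, 8)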
def conv2d_transpose(x, kernel, output_shape, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1))
-
2D deconvolution (i.e. transposed convolution).
Args
x
- Tensor or variable.
kernel
- kernel tensor.
output_shape
- 1D int tensor for the output shape.
strides
- strides tuple.
padding
- string, "same" or "valid".
data_format
- string, "channels_last" or "channels_first".
dilation_rate
- Tuple of 2 integers.
Returns
A tensor, result of transposed 2D convolution.
Raises
ValueError
- if data_format is neither channels_last nor channels_first.
Expand source code
@keras_export('keras.backend.conv2d_transpose')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def conv2d_transpose(x,
                     kernel,
                     output_shape,
                     strides=(1, 1),
                     padding='valid',
                     data_format=None,
                     dilation_rate=(1, 1)):
  """2D deconvolution (i.e. transposed convolution).

  Args:
      x: Tensor or variable.
      kernel: kernel tensor.
      output_shape: 1D int tensor for the output shape.
      strides: strides tuple.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: Tuple of 2 integers.

  Returns:
      A tensor, result of transposed 2D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
      `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  # `atrous_conv2d_transpose` only supports NHWC format, even on GPU.
  if data_format == 'channels_first' and dilation_rate != (1, 1):
    force_transpose = True
  else:
    force_transpose = False

  x, tf_data_format = _preprocess_conv2d_input(x, data_format, force_transpose)

  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    output_shape = (output_shape[0], output_shape[2], output_shape[3],
                    output_shape[1])
  if output_shape[0] is None:
    output_shape = (shape(x)[0],) + tuple(output_shape[1:])

  if isinstance(output_shape, (tuple, list)):
    output_shape = tf.stack(list(output_shape))

  padding = _preprocess_padding(padding)
  if tf_data_format == 'NHWC':
    strides = (1,) + strides + (1,)
  else:
    strides = (1, 1) + strides

  if dilation_rate == (1, 1):
    x = tf.compat.v1.nn.conv2d_transpose(x, kernel, output_shape, strides,
                                         padding=padding,
                                         data_format=tf_data_format)
  else:
    assert dilation_rate[0] == dilation_rate[1]
    x = tf.nn.atrous_conv2d_transpose(
        x, kernel, output_shape, rate=dilation_rate[0], padding=padding)
  if data_format == 'channels_first' and tf_data_format == 'NHWC':
    x = tf.compat.v1.transpose(x, (0, 3, 1, 2))  # NHWC -> NCHW
  return x
def conv3d(x, kernel, strides=(1, 1, 1), padding='valid', data_format=None, dilation_rate=(1, 1, 1))
-
3D convolution.
Args
x
- Tensor or variable.
kernel
- kernel tensor.
strides
- strides tuple.
padding
- string, "same" or "valid".
data_format
- string, "channels_last" or "channels_first".
dilation_rate
- tuple of 3 integers.
Returns
A tensor, result of 3D convolution.
Raises
ValueError
- if data_format is neither channels_last nor channels_first.
Expand source code
@keras_export('keras.backend.conv3d')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def conv3d(x,
           kernel,
           strides=(1, 1, 1),
           padding='valid',
           data_format=None,
           dilation_rate=(1, 1, 1)):
  """3D convolution.

  Args:
      x: Tensor or variable.
      kernel: kernel tensor.
      strides: strides tuple.
      padding: string, `"same"` or `"valid"`.
      data_format: string, `"channels_last"` or `"channels_first"`.
      dilation_rate: tuple of 3 integers.

  Returns:
      A tensor, result of 3D convolution.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
      `channels_first`.
  """
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  x, tf_data_format = _preprocess_conv3d_input(x, data_format)
  padding = _preprocess_padding(padding)
  x = tf.compat.v1.nn.convolution(
      input=x,
      filter=kernel,
      dilation_rate=dilation_rate,
      strides=strides,
      padding=padding,
      data_format=tf_data_format)
  if data_format == 'channels_first' and tf_data_format == 'NDHWC':
    x = tf.compat.v1.transpose(x, (0, 4, 1, 2, 3))
  return x
def cos(x)
-
Computes cos of x element-wise.
Args
x
- Tensor or variable.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.cos')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def cos(x):
  """Computes cos of x element-wise.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.cos(x)
def count_params(x)
-
Returns the static number of elements in a variable or tensor.
Args
x
- Variable or tensor.
Returns
Integer, the number of scalars in x.
Example
>>> kvar = tf.keras.backend.zeros((2,3))
>>> tf.keras.backend.count_params(kvar)
6
>>> tf.keras.backend.eval(kvar)
array([[0., 0., 0.],
       [0., 0., 0.]], dtype=float32)
Expand source code
@keras_export('keras.backend.count_params')
@doc_controls.do_not_generate_docs
def count_params(x):
  """Returns the static number of elements in a variable or tensor.

  Args:
      x: Variable or tensor.

  Returns:
      Integer, the number of scalars in `x`.

  Example:

  >>> kvar = tf.keras.backend.zeros((2,3))
  >>> tf.keras.backend.count_params(kvar)
  6
  >>> tf.keras.backend.eval(kvar)
  array([[0., 0., 0.],
         [0., 0., 0.]], dtype=float32)
  """
  return np.prod(x.shape.as_list())
def ctc_batch_cost(y_true, y_pred, input_length, label_length)
-
Runs CTC loss algorithm on each batch element.
Args
y_true
- tensor (samples, max_string_length) containing the truth labels.
y_pred
- tensor (samples, time_steps, num_categories) containing the prediction, or output of the softmax.
input_length
- tensor (samples, 1) containing the sequence length for each batch item in y_pred.
label_length
- tensor (samples, 1) containing the sequence length for each batch item in y_true.
Returns
Tensor with shape (samples, 1) containing the CTC loss of each element.
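Example (an added shape-only sketch, not from the generated docs; all sizes are arbitrary, and labels are kept below num_categories - 1 because the last class index is reserved for the CTC blank):
>>> batch, timesteps, classes, max_label = 2, 10, 5, 4
>>> y_pred = tf.random.uniform((batch, timesteps, classes))
>>> y_true = tf.random.uniform((batch, max_label), maxval=classes - 1, dtype=tf.int32)
>>> input_length = tf.fill((batch, 1), timesteps)
>>> label_length = tf.fill((batch, 1), max_label)
>>> loss = tf.keras.backend.ctc_batch_cost(y_true, y_pred, input_length, label_length)
>>> tf.keras.backend.int_shape(loss)
(2, 1)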
Expand source code
@keras_export('keras.backend.ctc_batch_cost')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def ctc_batch_cost(y_true, y_pred, input_length, label_length):
  """Runs CTC loss algorithm on each batch element.

  Args:
      y_true: tensor `(samples, max_string_length)`
          containing the truth labels.
      y_pred: tensor `(samples, time_steps, num_categories)`
          containing the prediction, or output of the softmax.
      input_length: tensor `(samples, 1)` containing the sequence length for
          each batch item in `y_pred`.
      label_length: tensor `(samples, 1)` containing the sequence length for
          each batch item in `y_true`.

  Returns:
      Tensor with shape (samples,1) containing the
          CTC loss of each element.
  """
  label_length = tf.cast(tf.squeeze(label_length, axis=-1), tf.int32)
  input_length = tf.cast(tf.squeeze(input_length, axis=-1), tf.int32)
  sparse_labels = tf.cast(
      ctc_label_dense_to_sparse(y_true, label_length), tf.int32)

  y_pred = tf.math.log(
      tf.compat.v1.transpose(y_pred, perm=[1, 0, 2]) + epsilon())

  return tf.expand_dims(
      tf.compat.v1.nn.ctc_loss(
          inputs=y_pred, labels=sparse_labels,
          sequence_length=input_length), 1)
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1)
-
Decodes the output of a softmax.
Can use either greedy search (also known as best path) or a constrained dictionary search.
Args
y_pred
- tensor (samples, time_steps, num_categories) containing the prediction, or output of the softmax.
input_length
- tensor (samples, ) containing the sequence length for each batch item in y_pred.
greedy
- perform much faster best-path search if true. This does not use a dictionary.
beam_width
- if greedy is false: a beam search decoder will be used with a beam of this width.
top_paths
- if greedy is false, how many of the most probable paths will be returned.
Returns
- Tuple:
List
- if greedy is true, returns a list of one element that contains the decoded sequence. If false, returns the top_paths most probable decoded sequences. Each decoded sequence has shape (samples, time_steps). Important: blank labels are returned as -1.
Tensor
- (top_paths, ) that contains the log probability of each decoded sequence.
Expand source code
@keras_export('keras.backend.ctc_decode')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def ctc_decode(y_pred, input_length, greedy=True, beam_width=100, top_paths=1):
  """Decodes the output of a softmax.

  Can use either greedy search (also known as best path)
  or a constrained dictionary search.

  Args:
      y_pred: tensor `(samples, time_steps, num_categories)`
          containing the prediction, or output of the softmax.
      input_length: tensor `(samples, )` containing the sequence length for
          each batch item in `y_pred`.
      greedy: perform much faster best-path search if `true`.
          This does not use a dictionary.
      beam_width: if `greedy` is `false`: a beam search decoder will be used
          with a beam of this width.
      top_paths: if `greedy` is `false`,
          how many of the most probable paths will be returned.

  Returns:
      Tuple:
          List: if `greedy` is `true`, returns a list of one element that
              contains the decoded sequence.
              If `false`, returns the `top_paths` most probable
              decoded sequences.
              Each decoded sequence has shape (samples, time_steps).
              Important: blank labels are returned as `-1`.
          Tensor `(top_paths, )` that contains
              the log probability of each decoded sequence.
  """
  input_shape = shape(y_pred)
  num_samples, num_steps = input_shape[0], input_shape[1]
  y_pred = tf.math.log(
      tf.compat.v1.transpose(y_pred, perm=[1, 0, 2]) + epsilon())
  input_length = tf.cast(input_length, tf.int32)

  if greedy:
    (decoded, log_prob) = tf.nn.ctc_greedy_decoder(
        inputs=y_pred, sequence_length=input_length)
  else:
    (decoded, log_prob) = tf.compat.v1.nn.ctc_beam_search_decoder(
        inputs=y_pred,
        sequence_length=input_length,
        beam_width=beam_width,
        top_paths=top_paths)
  decoded_dense = []
  for st in decoded:
    st = tf.SparseTensor(st.indices, st.values, (num_samples, num_steps))
    decoded_dense.append(tf.sparse.to_dense(sp_input=st, default_value=-1))
  return (decoded_dense, log_prob)
def ctc_label_dense_to_sparse(labels, label_lengths)
-
Converts CTC labels from dense to sparse.
Args
labels
- dense CTC labels.
label_lengths
- length of the labels.
Returns
A sparse tensor representation of the labels.
Expand source code
@keras_export('keras.backend.ctc_label_dense_to_sparse')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def ctc_label_dense_to_sparse(labels, label_lengths):
  """Converts CTC labels from dense to sparse.

  Args:
      labels: dense CTC labels.
      label_lengths: length of the labels.

  Returns:
      A sparse tensor representation of the labels.
  """
  label_shape = tf.shape(labels)
  num_batches_tns = tf.stack([label_shape[0]])
  max_num_labels_tns = tf.stack([label_shape[1]])

  def range_less_than(old_input, current_input):
    return tf.expand_dims(
        tf.range(tf.shape(old_input)[1]), 0) < tf.fill(
            max_num_labels_tns, current_input)

  init = tf.cast(tf.fill([1, label_shape[1]], 0), tf.bool)
  dense_mask = tf.compat.v1.scan(
      range_less_than, label_lengths, initializer=init, parallel_iterations=1)
  dense_mask = dense_mask[:, 0, :]

  label_array = tf.reshape(
      tf.tile(tf.range(0, label_shape[1]), num_batches_tns), label_shape)
  label_ind = tf.compat.v1.boolean_mask(label_array, dense_mask)

  batch_array = tf.compat.v1.transpose(
      tf.reshape(
          tf.tile(tf.range(0, label_shape[0]), max_num_labels_tns),
          reverse(label_shape, 0)))
  batch_ind = tf.compat.v1.boolean_mask(batch_array, dense_mask)
  indices = tf.compat.v1.transpose(
      tf.reshape(concatenate([batch_ind, label_ind], axis=0), [2, -1]))

  vals_sparse = tf.compat.v1.gather_nd(labels, indices)

  return tf.SparseTensor(
      tf.cast(indices, tf.int64), vals_sparse, tf.cast(label_shape, tf.int64))
def cumprod(x, axis=0)
-
Cumulative product of the values in a tensor, alongside the specified axis.
Args
x
- A tensor or variable.
axis
- An integer, the axis to compute the product.
Returns
A tensor of the cumulative product of values of x along axis.
Expand source code
@keras_export('keras.backend.cumprod')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def cumprod(x, axis=0):
  """Cumulative product of the values in a tensor, alongside the specified axis.

  Args:
      x: A tensor or variable.
      axis: An integer, the axis to compute the product.

  Returns:
      A tensor of the cumulative product of values of `x` along `axis`.
  """
  return tf.math.cumprod(x, axis=axis)
def cumsum(x, axis=0)
-
Cumulative sum of the values in a tensor, alongside the specified axis.
Args
x
- A tensor or variable.
axis
- An integer, the axis to compute the sum.
Returns
A tensor of the cumulative sum of values of x along axis.
Expand source code
@keras_export('keras.backend.cumsum')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def cumsum(x, axis=0):
  """Cumulative sum of the values in a tensor, alongside the specified axis.

  Args:
      x: A tensor or variable.
      axis: An integer, the axis to compute the sum.

  Returns:
      A tensor of the cumulative sum of values of `x` along `axis`.
  """
  return tf.cumsum(x, axis=axis)
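For example, both running reductions on a small vector (a quick sketch; eager execution assumed):
>>> x = tf.keras.backend.constant([1., 2., 3., 4.])
>>> tf.keras.backend.eval(tf.keras.backend.cumsum(x))
array([ 1.,  3.,  6., 10.], dtype=float32)
>>> tf.keras.backend.eval(tf.keras.backend.cumprod(x))
array([ 1.,  2.,  6., 24.], dtype=float32)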
def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1))
-
2D convolution with separable filters.
Args
x
- input tensor
depthwise_kernel
- convolution kernel for the depthwise convolution.
strides
- strides tuple (length 2).
padding
- string, "same" or "valid".
data_format
- string, "channels_last" or "channels_first".
dilation_rate
- tuple of integers, dilation rates for the separable convolution.
Returns
Output tensor.
Raises
ValueError
- if data_format is neither channels_last nor channels_first.
Expand source code
@keras_export('keras.backend.depthwise_conv2d') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def depthwise_conv2d(x, depthwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): """2D convolution with separable filters. Args: x: input tensor depthwise_kernel: convolution kernel for the depthwise convolution. strides: strides tuple (length 2). padding: string, `"same"` or `"valid"`. data_format: string, `"channels_last"` or `"channels_first"`. dilation_rate: tuple of integers, dilation rates for the separable convolution. Returns: Output tensor. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. """ if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format: ' + str(data_format)) x, tf_data_format = _preprocess_conv2d_input(x, data_format) padding = _preprocess_padding(padding) if tf_data_format == 'NHWC': strides = (1,) + strides + (1,) else: strides = (1, 1) + strides x = tf.compat.v1.nn.depthwise_conv2d( x, depthwise_kernel, strides=strides, padding=padding, rate=dilation_rate, data_format=tf_data_format) if data_format == 'channels_first' and tf_data_format == 'NHWC': x = tf.compat.v1.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW return x
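A shape-only sketch (random values; the kernel layout is (kernel_h, kernel_w, in_channels, depth_multiplier), so the output has in_channels * depth_multiplier channels):
>>> x = tf.random.normal((1, 8, 8, 3))          # channels_last input
>>> kernel = tf.random.normal((3, 3, 3, 1))     # depth_multiplier = 1
>>> y = tf.keras.backend.depthwise_conv2d(x, kernel, padding='valid',
...                                       data_format='channels_last')
>>> tf.keras.backend.int_shape(y)
(1, 6, 6, 3)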
def dot(x, y)
-
Multiplies 2 tensors (and/or variables) and returns a tensor.
This operation corresponds to numpy.dot(a, b, out=None).
Args
x
- Tensor or variable.
y
- Tensor or variable.
Returns
A tensor, dot product of x and y.
Examples:
If inputs x and y are 2-D arrays, then it is equivalent to tf.matmul.
>>> x = tf.keras.backend.placeholder(shape=(2, 3))
>>> y = tf.keras.backend.placeholder(shape=(3, 4))
>>> xy = tf.keras.backend.dot(x, y)
>>> xy
<KerasTensor: shape=(2, 4) dtype=float32 ...>
>>> x = tf.keras.backend.placeholder(shape=(32, 28, 3))
>>> y = tf.keras.backend.placeholder(shape=(3, 4))
>>> xy = tf.keras.backend.dot(x, y)
>>> xy
<KerasTensor: shape=(32, 28, 4) dtype=float32 ...>
If x is an N-D array and y is an M-D array (where M>=2), it is a sum product over the last axis of x and the second-to-last axis of y.
>>> x = tf.keras.backend.random_uniform_variable(shape=(2, 3), low=0, high=1)
>>> y = tf.keras.backend.ones((4, 3, 5))
>>> xy = tf.keras.backend.dot(x, y)
>>> tf.keras.backend.int_shape(xy)
(2, 4, 5)
Expand source code
@keras_export('keras.backend.dot') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def dot(x, y): """Multiplies 2 tensors (and/or variables) and returns a tensor. This operation corresponds to `numpy.dot(a, b, out=None)`. Args: x: Tensor or variable. y: Tensor or variable. Returns: A tensor, dot product of `x` and `y`. Examples: If inputs `x` and `y` are 2-D arrays, then it is equivalent to `tf.matmul`. >>> x = tf.keras.backend.placeholder(shape=(2, 3)) >>> y = tf.keras.backend.placeholder(shape=(3, 4)) >>> xy = tf.keras.backend.dot(x, y) >>> xy <KerasTensor: shape=(2, 4) dtype=float32 ...> >>> x = tf.keras.backend.placeholder(shape=(32, 28, 3)) >>> y = tf.keras.backend.placeholder(shape=(3, 4)) >>> xy = tf.keras.backend.dot(x, y) >>> xy <KerasTensor: shape=(32, 28, 4) dtype=float32 ...> If `x` is an N-D array and `y` is an M-D array (where M>=2), it is a sum product over the last axis of `x` and the second-to-last axis of `y`. >>> x = tf.keras.backend.random_uniform_variable(shape=(2, 3), low=0, high=1) >>> y = tf.keras.backend.ones((4, 3, 5)) >>> xy = tf.keras.backend.dot(x, y) >>> tf.keras.backend.int_shape(xy) (2, 4, 5) """ if ndim(x) is not None and (ndim(x) > 2 or ndim(y) > 2): x_shape = [] for i, s in zip(int_shape(x), tf.unstack(tf.shape(x))): if i is not None: x_shape.append(i) else: x_shape.append(s) x_shape = tuple(x_shape) y_shape = [] for i, s in zip(int_shape(y), tf.unstack(tf.shape(y))): if i is not None: y_shape.append(i) else: y_shape.append(s) y_shape = tuple(y_shape) y_permute_dim = list(range(ndim(y))) y_permute_dim = [y_permute_dim.pop(-2)] + y_permute_dim xt = tf.reshape(x, [-1, x_shape[-1]]) yt = tf.reshape( tf.compat.v1.transpose(y, perm=y_permute_dim), [y_shape[-2], -1]) return tf.reshape( tf.matmul(xt, yt), x_shape[:-1] + y_shape[:-2] + y_shape[-1:]) if is_sparse(x): out = tf.sparse.sparse_dense_matmul(x, y) else: out = tf.matmul(x, y) return out
def dropout(x, level, noise_shape=None, seed=None)
-
Sets entries in x to zero at random, while scaling the entire tensor.
Args
x
- tensor
level
- fraction of the entries in the tensor that will be set to 0.
noise_shape
- shape for randomly generated keep/drop flags, must be broadcastable to the shape of x.
seed
- random seed to ensure determinism.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.dropout')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def dropout(x, level, noise_shape=None, seed=None):
  """Sets entries in `x` to zero at random, while scaling the entire tensor.

  Args:
      x: tensor
      level: fraction of the entries in the tensor
          that will be set to 0.
      noise_shape: shape for randomly generated keep/drop flags,
          must be broadcastable to the shape of `x`
      seed: random seed to ensure determinism.

  Returns:
      A tensor.
  """
  if seed is None:
    seed = np.random.randint(10e6)
  return tf.nn.dropout(x, rate=level, noise_shape=noise_shape, seed=seed)
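A brief sketch (the output is random by construction, so only the scaling rule is noted in comments):
>>> x = tf.keras.backend.ones((2, 4))
>>> y = tf.keras.backend.dropout(x, level=0.5, seed=1)
>>> # Surviving entries are scaled by 1 / (1 - level), i.e. 2.0 here;
>>> # the remaining entries are set to 0. The pattern depends on the seed.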
def dtype(x)
-
Returns the dtype of a Keras tensor or variable, as a string.
Args
x
- Tensor or variable.
Returns
String, dtype of x.
Examples:
>>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5)))
'float32'
>>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5),
...                                                     dtype='float32'))
'float32'
>>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5),
...                                                     dtype='float64'))
'float64'
>>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]))
>>> tf.keras.backend.dtype(kvar)
'float32'
>>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]),
...                                  dtype='float32')
>>> tf.keras.backend.dtype(kvar)
'float32'
Expand source code
@keras_export('keras.backend.dtype') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def dtype(x): """Returns the dtype of a Keras tensor or variable, as a string. Args: x: Tensor or variable. Returns: String, dtype of `x`. Examples: >>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5))) 'float32' >>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5), ... dtype='float32')) 'float32' >>> tf.keras.backend.dtype(tf.keras.backend.placeholder(shape=(2,4,5), ... dtype='float64')) 'float64' >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]])) >>> tf.keras.backend.dtype(kvar) 'float32' >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]), ... dtype='float32') >>> tf.keras.backend.dtype(kvar) 'float32' """ return x.dtype.base_dtype.name
def elu(x, alpha=1.0)
-
Exponential linear unit.
Args
x
- A tensor or variable to compute the activation function for.
alpha
- A scalar, slope of negative section.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.elu')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def elu(x, alpha=1.):
  """Exponential linear unit.

  Args:
      x: A tensor or variable to compute the activation function for.
      alpha: A scalar, slope of negative section.

  Returns:
      A tensor.
  """
  res = tf.nn.elu(x)
  if alpha == 1:
    return res
  else:
    return tf.where(x > 0, res, alpha * res)
def epsilon()
-
Returns the value of the fuzz factor used in numeric expressions.
Returns
A float.
Example:
>>> tf.keras.backend.epsilon()
1e-07
Expand source code
@keras_export('keras.backend.epsilon')
@tf.__internal__.dispatch.add_dispatch_support
def epsilon():
  """Returns the value of the fuzz factor used in numeric expressions.

  Returns:
      A float.

  Example:

  >>> tf.keras.backend.epsilon()
  1e-07
  """
  return _EPSILON
def equal(x, y)
-
Element-wise equality between two tensors.
Args
x
- Tensor or variable.
y
- Tensor or variable.
Returns
A bool tensor.
Expand source code
@keras_export('keras.backend.equal')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def equal(x, y):
  """Element-wise equality between two tensors.

  Args:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return tf.equal(x, y)
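For example, a quick sketch (the same pattern applies to the other element-wise comparisons in this module):
>>> a = tf.keras.backend.constant([1, 2, 3])
>>> b = tf.keras.backend.constant([1, 0, 3])
>>> tf.keras.backend.eval(tf.keras.backend.equal(a, b))
array([ True, False,  True])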
def eval(x)
-
Evaluates the value of a variable.
Args
x
- A variable.
Returns
A Numpy array.
Examples:
>>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]),
...                                  dtype='float32')
>>> tf.keras.backend.eval(kvar)
array([[1., 2.],
       [3., 4.]], dtype=float32)
Expand source code
@keras_export('keras.backend.eval')
@doc_controls.do_not_generate_docs
def eval(x):
  """Evaluates the value of a variable.

  Args:
      x: A variable.

  Returns:
      A Numpy array.

  Examples:

  >>> kvar = tf.keras.backend.variable(np.array([[1, 2], [3, 4]]),
  ...                                  dtype='float32')
  >>> tf.keras.backend.eval(kvar)
  array([[1., 2.],
         [3., 4.]], dtype=float32)
  """
  return get_value(to_dense(x))
def exp(x)
-
Element-wise exponential.
Args
x
- Tensor or variable.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.exp')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def exp(x):
  """Element-wise exponential.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.exp(x)
def expand_dims(x, axis=-1)
-
Adds a 1-sized dimension at index "axis".
Args
x
- A tensor or variable.
axis
- Position where to add a new axis.
Returns
A tensor with expanded dimensions.
Expand source code
@keras_export('keras.backend.expand_dims')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def expand_dims(x, axis=-1):
  """Adds a 1-sized dimension at index "axis".

  Args:
      x: A tensor or variable.
      axis: Position where to add a new axis.

  Returns:
      A tensor with expanded dimensions.
  """
  return tf.expand_dims(x, axis)
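For instance, a quick shape check (eager execution assumed):
>>> x = tf.keras.backend.constant([[1., 2.], [3., 4.]])    # shape (2, 2)
>>> tf.keras.backend.int_shape(tf.keras.backend.expand_dims(x, axis=0))
(1, 2, 2)
>>> tf.keras.backend.int_shape(tf.keras.backend.expand_dims(x))    # axis=-1
(2, 2, 1)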
def eye(size, dtype=None, name=None)
-
Instantiates an identity matrix and returns it.
Args
size
- Integer, number of rows/columns.
dtype
- String, data type of returned Keras variable.
name
- String, name of returned Keras variable.
Returns
A Keras variable, an identity matrix.
Example
>>> kvar = tf.keras.backend.eye(3)
>>> tf.keras.backend.eval(kvar)
array([[1., 0., 0.],
       [0., 1., 0.],
       [0., 0., 1.]], dtype=float32)
Expand source code
@keras_export('keras.backend.eye') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def eye(size, dtype=None, name=None): """Instantiate an identity matrix and returns it. Args: size: Integer, number of rows/columns. dtype: String, data type of returned Keras variable. name: String, name of returned Keras variable. Returns: A Keras variable, an identity matrix. Example: >>> kvar = tf.keras.backend.eye(3) >>> tf.keras.backend.eval(kvar) array([[1., 0., 0.], [0., 1., 0.], [0., 0., 1.]], dtype=float32) """ if dtype is None: dtype = floatx() tf_dtype = tf.as_dtype(dtype) return variable(tf.eye(size, dtype=tf_dtype), dtype, name)
def flatten(x)
-
Flatten a tensor.
Args
x
- A tensor or variable.
Returns
A tensor, reshaped into 1-D.
Example
>>> b = tf.constant([[1, 2], [3, 4]])
>>> b
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[1, 2],
       [3, 4]], dtype=int32)>
>>> tf.keras.backend.flatten(b)
<tf.Tensor: shape=(4,), dtype=int32, numpy=array([1, 2, 3, 4], dtype=int32)>
Expand source code
@keras_export('keras.backend.flatten')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def flatten(x):
  """Flatten a tensor.

  Args:
      x: A tensor or variable.

  Returns:
      A tensor, reshaped into 1-D

  Example:

  >>> b = tf.constant([[1, 2], [3, 4]])
  >>> b
  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
  array([[1, 2],
         [3, 4]], dtype=int32)>
  >>> tf.keras.backend.flatten(b)
  <tf.Tensor: shape=(4,), dtype=int32, numpy=array([1, 2, 3, 4], dtype=int32)>
  """
  return tf.reshape(x, [-1])
def floatx()
-
Returns the default float type, as a string.
E.g. 'float16', 'float32', 'float64'.
Returns
String, the current default float type.
Example:
>>> tf.keras.backend.floatx()
'float32'
Expand source code
@keras_export('keras.backend.floatx')
def floatx():
  """Returns the default float type, as a string.

  E.g. `'float16'`, `'float32'`, `'float64'`.

  Returns:
      String, the current default float type.

  Example:

  >>> tf.keras.backend.floatx()
  'float32'
  """
  return _FLOATX
def foldl(fn, elems, initializer=None, name=None)
-
Reduce elems using fn to combine them from left to right.
Args
fn
- Callable that will be called upon each element in elems and an accumulator, for instance lambda acc, x: acc + x
elems
- tensor
initializer
- The first value used (elems[0] in case of None)
name
- A string name for the foldl node in the graph
Returns
Tensor with same type and shape as initializer.
Expand source code
@keras_export('keras.backend.foldl')
@doc_controls.do_not_generate_docs
def foldl(fn, elems, initializer=None, name=None):
  """Reduce elems using fn to combine them from left to right.

  Args:
      fn: Callable that will be called upon each element in elems and an
          accumulator, for instance `lambda acc, x: acc + x`
      elems: tensor
      initializer: The first value used (`elems[0]` in case of None)
      name: A string name for the foldl node in the graph

  Returns:
      Tensor with same type and shape as `initializer`.
  """
  return tf.compat.v1.foldl(fn, elems, initializer=initializer, name=name)
def foldr(fn, elems, initializer=None, name=None)
-
Reduce elems using fn to combine them from right to left.
Args
fn
- Callable that will be called upon each element in elems and an accumulator, for instance lambda acc, x: acc + x
elems
- tensor
initializer
- The first value used (elems[-1] in case of None)
name
- A string name for the foldr node in the graph
Returns
Same type and shape as initializer.
Expand source code
@keras_export('keras.backend.foldr')
@doc_controls.do_not_generate_docs
def foldr(fn, elems, initializer=None, name=None):
  """Reduce elems using fn to combine them from right to left.

  Args:
      fn: Callable that will be called upon each element in elems and an
          accumulator, for instance `lambda acc, x: acc + x`
      elems: tensor
      initializer: The first value used (`elems[-1]` in case of None)
      name: A string name for the foldr node in the graph

  Returns:
      Same type and shape as initializer
  """
  return tf.compat.v1.foldr(fn, elems, initializer=initializer, name=name)
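For example, with no initializer the first (for foldl) or last (for foldr) element seeds the accumulator (a small sketch; eager execution assumed):
>>> elems = tf.keras.backend.constant([1., 2., 3., 4.])
>>> tf.keras.backend.eval(tf.keras.backend.foldl(lambda acc, x: acc + x, elems))
10.0
>>> tf.keras.backend.eval(tf.keras.backend.foldr(lambda acc, x: acc * x, elems))
24.0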
def function(inputs, outputs, updates=None, name=None, **kwargs)
-
Instantiates a Keras function.
Args
inputs
- List of placeholder tensors.
outputs
- List of output tensors.
updates
- List of update ops.
name
- String, name of function.
**kwargs
- Passed to tf.Session.run.
Returns
Output values as Numpy arrays.
Raises
ValueError
- if invalid kwargs are passed in or if in eager execution.
Expand source code
@keras_export('keras.backend.function') @doc_controls.do_not_generate_docs def function(inputs, outputs, updates=None, name=None, **kwargs): """Instantiates a Keras function. Args: inputs: List of placeholder tensors. outputs: List of output tensors. updates: List of update ops. name: String, name of function. **kwargs: Passed to `tf.Session.run`. Returns: Output values as Numpy arrays. Raises: ValueError: if invalid kwargs are passed in or if in eager execution. """ if tf.compat.v1.executing_eagerly_outside_functions(): if kwargs: raise ValueError('Session keyword arguments are not supported during ' 'eager execution. You passed: %s' % (kwargs,)) if updates: raise ValueError('`updates` argument is not supported during ' 'eager execution. You passed: %s' % (updates,)) from keras import models # pylint: disable=g-import-not-at-top from keras.utils import tf_utils # pylint: disable=g-import-not-at-top model = models.Model(inputs=inputs, outputs=outputs) wrap_outputs = isinstance(outputs, list) and len(outputs) == 1 def func(model_inputs): outs = model(model_inputs) if wrap_outputs: outs = [outs] return tf_utils.sync_to_numpy_or_python_type(outs) return func if kwargs: for key in kwargs: if (key not in tf_inspect.getfullargspec(tf.compat.v1.Session.run)[0] and key not in ['inputs', 'outputs', 'updates', 'name']): msg = ('Invalid argument "%s" passed to K.function with TensorFlow ' 'backend') % key raise ValueError(msg) return GraphExecutionFunction( inputs, outputs, updates=updates, name=name, **kwargs)
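A minimal sketch under TF 2.x eager execution, where the inputs/outputs pair is wrapped in a Model internally (the names below are illustrative only):
>>> inp = tf.keras.Input(shape=(2,))
>>> out = inp * 2.0
>>> f = tf.keras.backend.function([inp], [out])
>>> f([np.array([[1., 2.]])])
[array([[2., 4.]], dtype=float32)]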
def gather(reference, indices)
-
Retrieves the elements of indices indices in the tensor reference.
Args
reference
- A tensor.
indices
- An integer tensor of indices.
Returns
A tensor of same type as reference.
Examples:
>>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])
>>> tf.keras.backend.eval(var)
array([[1., 2., 3.],
       [4., 5., 6.]], dtype=float32)
>>> var_gathered = tf.keras.backend.gather(var, [0])
>>> tf.keras.backend.eval(var_gathered)
array([[1., 2., 3.]], dtype=float32)
>>> var_gathered = tf.keras.backend.gather(var, [1])
>>> tf.keras.backend.eval(var_gathered)
array([[4., 5., 6.]], dtype=float32)
>>> var_gathered = tf.keras.backend.gather(var, [0,1,0])
>>> tf.keras.backend.eval(var_gathered)
array([[1., 2., 3.],
       [4., 5., 6.],
       [1., 2., 3.]], dtype=float32)
Expand source code
@keras_export('keras.backend.gather') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def gather(reference, indices): """Retrieves the elements of indices `indices` in the tensor `reference`. Args: reference: A tensor. indices: An integer tensor of indices. Returns: A tensor of same type as `reference`. Examples: >>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]]) >>> tf.keras.backend.eval(var) array([[1., 2., 3.], [4., 5., 6.]], dtype=float32) >>> var_gathered = tf.keras.backend.gather(var, [0]) >>> tf.keras.backend.eval(var_gathered) array([[1., 2., 3.]], dtype=float32) >>> var_gathered = tf.keras.backend.gather(var, [1]) >>> tf.keras.backend.eval(var_gathered) array([[4., 5., 6.]], dtype=float32) >>> var_gathered = tf.keras.backend.gather(var, [0,1,0]) >>> tf.keras.backend.eval(var_gathered) array([[1., 2., 3.], [4., 5., 6.], [1., 2., 3.]], dtype=float32) """ return tf.compat.v1.gather(reference, indices)
def get_session(op_input_list=())
-
Returns the TF session to be used by the backend.
If a default TensorFlow session is available, we will return it.
Else, we will return the global Keras session assuming it matches the current graph.
If no global Keras session exists at this point: we will create a new global session.
Note that you can manually set the global session via K.set_session(sess).
Args
op_input_list
- An optional sequence of tensors or ops, which will be used to determine the current graph. Otherwise the default graph will be used.
Returns
A TensorFlow session.
Expand source code
@keras_export(v1=['keras.backend.get_session']) def get_session(op_input_list=()): """Returns the TF session to be used by the backend. If a default TensorFlow session is available, we will return it. Else, we will return the global Keras session assuming it matches the current graph. If no global Keras session exists at this point: we will create a new global session. Note that you can manually set the global session via `K.set_session(sess)`. Args: op_input_list: An option sequence of tensors or ops, which will be used to determine the current graph. Otherwise the default graph will be used. Returns: A TensorFlow session. """ session = _get_session(op_input_list) if not _MANUAL_VAR_INIT: with session.graph.as_default(): _initialize_variables(session) return session
def get_uid(prefix='')
-
Associates a string prefix with an integer counter in a TensorFlow graph.
Args
prefix
- String prefix to index.
Returns
Unique integer ID.
Example:
>>> get_uid('dense')
1
>>> get_uid('dense')
2
Expand source code
@keras_export('keras.backend.get_uid') def get_uid(prefix=''): """Associates a string prefix with an integer counter in a TensorFlow graph. Args: prefix: String prefix to index. Returns: Unique integer ID. Example: >>> get_uid('dense') 1 >>> get_uid('dense') 2 """ graph = get_graph() if graph not in PER_GRAPH_OBJECT_NAME_UIDS: PER_GRAPH_OBJECT_NAME_UIDS[graph] = collections.defaultdict(int) layer_name_uids = PER_GRAPH_OBJECT_NAME_UIDS[graph] layer_name_uids[prefix] += 1 return layer_name_uids[prefix]
def get_value(x)
-
Returns the value of a variable.
backend.get_value is the complement of backend.set_value, and provides a generic interface for reading from variables while abstracting away the differences between TensorFlow 1.x and 2.x semantics.
>>> K = tf.keras.backend  # Common keras convention
>>> v = K.variable(1.)
>>> # reassign
>>> K.set_value(v, 2.)
>>> print(K.get_value(v))
2.0
>>> # increment
>>> K.set_value(v, K.get_value(v) + 1)
>>> print(K.get_value(v))
3.0
Variable semantics in TensorFlow 2 are eager execution friendly. The above code is roughly equivalent to:
>>> v = tf.Variable(1.)
>>> v.assign(2.)
>>> print(v.numpy())
2.0
>>> v.assign_add(1.)
>>> print(v.numpy())
3.0
Args
x
- input variable.
Returns
A Numpy array.
Expand source code
@keras_export('keras.backend.get_value') @doc_controls.do_not_generate_docs def get_value(x): """Returns the value of a variable. `backend.get_value` is the complement of `backend.set_value`, and provides a generic interface for reading from variables while abstracting away the differences between TensorFlow 1.x and 2.x semantics. {snippet} Args: x: input variable. Returns: A Numpy array. """ if not tf.is_tensor(x): return x if tf.executing_eagerly() or isinstance(x, tf.__internal__.EagerTensor): return x.numpy() if not getattr(x, '_in_graph_mode', True): # This is a variable which was created in an eager context, but is being # evaluated from a Graph. with tf.__internal__.eager_context.eager_mode(): return x.numpy() if tf.compat.v1.executing_eagerly_outside_functions(): # This method of evaluating works inside the Keras FuncGraph. with tf.init_scope(): return x.numpy() with x.graph.as_default(): return x.eval(session=get_session((x,)))
def gradients(loss, variables)
-
Returns the gradients of loss w.r.t. variables.
Args
loss
- Scalar tensor to minimize.
variables
- List of variables.
Returns
A gradients tensor.
Expand source code
@keras_export('keras.backend.gradients')
@doc_controls.do_not_generate_docs
def gradients(loss, variables):
  """Returns the gradients of `loss` w.r.t. `variables`.

  Args:
      loss: Scalar tensor to minimize.
      variables: List of variables.

  Returns:
      A gradients tensor.
  """
  return tf.compat.v1.gradients(
      loss, variables, colocate_gradients_with_ops=True)
def greater(x, y)
-
Element-wise truth value of (x > y).
Args
x
- Tensor or variable.
y
- Tensor or variable.
Returns
A bool tensor.
Expand source code
@keras_export('keras.backend.greater')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def greater(x, y):
  """Element-wise truth value of (x > y).

  Args:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return tf.greater(x, y)
def greater_equal(x, y)
-
Element-wise truth value of (x >= y).
Args
x
- Tensor or variable.
y
- Tensor or variable.
Returns
A bool tensor.
Expand source code
@keras_export('keras.backend.greater_equal')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def greater_equal(x, y):
  """Element-wise truth value of (x >= y).

  Args:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return tf.greater_equal(x, y)
def hard_sigmoid(x)
-
Segment-wise linear approximation of sigmoid.
Faster than sigmoid. Returns 0. if x < -2.5, 1. if x > 2.5. In -2.5 <= x <= 2.5, returns 0.2 * x + 0.5.
Args
x
- A tensor or variable.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.hard_sigmoid') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def hard_sigmoid(x): """Segment-wise linear approximation of sigmoid. Faster than sigmoid. Returns `0.` if `x < -2.5`, `1.` if `x > 2.5`. In `-2.5 <= x <= 2.5`, returns `0.2 * x + 0.5`. Args: x: A tensor or variable. Returns: A tensor. """ point_two = _constant_to_tensor(0.2, x.dtype.base_dtype) point_five = _constant_to_tensor(0.5, x.dtype.base_dtype) x = tf.multiply(x, point_two) x = tf.add(x, point_five) x = tf.clip_by_value(x, 0., 1.) return x
def image_data_format()
-
Returns the default image data format convention.
Returns
A string, either 'channels_first' or 'channels_last'.
Example:
>>> tf.keras.backend.image_data_format()
'channels_last'
Expand source code
@keras_export('keras.backend.image_data_format') @tf.__internal__.dispatch.add_dispatch_support def image_data_format(): """Returns the default image data format convention. Returns: A string, either `'channels_first'` or `'channels_last'` Example: >>> tf.keras.backend.image_data_format() 'channels_last' """ return _IMAGE_DATA_FORMAT
def in_test_phase(x, alt, training=None)
-
Selects x in test phase, and alt otherwise.
Note that alt should have the same shape as x.
Args
x
- What to return in test phase (tensor or callable that returns a tensor).
alt
- What to return otherwise (tensor or callable that returns a tensor).
training
- Optional scalar tensor (or Python boolean, or Python integer) specifying the learning phase.
Returns
Either x or alt based on K.learning_phase.
Expand source code
@keras_export('keras.backend.in_test_phase') @doc_controls.do_not_generate_docs def in_test_phase(x, alt, training=None): """Selects `x` in test phase, and `alt` otherwise. Note that `alt` should have the *same shape* as `x`. Args: x: What to return in test phase (tensor or callable that returns a tensor). alt: What to return otherwise (tensor or callable that returns a tensor). training: Optional scalar tensor (or Python boolean, or Python integer) specifying the learning phase. Returns: Either `x` or `alt` based on `K.learning_phase`. """ return in_train_phase(alt, x, training=training)
def in_top_k(predictions, targets, k)
-
Returns whether the targets are in the top k predictions.
Args
predictions
- A tensor of shape (batch_size, classes) and type float32.
targets
- A 1D tensor of length batch_size and type int32 or int64.
k
- An int, number of top elements to consider.
Returns
A 1D tensor of length batch_size and type bool. output[i] is True if predictions[i, targets[i]] is within top-k values of predictions[i].
Expand source code
@keras_export('keras.backend.in_top_k') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def in_top_k(predictions, targets, k): """Returns whether the `targets` are in the top `k` `predictions`. Args: predictions: A tensor of shape `(batch_size, classes)` and type `float32`. targets: A 1D tensor of length `batch_size` and type `int32` or `int64`. k: An `int`, number of top elements to consider. Returns: A 1D tensor of length `batch_size` and type `bool`. `output[i]` is `True` if `predictions[i, targets[i]]` is within top-`k` values of `predictions[i]`. """ return tf.compat.v1.math.in_top_k(predictions, targets, k)
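For instance (a small sketch; eager execution assumed):
>>> predictions = tf.keras.backend.constant([[0.1, 0.9], [0.8, 0.2]])
>>> targets = tf.constant([1, 1])
>>> tf.keras.backend.eval(tf.keras.backend.in_top_k(predictions, targets, k=1))
array([ True, False])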
def in_train_phase(x, alt, training=None)
-
Selects x in train phase, and alt otherwise.
Note that alt should have the same shape as x.
Args
x
- What to return in train phase (tensor or callable that returns a tensor).
alt
- What to return otherwise (tensor or callable that returns a tensor).
training
- Optional scalar tensor (or Python boolean, or Python integer) specifying the learning phase.
Returns
Either x or alt based on the training flag. The training flag defaults to K.learning_phase().
Expand source code
@keras_export('keras.backend.in_train_phase') @doc_controls.do_not_generate_docs def in_train_phase(x, alt, training=None): """Selects `x` in train phase, and `alt` otherwise. Note that `alt` should have the *same shape* as `x`. Args: x: What to return in train phase (tensor or callable that returns a tensor). alt: What to return otherwise (tensor or callable that returns a tensor). training: Optional scalar tensor (or Python boolean, or Python integer) specifying the learning phase. Returns: Either `x` or `alt` based on the `training` flag. the `training` flag defaults to `K.learning_phase()`. """ from keras.engine import base_layer_utils # pylint: disable=g-import-not-at-top if training is None: training = base_layer_utils.call_context().training if training is None: training = learning_phase() # TODO(b/138862903): Handle the case when training is tensor. if not tf.is_tensor(training): if training == 1 or training is True: if callable(x): return x() else: return x elif training == 0 or training is False: if callable(alt): return alt() else: return alt # else: assume learning phase is a placeholder tensor. x = switch(training, x, alt) return x
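For example, with a plain Python boolean for training the selection happens immediately (a quick sketch; in_test_phase simply swaps the two branches):
>>> tf.keras.backend.in_train_phase(1.0, 0.0, training=True)
1.0
>>> tf.keras.backend.in_train_phase(1.0, 0.0, training=False)
0.0
>>> tf.keras.backend.in_test_phase(1.0, 0.0, training=True)
0.0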
def int_shape(x)
-
Returns the shape of tensor or variable as a tuple of int or None entries.
Args
x
- Tensor or variable.
Returns
A tuple of integers (or None entries).
Examples:
>>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> tf.keras.backend.int_shape(input)
(2, 4, 5)
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = tf.keras.backend.variable(value=val)
>>> tf.keras.backend.int_shape(kvar)
(2, 2)
Expand source code
@keras_export('keras.backend.int_shape') @doc_controls.do_not_generate_docs def int_shape(x): """Returns the shape of tensor or variable as a tuple of int or None entries. Args: x: Tensor or variable. Returns: A tuple of integers (or None entries). Examples: >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> tf.keras.backend.int_shape(input) (2, 4, 5) >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val) >>> tf.keras.backend.int_shape(kvar) (2, 2) """ try: shape = x.shape if not isinstance(shape, tuple): shape = tuple(shape.as_list()) return shape except ValueError: return None
def is_keras_tensor(x)
-
Returns whether x is a Keras tensor.
A "Keras tensor" is a tensor that was returned by a Keras layer (Layer class) or by Input.
Args
x
- A candidate tensor.
Returns
A boolean
- Whether the argument is a Keras tensor.
Raises
ValueError
- In case x is not a symbolic tensor.
Examples:
>>> np_var = np.array([1, 2])
>>> # A numpy array is not a symbolic tensor.
>>> tf.keras.backend.is_keras_tensor(np_var)
Traceback (most recent call last):
...
ValueError: Unexpectedly found an instance of type `<class 'numpy.ndarray'>`.
Expected a symbolic tensor instance.
>>> keras_var = tf.keras.backend.variable(np_var)
>>> # A variable created with the keras backend is not a Keras tensor.
>>> tf.keras.backend.is_keras_tensor(keras_var)
False
>>> keras_placeholder = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> # A placeholder is a Keras tensor.
>>> tf.keras.backend.is_keras_tensor(keras_placeholder)
True
>>> keras_input = tf.keras.layers.Input([10])
>>> # An Input is a Keras tensor.
>>> tf.keras.backend.is_keras_tensor(keras_input)
True
>>> keras_layer_output = tf.keras.layers.Dense(10)(keras_input)
>>> # Any Keras layer output is a Keras tensor.
>>> tf.keras.backend.is_keras_tensor(keras_layer_output)
True
Expand source code
@keras_export('keras.backend.is_keras_tensor') def is_keras_tensor(x): """Returns whether `x` is a Keras tensor. A "Keras tensor" is a tensor that was returned by a Keras layer, (`Layer` class) or by `Input`. Args: x: A candidate tensor. Returns: A boolean: Whether the argument is a Keras tensor. Raises: ValueError: In case `x` is not a symbolic tensor. Examples: >>> np_var = np.array([1, 2]) >>> # A numpy array is not a symbolic tensor. >>> tf.keras.backend.is_keras_tensor(np_var) Traceback (most recent call last): ... ValueError: Unexpectedly found an instance of type `<class 'numpy.ndarray'>`. Expected a symbolic tensor instance. >>> keras_var = tf.keras.backend.variable(np_var) >>> # A variable created with the keras backend is not a Keras tensor. >>> tf.keras.backend.is_keras_tensor(keras_var) False >>> keras_placeholder = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> # A placeholder is a Keras tensor. >>> tf.keras.backend.is_keras_tensor(keras_placeholder) True >>> keras_input = tf.keras.layers.Input([10]) >>> # An Input is a Keras tensor. >>> tf.keras.backend.is_keras_tensor(keras_input) True >>> keras_layer_output = tf.keras.layers.Dense(10)(keras_input) >>> # Any Keras layer output is a Keras tensor. >>> tf.keras.backend.is_keras_tensor(keras_layer_output) True """ if not isinstance(x, (tf.Tensor, tf.Variable, tf.SparseTensor, tf.RaggedTensor, keras_tensor.KerasTensor)): raise ValueError('Unexpectedly found an instance of type `' + str(type(x)) + '`. Expected a symbolic tensor instance.') if tf.compat.v1.executing_eagerly_outside_functions(): return isinstance(x, keras_tensor.KerasTensor) return hasattr(x, '_keras_history')
def is_sparse(tensor)
-
Returns whether a tensor is a sparse tensor.
Args
tensor
- A tensor instance.
Returns
A boolean.
Example
>>> a = tf.keras.backend.placeholder((2, 2), sparse=False)
>>> print(tf.keras.backend.is_sparse(a))
False
>>> b = tf.keras.backend.placeholder((2, 2), sparse=True)
>>> print(tf.keras.backend.is_sparse(b))
True
Expand source code
@keras_export('keras.backend.is_sparse') @doc_controls.do_not_generate_docs def is_sparse(tensor): """Returns whether a tensor is a sparse tensor. Args: tensor: A tensor instance. Returns: A boolean. Example: >>> a = tf.keras.backend.placeholder((2, 2), sparse=False) >>> print(tf.keras.backend.is_sparse(a)) False >>> b = tf.keras.backend.placeholder((2, 2), sparse=True) >>> print(tf.keras.backend.is_sparse(b)) True """ spec = getattr(tensor, '_type_spec', None) if spec is not None: return isinstance(spec, tf.SparseTensorSpec) return isinstance(tensor, tf.SparseTensor)
def l2_normalize(x, axis=None)
-
Normalizes a tensor wrt the L2 norm alongside the specified axis.
Args
x
- Tensor or variable.
axis
- axis along which to perform normalization.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.l2_normalize')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def l2_normalize(x, axis=None):
  """Normalizes a tensor wrt the L2 norm alongside the specified axis.

  Args:
      x: Tensor or variable.
      axis: axis along which to perform normalization.

  Returns:
      A tensor.
  """
  return tf.linalg.l2_normalize(x, axis=axis)
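For example, the classic 3-4-5 triangle (a quick sketch; eager execution assumed):
>>> x = tf.keras.backend.constant([3., 4.])
>>> tf.keras.backend.eval(tf.keras.backend.l2_normalize(x))
array([0.6, 0.8], dtype=float32)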
def learning_phase()
-
Returns the learning phase flag.
The learning phase flag is a bool tensor (0 = test, 1 = train) to be passed as input to any Keras function that uses a different behavior at train time and test time.
Returns
Learning phase (scalar integer tensor or Python integer).
Expand source code
@keras_export('keras.backend.learning_phase') @doc_controls.do_not_generate_docs def learning_phase(): """Returns the learning phase flag. The learning phase flag is a bool tensor (0 = test, 1 = train) to be passed as input to any Keras function that uses a different behavior at train time and test time. Returns: Learning phase (scalar integer tensor or Python integer). """ graph = tf.compat.v1.get_default_graph() if graph is getattr(_GRAPH, 'graph', None): # Don't enter an init_scope for the learning phase if eager execution # is enabled but we're inside the Keras workspace graph. learning_phase = symbolic_learning_phase() else: with tf.init_scope(): # We always check & set the learning phase inside the init_scope, # otherwise the wrong default_graph will be used to look up the learning # phase inside of functions & defuns. # # This is because functions & defuns (both in graph & in eager mode) # will always execute non-eagerly using a function-specific default # subgraph. learning_phase = _GRAPH_LEARNING_PHASES[None] _mark_func_graph_as_unsaveable(graph, learning_phase) return learning_phase
def learning_phase_scope(value)
-
Provides a scope within which the learning phase is equal to value.
The learning phase gets restored to its original value upon exiting the scope.
Args
value
- Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train
Yields
None.
Raises
ValueError
- if value is neither 0 nor 1.
Expand source code
@keras_export('keras.backend.learning_phase_scope') @tf_contextlib.contextmanager @doc_controls.do_not_generate_docs def learning_phase_scope(value): """Provides a scope within which the learning phase is equal to `value`. The learning phase gets restored to its original value upon exiting the scope. Args: value: Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train Yields: None. Raises: ValueError: if `value` is neither `0` nor `1`. """ warnings.warn('`tf.keras.backend.learning_phase_scope` is deprecated and ' 'will be removed after 2020-10-11. To update it, simply ' 'pass a True/False value to the `training` argument of the ' '`__call__` method of your layer or model.') with deprecated_internal_learning_phase_scope(value): try: yield finally: pass
def less(x, y)
-
Element-wise truth value of (x < y).
Args
x
- Tensor or variable.
y
- Tensor or variable.
Returns
A bool tensor.
Expand source code
@keras_export('keras.backend.less')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def less(x, y):
  """Element-wise truth value of (x < y).

  Args:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return tf.less(x, y)
def less_equal(x, y)
-
Element-wise truth value of (x <= y).
Args
x
- Tensor or variable.
y
- Tensor or variable.
Returns
A bool tensor.
Expand source code
@keras_export('keras.backend.less_equal')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def less_equal(x, y):
  """Element-wise truth value of (x <= y).

  Args:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A bool tensor.
  """
  return tf.less_equal(x, y)
def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None)
-
Apply 1D conv with un-shared weights.
Args
inputs
- 3D tensor with shape: (batch_size, steps, input_dim) if data_format is "channels_last" or (batch_size, input_dim, steps) if data_format is "channels_first".
kernel
- the unshared weight for convolution, with shape (output_length, feature_dim, filters).
kernel_size
- a tuple of a single integer, specifying the length of the 1D convolution window.
strides
- a tuple of a single integer, specifying the stride length of the convolution.
data_format
- the data format, channels_first or channels_last.
Returns
A 3D tensor with shape: (batch_size, output_length, filters) if data_format='channels_last' or a 3D tensor with shape: (batch_size, filters, output_length) if data_format='channels_first'.
Expand source code
@keras_export('keras.backend.local_conv1d') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def local_conv1d(inputs, kernel, kernel_size, strides, data_format=None): """Apply 1D conv with un-shared weights. Args: inputs: 3D tensor with shape: (batch_size, steps, input_dim) if data_format is "channels_last" or (batch_size, input_dim, steps) if data_format is "channels_first". kernel: the unshared weight for convolution, with shape (output_length, feature_dim, filters). kernel_size: a tuple of a single integer, specifying the length of the 1D convolution window. strides: a tuple of a single integer, specifying the stride length of the convolution. data_format: the data format, channels_first or channels_last. Returns: A 3d tensor with shape: (batch_size, output_length, filters) if data_format='channels_first' or 3D tensor with shape: (batch_size, filters, output_length) if data_format='channels_last'. """ output_shape = (kernel.shape[0],) return local_conv(inputs, kernel, kernel_size, strides, output_shape, data_format)
def local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None)
-
Apply 2D conv with un-shared weights.
Args
inputs
- 4D tensor with shape: (batch_size, filters, new_rows, new_cols) if data_format='channels_first' or 4D tensor with shape: (batch_size, new_rows, new_cols, filters) if data_format='channels_last'.
kernel
- the unshared weight for convolution, with shape (output_items, feature_dim, filters).
kernel_size
- a tuple of 2 integers, specifying the width and height of the 2D convolution window.
strides
- a tuple of 2 integers, specifying the strides of the convolution along the width and height.
output_shape
- a tuple with (output_row, output_col).
data_format
- the data format, channels_first or channels_last.
Returns
A 4D tensor with shape: (batch_size, filters, new_rows, new_cols) if data_format='channels_first' or 4D tensor with shape: (batch_size, new_rows, new_cols, filters) if data_format='channels_last'.
Expand source code
@keras_export('keras.backend.local_conv2d') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def local_conv2d(inputs, kernel, kernel_size, strides, output_shape, data_format=None): """Apply 2D conv with un-shared weights. Args: inputs: 4D tensor with shape: (batch_size, filters, new_rows, new_cols) if data_format='channels_first' or 4D tensor with shape: (batch_size, new_rows, new_cols, filters) if data_format='channels_last'. kernel: the unshared weight for convolution, with shape (output_items, feature_dim, filters). kernel_size: a tuple of 2 integers, specifying the width and height of the 2D convolution window. strides: a tuple of 2 integers, specifying the strides of the convolution along the width and height. output_shape: a tuple with (output_row, output_col). data_format: the data format, channels_first or channels_last. Returns: A 4D tensor with shape: (batch_size, filters, new_rows, new_cols) if data_format='channels_first' or 4D tensor with shape: (batch_size, new_rows, new_cols, filters) if data_format='channels_last'. """ return local_conv(inputs, kernel, kernel_size, strides, output_shape, data_format)
def log(x)
-
Element-wise log.
Args
x
- Tensor or variable.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.log')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def log(x):
  """Element-wise log.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.math.log(x)
def manual_variable_initialization(value)
-
Sets the manual variable initialization flag.
This boolean flag determines whether variables should be initialized as they are instantiated (default), or if the user should handle the initialization (e.g. via tf.compat.v1.initialize_all_variables()).
Args
value
- Python boolean.
Expand source code
@keras_export('keras.backend.manual_variable_initialization')
@doc_controls.do_not_generate_docs
def manual_variable_initialization(value):
  """Sets the manual variable initialization flag.

  This boolean flag determines whether variables should be initialized
  as they are instantiated (default), or if the user should handle the
  initialization (e.g. via `tf.compat.v1.initialize_all_variables()`).

  Args:
      value: Python boolean.
  """
  global _MANUAL_VAR_INIT
  _MANUAL_VAR_INIT = value
def map_fn(fn, elems, name=None, dtype=None)
-
Map the function fn over the elements elems and return the outputs.
Args
fn
- Callable that will be called upon each element in elems
elems
- tensor
name
- A string name for the map node in the graph
dtype
- Output data type.
Returns
Tensor with dtype dtype.
Expand source code
@keras_export('keras.backend.map_fn')
@doc_controls.do_not_generate_docs
def map_fn(fn, elems, name=None, dtype=None):
  """Map the function fn over the elements elems and return the outputs.

  Args:
      fn: Callable that will be called upon each element in elems
      elems: tensor
      name: A string name for the map node in the graph
      dtype: Output data type.

  Returns:
      Tensor with dtype `dtype`.
  """
  return tf.compat.v1.map_fn(fn, elems, name=name, dtype=dtype)
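For instance (a quick sketch; eager execution assumed, and with dtype=None the output dtype follows elems):
>>> elems = tf.keras.backend.constant([1., 2., 3.])
>>> tf.keras.backend.eval(tf.keras.backend.map_fn(lambda e: e * e, elems))
array([1., 4., 9.], dtype=float32)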
def max(x, axis=None, keepdims=False)
-
Maximum value in a tensor.
Args
x
- A tensor or variable.
axis
- An integer, the axis to find maximum values.
keepdims
- A boolean, whether to keep the dimensions or not. If keepdims is False, the rank of the tensor is reduced by 1. If keepdims is True, the reduced dimension is retained with length 1.
Returns
A tensor with maximum values of x.
Expand source code
@keras_export('keras.backend.max')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def max(x, axis=None, keepdims=False):
  """Maximum value in a tensor.

  Args:
      x: A tensor or variable.
      axis: An integer, the axis to find maximum values.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with maximum values of `x`.
  """
  return tf.reduce_max(x, axis, keepdims)
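For example, a quick sketch (min behaves symmetrically):
>>> x = tf.keras.backend.constant([[1., 2.], [3., 4.]])
>>> tf.keras.backend.eval(tf.keras.backend.max(x, axis=1))
array([2., 4.], dtype=float32)
>>> tf.keras.backend.eval(tf.keras.backend.max(x, axis=1, keepdims=True))
array([[2.],
       [4.]], dtype=float32)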
def maximum(x, y)
-
Element-wise maximum of two tensors.
Args
x
- Tensor or variable.
y
- Tensor or variable.
Returns
A tensor with the element-wise maximum value(s) of x and y.
Examples:
>>> x = tf.Variable([[1, 2], [3, 4]])
>>> y = tf.Variable([[2, 1], [0, -1]])
>>> m = tf.keras.backend.maximum(x, y)
>>> m
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[2, 2],
       [3, 4]], dtype=int32)>
Expand source code
@keras_export('keras.backend.maximum')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def maximum(x, y):
  """Element-wise maximum of two tensors.

  Args:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A tensor with the element wise maximum value(s) of `x` and `y`.

  Examples:

  >>> x = tf.Variable([[1, 2], [3, 4]])
  >>> y = tf.Variable([[2, 1], [0, -1]])
  >>> m = tf.keras.backend.maximum(x, y)
  >>> m
  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
  array([[2, 2],
         [3, 4]], dtype=int32)>
  """
  return tf.maximum(x, y)
def mean(x, axis=None, keepdims=False)
-
Mean of a tensor, alongside the specified axis.
Args
x
- A tensor or variable.
axis
- A list of integers. Axes to compute the mean.
keepdims
- A boolean, whether to keep the dimensions or not. If keepdims is False, the rank of the tensor is reduced by 1 for each entry in axis. If keepdims is True, the reduced dimensions are retained with length 1.
Returns
A tensor with the mean of elements of x.
Expand source code
@keras_export('keras.backend.mean')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def mean(x, axis=None, keepdims=False):
  """Mean of a tensor, alongside the specified axis.

  Args:
      x: A tensor or variable.
      axis: A list of integer. Axes to compute the mean.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1 for each entry in `axis`. If `keepdims` is `True`,
          the reduced dimensions are retained with length 1.

  Returns:
      A tensor with the mean of elements of `x`.
  """
  if x.dtype.base_dtype == tf.bool:
    x = tf.cast(x, floatx())
  return tf.reduce_mean(x, axis, keepdims)
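For instance (a quick sketch; eager execution assumed):
>>> x = tf.keras.backend.constant([[1., 2.], [3., 4.]])
>>> tf.keras.backend.eval(tf.keras.backend.mean(x))
2.5
>>> tf.keras.backend.eval(tf.keras.backend.mean(x, axis=0))
array([2., 3.], dtype=float32)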
def min(x, axis=None, keepdims=False)
-
Minimum value in a tensor.
Args
x
- A tensor or variable.
axis
- An integer, the axis to find minimum values.
keepdims
- A boolean, whether to keep the dimensions or not. If keepdims is False, the rank of the tensor is reduced by 1. If keepdims is True, the reduced dimension is retained with length 1.
Returns
A tensor with minimum values of x.
Expand source code
@keras_export('keras.backend.min')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def min(x, axis=None, keepdims=False):
  """Minimum value in a tensor.

  Args:
      x: A tensor or variable.
      axis: An integer, the axis to find minimum values.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with minimum values of `x`.
  """
  return tf.reduce_min(x, axis, keepdims)
def minimum(x, y)
-
Element-wise minimum of two tensors.
Args
x
- Tensor or variable.
y
- Tensor or variable.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.minimum')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def minimum(x, y):
  """Element-wise minimum of two tensors.

  Args:
      x: Tensor or variable.
      y: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.minimum(x, y)
def moving_average_update(x, value, momentum)
-
Compute the exponential moving average of a value.
The moving average 'x' is updated with 'value' following:
x = x * momentum + value * (1 - momentum)
For example:
>>> x = tf.Variable(0.0)
>>> momentum = 0.9
>>> moving_average_update(x, value=2.0, momentum=momentum).numpy()
>>> x.numpy()
0.2
The result will be biased towards the initial value of the variable.
If the variable was initialized to zero, you can divide by 1 - momentum ** num_updates to debias it (Section 3 of Kingma et al., 2015):
>>> num_updates = 1.0
>>> x_zdb = x/(1 - momentum**num_updates)
>>> x_zdb.numpy()
2.0
Args
x
- A Variable, the moving average.
value
- A tensor with the same shape as x, the new value to be averaged in.
momentum
- The moving average momentum.
Returns
The updated variable.
Expand source code
@keras_export('keras.backend.moving_average_update') @doc_controls.do_not_generate_docs def moving_average_update(x, value, momentum): """Compute the exponential moving average of a value. The moving average 'x' is updated with 'value' following: ``` x = x * momentum + value * (1 - momentum) ``` For example: >>> x = tf.Variable(0.0) >>> momentum=0.9 >>> moving_average_update(x, value = 2.0, momentum=momentum).numpy() >>> x.numpy() 0.2 The result will be biased towards the initial value of the variable. If the variable was initialized to zero, you can divide by `1 - momentum ** num_updates` to debias it (Section 3 of [Kingma et al., 2015](https://arxiv.org/abs/1412.6980)): >>> num_updates = 1.0 >>> x_zdb = x/(1 - momentum**num_updates) >>> x_zdb.numpy() 2.0 Args: x: A Variable, the moving average. value: A tensor with the same shape as `x`, the new value to be averaged in. momentum: The moving average momentum. Returns: The updated variable. """ if tf.__internal__.tf2.enabled(): momentum = tf.cast(momentum, x.dtype) value = tf.cast(value, x.dtype) return x.assign(x * momentum + value * (1 - momentum)) else: return tf.__internal__.train.assign_moving_average( x, value, momentum, zero_debias=True)
def ndim(x)
-
Returns the number of axes in a tensor, as an integer.
Args
x
- Tensor or variable.
Returns
Integer (scalar), number of axes.
Examples
>>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = tf.keras.backend.variable(value=val)
>>> tf.keras.backend.ndim(input)
3
>>> tf.keras.backend.ndim(kvar)
2
Expand source code
@keras_export('keras.backend.ndim') @doc_controls.do_not_generate_docs def ndim(x): """Returns the number of axes in a tensor, as an integer. Args: x: Tensor or variable. Returns: Integer (scalar), number of axes. Examples: >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> val = np.array([[1, 2], [3, 4]]) >>> kvar = tf.keras.backend.variable(value=val) >>> tf.keras.backend.ndim(input) 3 >>> tf.keras.backend.ndim(kvar) 2 """ return x.shape.rank
def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=0.001)
-
Computes mean and std for the batch, then applies batch_normalization to the batch.
Args
x
- Input tensor or variable.
gamma
- Tensor by which to scale the input.
beta
- Tensor with which to center the input.
reduction_axes
- iterable of integers, axes over which to normalize.
epsilon
- Fuzz factor.
Returns
A tuple of length 3: (normalized_tensor, mean, variance).
Expand source code
@keras_export('keras.backend.normalize_batch_in_training') @doc_controls.do_not_generate_docs def normalize_batch_in_training(x, gamma, beta, reduction_axes, epsilon=1e-3): """Computes mean and std for batch then apply batch_normalization on batch. Args: x: Input tensor or variable. gamma: Tensor by which to scale the input. beta: Tensor with which to center the input. reduction_axes: iterable of integers, axes over which to normalize. epsilon: Fuzz factor. Returns: A tuple length of 3, `(normalized_tensor, mean, variance)`. """ if ndim(x) == 4 and list(reduction_axes) in [[0, 1, 2], [0, 2, 3]]: if not _has_nchw_support() and list(reduction_axes) == [0, 2, 3]: return _broadcast_normalize_batch_in_training( x, gamma, beta, reduction_axes, epsilon=epsilon) return _fused_normalize_batch_in_training( x, gamma, beta, reduction_axes, epsilon=epsilon) else: if sorted(reduction_axes) == list(range(ndim(x)))[:-1]: return _regular_normalize_batch_in_training( x, gamma, beta, reduction_axes, epsilon=epsilon) else: return _broadcast_normalize_batch_in_training( x, gamma, beta, reduction_axes, epsilon=epsilon)
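A shape-only sketch (random input; gamma and beta are the per-feature scale and center):
>>> x = tf.random.normal((4, 3))
>>> gamma = tf.ones((3,))
>>> beta = tf.zeros((3,))
>>> normed, mean, var = tf.keras.backend.normalize_batch_in_training(
...     x, gamma, beta, reduction_axes=[0])
>>> [tf.keras.backend.int_shape(t) for t in (normed, mean, var)]
[(4, 3), (3,), (3,)]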
def not_equal(x, y)
-
Element-wise inequality between two tensors.
Args
x
- Tensor or variable.
y
- Tensor or variable.
Returns
A bool tensor.
Expand source code
@keras_export('keras.backend.not_equal') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def not_equal(x, y): """Element-wise inequality between two tensors. Args: x: Tensor or variable. y: Tensor or variable. Returns: A bool tensor. """ return tf.not_equal(x, y)
def one_hot(indices, num_classes)
-
Computes the one-hot representation of an integer tensor.
Args
indices
- nD integer tensor of shape (batch_size, dim1, dim2, ... dim(n-1)).
num_classes
- Integer, number of classes to consider.
Returns
(n + 1)D one-hot representation of the input, with shape (batch_size, dim1, dim2, ... dim(n-1), num_classes).
Expand source code
@keras_export('keras.backend.one_hot')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def one_hot(indices, num_classes):
  """Computes the one-hot representation of an integer tensor.

  Args:
      indices: nD integer tensor of shape
          `(batch_size, dim1, dim2, ... dim(n-1))`
      num_classes: Integer, number of classes to consider.

  Returns:
      (n + 1)D one hot representation of the input
      with shape `(batch_size, dim1, dim2, ... dim(n-1), num_classes)`

  Returns:
      The one-hot tensor.
  """
  return tf.one_hot(indices, depth=num_classes, axis=-1)
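For example, a quick sketch (eager execution assumed):
>>> indices = tf.constant([0, 2, 1])
>>> tf.keras.backend.eval(tf.keras.backend.one_hot(indices, num_classes=3))
array([[1., 0., 0.],
       [0., 0., 1.],
       [0., 1., 0.]], dtype=float32)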
def ones(shape, dtype=None, name=None)
-
Instantiates an all-ones variable and returns it.
Args
shape
- Tuple of integers, shape of returned Keras variable.
dtype
- String, data type of returned Keras variable.
name
- String, name of returned Keras variable.
Returns
A Keras variable, filled with 1.0. Note that if shape was symbolic, we cannot return a variable, and will return a dynamically-shaped tensor instead.
Example
>>> kvar = tf.keras.backend.ones((3,4))
>>> tf.keras.backend.eval(kvar)
array([[1., 1., 1., 1.],
       [1., 1., 1., 1.],
       [1., 1., 1., 1.]], dtype=float32)
Expand source code
@keras_export('keras.backend.ones') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def ones(shape, dtype=None, name=None): """Instantiates an all-ones variable and returns it. Args: shape: Tuple of integers, shape of returned Keras variable. dtype: String, data type of returned Keras variable. name: String, name of returned Keras variable. Returns: A Keras variable, filled with `1.0`. Note that if `shape` was symbolic, we cannot return a variable, and will return a dynamically-shaped tensor instead. Example: >>> kvar = tf.keras.backend.ones((3,4)) >>> tf.keras.backend.eval(kvar) array([[1., 1., 1., 1.], [1., 1., 1., 1.], [1., 1., 1., 1.]], dtype=float32) """ with tf.init_scope(): if dtype is None: dtype = floatx() tf_dtype = tf.as_dtype(dtype) v = tf.ones(shape=shape, dtype=tf_dtype, name=name) if py_all(v.shape.as_list()): return variable(v, dtype=dtype, name=name) return v
def ones_like(x, dtype=None, name=None)
-
Instantiates an all-ones variable of the same shape as another tensor.
Args
x
- Keras variable or tensor.
dtype
- String, dtype of returned Keras variable. None uses the dtype of x.
name
- String, name for the variable to create.
Returns
A Keras variable with the shape of x filled with ones.
Example:
>>> kvar = tf.keras.backend.variable(np.random.random((2,3)))
>>> kvar_ones = tf.keras.backend.ones_like(kvar)
>>> tf.keras.backend.eval(kvar_ones)
array([[1., 1., 1.],
       [1., 1., 1.]], dtype=float32)
Expand source code
@keras_export('keras.backend.ones_like') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def ones_like(x, dtype=None, name=None): """Instantiates an all-ones variable of the same shape as another tensor. Args: x: Keras variable or tensor. dtype: String, dtype of returned Keras variable. None uses the dtype of x. name: String, name for the variable to create. Returns: A Keras variable with the shape of x filled with ones. Example: >>> kvar = tf.keras.backend.variable(np.random.random((2,3))) >>> kvar_ones = tf.keras.backend.ones_like(kvar) >>> tf.keras.backend.eval(kvar_ones) array([[1., 1., 1.], [1., 1., 1.]], dtype=float32) """ return tf.ones_like(x, dtype=dtype, name=name)
def permute_dimensions(x, pattern)
-
Permutes axes in a tensor.
Args
x
- Tensor or variable.
pattern
- A tuple of dimension indices, e.g. (0, 2, 1).
Returns
A tensor.
Example
>>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
>>> a
<tf.Tensor: shape=(4, 3), dtype=int32, numpy=
array([[ 1,  2,  3],
       [ 4,  5,  6],
       [ 7,  8,  9],
       [10, 11, 12]], dtype=int32)>
>>> tf.keras.backend.permute_dimensions(a, pattern=(1, 0))
<tf.Tensor: shape=(3, 4), dtype=int32, numpy=
array([[ 1,  4,  7, 10],
       [ 2,  5,  8, 11],
       [ 3,  6,  9, 12]], dtype=int32)>
Expand source code
@keras_export('keras.backend.permute_dimensions') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def permute_dimensions(x, pattern): """Permutes axes in a tensor. Args: x: Tensor or variable. pattern: A tuple of dimension indices, e.g. `(0, 2, 1)`. Returns: A tensor. Example: >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) >>> a <tf.Tensor: shape=(4, 3), dtype=int32, numpy= array([[ 1, 2, 3], [ 4, 5, 6], [ 7, 8, 9], [10, 11, 12]], dtype=int32)> >>> tf.keras.backend.permute_dimensions(a, pattern=(1, 0)) <tf.Tensor: shape=(3, 4), dtype=int32, numpy= array([[ 1, 4, 7, 10], [ 2, 5, 8, 11], [ 3, 6, 9, 12]], dtype=int32)> """ return tf.compat.v1.transpose(x, perm=pattern)
def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None, ragged=False)
-
Instantiates a placeholder tensor and returns it.
Args
shape
- Shape of the placeholder (integer tuple, may include None entries).
ndim
- Number of axes of the tensor. At least one of {shape, ndim} must be specified. If both are specified, shape is used.
dtype
- Placeholder type.
sparse
- Boolean, whether the placeholder should have a sparse type.
name
- Optional name string for the placeholder.
ragged
- Boolean, whether the placeholder should have a ragged type. In this case, values of None in the shape argument represent ragged dimensions. For more information about RaggedTensors, see this guide: https://www.tensorflow.org/guide/ragged_tensors
Raises
ValueError
- If called with sparse = True and ragged = True.
Returns
Tensor instance (with Keras metadata included).
Examples
>>> input_ph = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> input_ph
<KerasTensor: shape=(2, 4, 5) dtype=float32 (created by layer ...)>
Expand source code
@keras_export('keras.backend.placeholder') @doc_controls.do_not_generate_docs def placeholder(shape=None, ndim=None, dtype=None, sparse=False, name=None, ragged=False): """Instantiates a placeholder tensor and returns it. Args: shape: Shape of the placeholder (integer tuple, may include `None` entries). ndim: Number of axes of the tensor. At least one of {`shape`, `ndim`} must be specified. If both are specified, `shape` is used. dtype: Placeholder type. sparse: Boolean, whether the placeholder should have a sparse type. name: Optional name string for the placeholder. ragged: Boolean, whether the placeholder should have a ragged type. In this case, values of 'None' in the 'shape' argument represent ragged dimensions. For more information about RaggedTensors, see this [guide](https://www.tensorflow.org/guide/ragged_tensors). Raises: ValueError: If called with sparse = True and ragged = True. Returns: Tensor instance (with Keras metadata included). Examples: >>> input_ph = tf.keras.backend.placeholder(shape=(2, 4, 5)) >>> input_ph <KerasTensor: shape=(2, 4, 5) dtype=float32 (created by layer ...)> """ if sparse and ragged: raise ValueError( 'Cannot set both sparse and ragged to True when creating a placeholder.' ) if dtype is None: dtype = floatx() if not shape: if ndim: shape = (None,) * ndim if tf.compat.v1.executing_eagerly_outside_functions(): if sparse: spec = tf.SparseTensorSpec( shape=shape, dtype=dtype) elif ragged: ragged_rank = 0 for i in range(1, len(shape)): # Hacky because could be tensorshape or tuple maybe? # Or just tensorshape? if shape[i] is None or ( hasattr(shape[i], 'value') and shape[i].value is None): ragged_rank = i spec = tf.RaggedTensorSpec( shape=shape, dtype=dtype, ragged_rank=ragged_rank) else: spec = tf.TensorSpec( shape=shape, dtype=dtype, name=name) x = keras_tensor.keras_tensor_from_type_spec(spec, name=name) else: with get_graph().as_default(): if sparse: x = tf.compat.v1.sparse_placeholder(dtype, shape=shape, name=name) elif ragged: ragged_rank = 0 for i in range(1, len(shape)): if shape[i] is None: ragged_rank = i type_spec = tf.RaggedTensorSpec( shape=shape, dtype=dtype, ragged_rank=ragged_rank) def tensor_spec_to_placeholder(tensorspec): return tf.compat.v1.placeholder(tensorspec.dtype, tensorspec.shape) x = tf.nest.map_structure(tensor_spec_to_placeholder, type_spec, expand_composites=True) else: x = tf.compat.v1.placeholder(dtype, shape=shape, name=name) if tf.executing_eagerly(): # Add keras_history connectivity information to the placeholder # when the placeholder is built in a top-level eager context # (intended to be used with keras.backend.function) from keras.engine import input_layer # pylint: disable=g-import-not-at-top x = input_layer.Input(tensor=x) x._is_backend_placeholder = True return x
def pool2d(x, pool_size, strides=(1, 1), padding='valid', data_format=None, pool_mode='max')
-
2D Pooling.
Args
x
- Tensor or variable.
pool_size
- tuple of 2 integers.
strides
- tuple of 2 integers.
padding
- string, "same" or "valid".
data_format
- string, "channels_last" or "channels_first".
pool_mode
- string, "max" or "avg".
Returns
A tensor, result of 2D pooling.
Raises
ValueError
- if data_format is neither "channels_last" nor "channels_first".
ValueError
- if pool_size is not a tuple of 2 integers.
ValueError
- if strides is not a tuple of 2 integers.
ValueError
- if pool_mode is neither "max" nor "avg".
Expand source code
@keras_export('keras.backend.pool2d') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def pool2d(x, pool_size, strides=(1, 1), padding='valid', data_format=None, pool_mode='max'): """2D Pooling. Args: x: Tensor or variable. pool_size: tuple of 2 integers. strides: tuple of 2 integers. padding: string, `"same"` or `"valid"`. data_format: string, `"channels_last"` or `"channels_first"`. pool_mode: string, `"max"` or `"avg"`. Returns: A tensor, result of 2D pooling. Raises: ValueError: if `data_format` is neither `"channels_last"` or `"channels_first"`. ValueError: if `pool_size` is not a tuple of 2 integers. ValueError: if `strides` is not a tuple of 2 integers. ValueError: if `pool_mode` is neither `"max"` or `"avg"`. """ if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format: ' + str(data_format)) if len(pool_size) != 2: raise ValueError('`pool_size` must be a tuple of 2 integers.') if len(strides) != 2: raise ValueError('`strides` must be a tuple of 2 integers.') x, tf_data_format = _preprocess_conv2d_input(x, data_format) padding = _preprocess_padding(padding) if tf_data_format == 'NHWC': strides = (1,) + strides + (1,) pool_size = (1,) + pool_size + (1,) else: strides = (1, 1) + strides pool_size = (1, 1) + pool_size if pool_mode == 'max': x = tf.compat.v1.nn.max_pool( x, pool_size, strides, padding=padding, data_format=tf_data_format) elif pool_mode == 'avg': x = tf.compat.v1.nn.avg_pool( x, pool_size, strides, padding=padding, data_format=tf_data_format) else: raise ValueError('Invalid pooling mode: ' + str(pool_mode)) if data_format == 'channels_first' and tf_data_format == 'NHWC': x = tf.compat.v1.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW return x
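For illustration (not from the original docstring), max-pooling a single-channel 4x4 input with a 2x2 window and stride 2, which keeps the maximum of each block:
>>> x = tf.reshape(tf.range(16, dtype='float32'), (1, 4, 4, 1))
>>> y = tf.keras.backend.pool2d(x, pool_size=(2, 2), strides=(2, 2),
...                             data_format='channels_last', pool_mode='max')
>>> tf.keras.backend.eval(tf.keras.backend.flatten(y))
array([ 5.,  7., 13., 15.], dtype=float32)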
def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max')
-
3D Pooling.
Args
x
- Tensor or variable.
pool_size
- tuple of 3 integers.
strides
- tuple of 3 integers.
padding
- string, "same" or "valid".
data_format
- string, "channels_last" or "channels_first".
pool_mode
- string, "max" or "avg".
Returns
A tensor, result of 3D pooling.
Raises
ValueError
- if data_format is neither "channels_last" nor "channels_first".
ValueError
- if pool_mode is neither "max" nor "avg".
Expand source code
@keras_export('keras.backend.pool3d') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def pool3d(x, pool_size, strides=(1, 1, 1), padding='valid', data_format=None, pool_mode='max'): """3D Pooling. Args: x: Tensor or variable. pool_size: tuple of 3 integers. strides: tuple of 3 integers. padding: string, `"same"` or `"valid"`. data_format: string, `"channels_last"` or `"channels_first"`. pool_mode: string, `"max"` or `"avg"`. Returns: A tensor, result of 3D pooling. Raises: ValueError: if `data_format` is neither `"channels_last"` or `"channels_first"`. ValueError: if `pool_mode` is neither `"max"` or `"avg"`. """ if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format: ' + str(data_format)) x, tf_data_format = _preprocess_conv3d_input(x, data_format) padding = _preprocess_padding(padding) if tf_data_format == 'NDHWC': strides = (1,) + strides + (1,) pool_size = (1,) + pool_size + (1,) else: strides = (1, 1) + strides pool_size = (1, 1) + pool_size if pool_mode == 'max': x = tf.nn.max_pool3d( x, pool_size, strides, padding=padding, data_format=tf_data_format) elif pool_mode == 'avg': x = tf.nn.avg_pool3d( x, pool_size, strides, padding=padding, data_format=tf_data_format) else: raise ValueError('Invalid pooling mode: ' + str(pool_mode)) if data_format == 'channels_first' and tf_data_format == 'NDHWC': x = tf.compat.v1.transpose(x, (0, 4, 1, 2, 3)) return x
def pow(x, a)
-
Element-wise exponentiation.
Args
x
- Tensor or variable.
a
- Python integer.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.pow')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def pow(x, a):
  """Element-wise exponentiation.

  Args:
      x: Tensor or variable.
      a: Python integer.

  Returns:
      A tensor.
  """
  return tf.pow(x, a)
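A brief illustrative example (not from the original docstring):
>>> x = tf.constant([1., 2., 3.])
>>> tf.keras.backend.eval(tf.keras.backend.pow(x, 2))
array([1., 4., 9.], dtype=float32)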
def print_tensor(x, message='', summarize=3)
-
Prints message and the tensor value when evaluated.
Note that print_tensor returns a new tensor identical to x which should be used in the following code. Otherwise the print operation is not taken into account during evaluation.
Example:
>>> x = tf.constant([[1.0, 2.0], [3.0, 4.0]])
>>> tf.keras.backend.print_tensor(x)
<tf.Tensor: shape=(2, 2), dtype=float32, numpy=
array([[1., 2.],
       [3., 4.]], dtype=float32)>
Args
x
- Tensor to print.
message
- Message to print jointly with the tensor.
summarize
- The first and last summarize elements within each dimension are recursively printed per Tensor. If None, then the first 3 and last 3 elements of each dimension are printed for each tensor. If set to -1, it will print all elements of every tensor.
Returns
The same tensor x, unchanged.
Expand source code
@keras_export('keras.backend.print_tensor') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def print_tensor(x, message='', summarize=3): """Prints `message` and the tensor value when evaluated. Note that `print_tensor` returns a new tensor identical to `x` which should be used in the following code. Otherwise the print operation is not taken into account during evaluation. Example: >>> x = tf.constant([[1.0, 2.0], [3.0, 4.0]]) >>> tf.keras.backend.print_tensor(x) <tf.Tensor: shape=(2, 2), dtype=float32, numpy= array([[1., 2.], [3., 4.]], dtype=float32)> Args: x: Tensor to print. message: Message to print jointly with the tensor. summarize: The first and last `summarize` elements within each dimension are recursively printed per Tensor. If None, then the first 3 and last 3 elements of each dimension are printed for each tensor. If set to -1, it will print all elements of every tensor. Returns: The same tensor `x`, unchanged. """ if isinstance(x, tf.Tensor) and hasattr(x, 'graph'): with get_graph().as_default(): op = tf.print( message, x, output_stream=sys.stdout, summarize=summarize) with tf.control_dependencies([op]): return tf.identity(x) else: tf.print( message, x, output_stream=sys.stdout, summarize=summarize) return x
def prod(x, axis=None, keepdims=False)
-
Multiplies the values in a tensor, alongside the specified axis.
Args
x
- A tensor or variable.
axis
- An integer, the axis to compute the product.
keepdims
- A boolean, whether to keep the dimensions or not. If keepdims is False, the rank of the tensor is reduced by 1. If keepdims is True, the reduced dimension is retained with length 1.
Returns
A tensor with the product of elements of x.
Expand source code
@keras_export('keras.backend.prod')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def prod(x, axis=None, keepdims=False):
  """Multiplies the values in a tensor, alongside the specified axis.

  Args:
      x: A tensor or variable.
      axis: An integer, the axis to compute the product.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with the product of elements of `x`.
  """
  return tf.reduce_prod(x, axis, keepdims)
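A brief illustrative example (not from the original docstring); with the default axis=None the product runs over all elements:
>>> x = tf.constant([[1., 2.], [3., 4.]])
>>> tf.keras.backend.eval(tf.keras.backend.prod(x))
24.0
>>> tf.keras.backend.eval(tf.keras.backend.prod(x, axis=1))
array([ 2., 12.], dtype=float32)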
def random_bernoulli(shape, p=0.0, dtype=None, seed=None)
-
Returns a tensor with random Bernoulli distribution of values.
Args
shape
- A tuple of integers, the shape of tensor to create.
p
- A float, 0. <= p <= 1, probability of the Bernoulli distribution.
dtype
- String, dtype of returned tensor.
seed
- Integer, random seed.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.random_bernoulli')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def random_bernoulli(shape, p=0.0, dtype=None, seed=None):
  """Returns a tensor with random bernoulli distribution of values.

  Args:
      shape: A tuple of integers, the shape of tensor to create.
      p: A float, `0. <= p <= 1`, probability of bernoulli distribution.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.

  Returns:
      A tensor.
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    seed = np.random.randint(10e6)
  return tf.where(
      tf.random.uniform(shape, dtype=dtype, seed=seed) <= p,
      tf.ones(shape, dtype=dtype),
      tf.zeros(shape, dtype=dtype))
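An illustrative sketch (not from the original docstring); the entries are 0s and 1s drawn with probability p, so only the shape is shown:
>>> t = tf.keras.backend.random_bernoulli(shape=(2, 3), p=0.5)
>>> t.shape
TensorShape([2, 3])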
def random_binomial(shape, p=0.0, dtype=None, seed=None)
-
Returns a tensor with random binomial distribution of values.
DEPRECATED, use tf.keras.backend.random_bernoulli instead.
The binomial distribution with parameters n and p is the probability distribution of the number of successes in n independent Bernoulli trials. Only supports n = 1 for now.
Args
shape
- A tuple of integers, the shape of tensor to create.
p
- A float, 0. <= p <= 1, probability of the binomial distribution.
dtype
- String, dtype of returned tensor.
seed
- Integer, random seed.
Returns
A tensor.
Example:
>>> random_binomial_tensor = tf.keras.backend.random_binomial(shape=(2,3),
...                                                           p=0.5)
>>> random_binomial_tensor
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=..., dtype=float32)>
Expand source code
@keras_export('keras.backend.random_binomial') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def random_binomial(shape, p=0.0, dtype=None, seed=None): """Returns a tensor with random binomial distribution of values. DEPRECATED, use `tf.keras.backend.random_bernoulli` instead. The binomial distribution with parameters `n` and `p` is the probability distribution of the number of successful Bernoulli process. Only supports `n` = 1 for now. Args: shape: A tuple of integers, the shape of tensor to create. p: A float, `0. <= p <= 1`, probability of binomial distribution. dtype: String, dtype of returned tensor. seed: Integer, random seed. Returns: A tensor. Example: >>> random_binomial_tensor = tf.keras.backend.random_binomial(shape=(2,3), ... p=0.5) >>> random_binomial_tensor <tf.Tensor: shape=(2, 3), dtype=float32, numpy=..., dtype=float32)> """ warnings.warn('`tf.keras.backend.random_binomial` is deprecated, ' 'and will be removed in a future version.' 'Please use `tf.keras.backend.random_bernoulli` instead.') return random_bernoulli(shape, p, dtype, seed)
def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None)
-
Returns a tensor with normal distribution of values.
It is an alias to tf.random.normal.
Args
shape
- A tuple of integers, the shape of tensor to create.
mean
- A float, the mean value of the normal distribution to draw samples. Defaults to 0.0.
stddev
- A float, the standard deviation of the normal distribution to draw samples. Defaults to 1.0.
dtype
- tf.dtypes.DType, dtype of returned tensor. Defaults to the Keras backend dtype, which is float32.
seed
- Integer, random seed. Will use a random numpy integer when not specified.
Returns
A tensor with normal distribution of values.
Example:
>>> random_normal_tensor = tf.keras.backend.random_normal(shape=(2,3),
...                                                       mean=0.0, stddev=1.0)
>>> random_normal_tensor
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=..., dtype=float32)>
Expand source code
@keras_export('keras.backend.random_normal') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def random_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None): """Returns a tensor with normal distribution of values. It is an alias to `tf.random.normal`. Args: shape: A tuple of integers, the shape of tensor to create. mean: A float, the mean value of the normal distribution to draw samples. Default to 0.0. stddev: A float, the standard deviation of the normal distribution to draw samples. Default to 1.0. dtype: `tf.dtypes.DType`, dtype of returned tensor. Default to use Keras backend dtype which is float32. seed: Integer, random seed. Will use a random numpy integer when not specified. Returns: A tensor with normal distribution of values. Example: >>> random_normal_tensor = tf.keras.backend.random_normal(shape=(2,3), ... mean=0.0, stddev=1.0) >>> random_normal_tensor <tf.Tensor: shape=(2, 3), dtype=float32, numpy=..., dtype=float32)> """ if dtype is None: dtype = floatx() if seed is None: seed = np.random.randint(10e6) return tf.random.normal( shape, mean=mean, stddev=stddev, dtype=dtype, seed=seed)
def random_normal_variable(shape, mean, scale, dtype=None, name=None, seed=None)
-
Instantiates a variable with values drawn from a normal distribution.
Args
shape
- Tuple of integers, shape of returned Keras variable.
mean
- Float, mean of the normal distribution.
scale
- Float, standard deviation of the normal distribution.
dtype
- String, dtype of returned Keras variable.
name
- String, name of returned Keras variable.
seed
- Integer, random seed.
Returns
A Keras variable, filled with drawn samples.
Example:
>>> kvar = tf.keras.backend.random_normal_variable(shape=(2,3),
...                                                mean=0.0, scale=1.0)
>>> kvar
<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=..., dtype=float32)>
Expand source code
@keras_export('keras.backend.random_normal_variable') @doc_controls.do_not_generate_docs def random_normal_variable(shape, mean, scale, dtype=None, name=None, seed=None): """Instantiates a variable with values drawn from a normal distribution. Args: shape: Tuple of integers, shape of returned Keras variable. mean: Float, mean of the normal distribution. scale: Float, standard deviation of the normal distribution. dtype: String, dtype of returned Keras variable. name: String, name of returned Keras variable. seed: Integer, random seed. Returns: A Keras variable, filled with drawn samples. Example: >>> kvar = tf.keras.backend.random_normal_variable(shape=(2,3), ... mean=0.0, scale=1.0) >>> kvar <tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=..., dtype=float32)> """ if dtype is None: dtype = floatx() tf_dtype = tf.as_dtype(dtype) if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e8) value = tf.compat.v1.random_normal_initializer( mean, scale, dtype=tf_dtype, seed=seed)(shape) return variable(value, dtype=dtype, name=name)
def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None)
-
Returns a tensor with uniform distribution of values.
Args
shape
- A tuple of integers, the shape of tensor to create.
minval
- A float, lower boundary of the uniform distribution to draw samples.
maxval
- A float, upper boundary of the uniform distribution to draw samples.
dtype
- String, dtype of returned tensor.
seed
- Integer, random seed.
Returns
A tensor.
Example:
>>> random_uniform_tensor = tf.keras.backend.random_uniform(shape=(2,3),
...                                                         minval=0.0, maxval=1.0)
>>> random_uniform_tensor
<tf.Tensor: shape=(2, 3), dtype=float32, numpy=..., dtype=float32)>
Expand source code
@keras_export('keras.backend.random_uniform') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def random_uniform(shape, minval=0.0, maxval=1.0, dtype=None, seed=None): """Returns a tensor with uniform distribution of values. Args: shape: A tuple of integers, the shape of tensor to create. minval: A float, lower boundary of the uniform distribution to draw samples. maxval: A float, upper boundary of the uniform distribution to draw samples. dtype: String, dtype of returned tensor. seed: Integer, random seed. Returns: A tensor. Example: >>> random_uniform_tensor = tf.keras.backend.random_uniform(shape=(2,3), ... minval=0.0, maxval=1.0) >>> random_uniform_tensor <tf.Tensor: shape=(2, 3), dtype=float32, numpy=..., dtype=float32)> """ if dtype is None: dtype = floatx() if seed is None: seed = np.random.randint(10e6) return tf.random.uniform( shape, minval=minval, maxval=maxval, dtype=dtype, seed=seed)
def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None)
-
Instantiates a variable with values drawn from a uniform distribution.
Args
shape
- Tuple of integers, shape of returned Keras variable.
low
- Float, lower boundary of the output interval.
high
- Float, upper boundary of the output interval.
dtype
- String, dtype of returned Keras variable.
name
- String, name of returned Keras variable.
seed
- Integer, random seed.
Returns
A Keras variable, filled with drawn samples.
Example:
>>> kvar = tf.keras.backend.random_uniform_variable(shape=(2,3),
...                                                 low=0.0, high=1.0)
>>> kvar
<tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=..., dtype=float32)>
Expand source code
@keras_export('keras.backend.random_uniform_variable') @doc_controls.do_not_generate_docs def random_uniform_variable(shape, low, high, dtype=None, name=None, seed=None): """Instantiates a variable with values drawn from a uniform distribution. Args: shape: Tuple of integers, shape of returned Keras variable. low: Float, lower boundary of the output interval. high: Float, upper boundary of the output interval. dtype: String, dtype of returned Keras variable. name: String, name of returned Keras variable. seed: Integer, random seed. Returns: A Keras variable, filled with drawn samples. Example: >>> kvar = tf.keras.backend.random_uniform_variable(shape=(2,3), ... low=0.0, high=1.0) >>> kvar <tf.Variable 'Variable:0' shape=(2, 3) dtype=float32, numpy=..., dtype=float32)> """ if dtype is None: dtype = floatx() tf_dtype = tf.as_dtype(dtype) if seed is None: # ensure that randomness is conditioned by the Numpy RNG seed = np.random.randint(10e8) value = tf.compat.v1.random_uniform_initializer( low, high, dtype=tf_dtype, seed=seed)(shape) return variable(value, dtype=dtype, name=name)
def relu(x, alpha=0.0, max_value=None, threshold=0)
-
Rectified linear unit.
With default values, it returns element-wise max(x, 0).
Otherwise, it follows:
f(x) = max_value for x >= max_value,
f(x) = x for threshold <= x < max_value,
f(x) = alpha * (x - threshold) otherwise.
Args
x
- A tensor or variable.
alpha
- A scalar, slope of negative section (default=0.).
max_value
- float. Saturation threshold.
threshold
- float. Threshold value for thresholded activation.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.relu') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def relu(x, alpha=0., max_value=None, threshold=0): """Rectified linear unit. With default values, it returns element-wise `max(x, 0)`. Otherwise, it follows: `f(x) = max_value` for `x >= max_value`, `f(x) = x` for `threshold <= x < max_value`, `f(x) = alpha * (x - threshold)` otherwise. Args: x: A tensor or variable. alpha: A scalar, slope of negative section (default=`0.`). max_value: float. Saturation threshold. threshold: float. Threshold value for thresholded activation. Returns: A tensor. """ # While x can be a tensor or variable, we also see cases where # numpy arrays, lists, tuples are passed as well. # lists, tuples do not have 'dtype' attribute. dtype = getattr(x, 'dtype', floatx()) if alpha != 0.: if max_value is None and threshold == 0: return tf.nn.leaky_relu(x, alpha=alpha) if threshold != 0: negative_part = tf.nn.relu(-x + threshold) else: negative_part = tf.nn.relu(-x) clip_max = max_value is not None if threshold != 0: # computes x for x > threshold else 0 x = x * tf.cast(tf.greater(x, threshold), dtype=dtype) elif max_value == 6: # if no threshold, then can use nn.relu6 native TF op for performance x = tf.nn.relu6(x) clip_max = False else: x = tf.nn.relu(x) if clip_max: max_value = _constant_to_tensor(max_value, x.dtype.base_dtype) zero = _constant_to_tensor(0, x.dtype.base_dtype) x = tf.clip_by_value(x, zero, max_value) if alpha != 0.: alpha = _to_tensor(alpha, x.dtype.base_dtype) x -= alpha * negative_part return x
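For illustration (not from the original docstring), the default case and a combined alpha/max_value case; the second call scales the negative section by 0.1 and saturates at 6:
>>> x = tf.constant([-10., -1., 0., 2., 10.])
>>> tf.keras.backend.eval(tf.keras.backend.relu(x))
array([ 0.,  0.,  0.,  2., 10.], dtype=float32)
>>> tf.keras.backend.eval(tf.keras.backend.relu(x, alpha=0.1, max_value=6.))
array([-1. , -0.1,  0. ,  2. ,  6. ], dtype=float32)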
def repeat(x, n)
-
Repeats a 2D tensor.
If x has shape (samples, dim) and n is 2, the output will have shape (samples, 2, dim).
Args
x
- Tensor or variable.
n
- Python integer, number of times to repeat.
Returns
A tensor.
Example
>>> b = tf.constant([[1, 2], [3, 4]])
>>> b
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[1, 2],
       [3, 4]], dtype=int32)>
>>> tf.keras.backend.repeat(b, n=2)
<tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
array([[[1, 2],
        [1, 2]],
       [[3, 4],
        [3, 4]]], dtype=int32)>
Expand source code
@keras_export('keras.backend.repeat') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def repeat(x, n): """Repeats a 2D tensor. if `x` has shape (samples, dim) and `n` is `2`, the output will have shape `(samples, 2, dim)`. Args: x: Tensor or variable. n: Python integer, number of times to repeat. Returns: A tensor. Example: >>> b = tf.constant([[1, 2], [3, 4]]) >>> b <tf.Tensor: shape=(2, 2), dtype=int32, numpy= array([[1, 2], [3, 4]], dtype=int32)> >>> tf.keras.backend.repeat(b, n=2) <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy= array([[[1, 2], [1, 2]], [[3, 4], [3, 4]]], dtype=int32)> """ assert ndim(x) == 2 x = tf.expand_dims(x, 1) pattern = tf.stack([1, n, 1]) return tf.tile(x, pattern)
def repeat_elements(x, rep, axis)
-
Repeats the elements of a tensor along an axis, like np.repeat.
If x has shape (s1, s2, s3) and axis is 1, the output will have shape (s1, s2 * rep, s3).
Args
x
- Tensor or variable.
rep
- Python integer, number of times to repeat.
axis
- Axis along which to repeat.
Returns
A tensor.
Example
>>> b = tf.constant([1, 2, 3])
>>> tf.keras.backend.repeat_elements(b, rep=2, axis=0)
<tf.Tensor: shape=(6,), dtype=int32, numpy=array([1, 1, 2, 2, 3, 3], dtype=int32)>
Expand source code
@keras_export('keras.backend.repeat_elements') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def repeat_elements(x, rep, axis): """Repeats the elements of a tensor along an axis, like `np.repeat`. If `x` has shape `(s1, s2, s3)` and `axis` is `1`, the output will have shape `(s1, s2 * rep, s3)`. Args: x: Tensor or variable. rep: Python integer, number of times to repeat. axis: Axis along which to repeat. Returns: A tensor. Example: >>> b = tf.constant([1, 2, 3]) >>> tf.keras.backend.repeat_elements(b, rep=2, axis=0) <tf.Tensor: shape=(6,), dtype=int32, numpy=array([1, 1, 2, 2, 3, 3], dtype=int32)> """ x_shape = x.shape.as_list() # For static axis if x_shape[axis] is not None: # slices along the repeat axis splits = tf.split(value=x, num_or_size_splits=x_shape[axis], axis=axis) # repeat each slice the given number of reps x_rep = [s for s in splits for _ in range(rep)] return concatenate(x_rep, axis) # Here we use tf.tile to mimic behavior of np.repeat so that # we can handle dynamic shapes (that include None). # To do that, we need an auxiliary axis to repeat elements along # it and then merge them along the desired axis. # Repeating auxiliary_axis = axis + 1 x_shape = tf.shape(x) x_rep = tf.expand_dims(x, axis=auxiliary_axis) reps = np.ones(len(x.shape) + 1) reps[auxiliary_axis] = rep x_rep = tf.tile(x_rep, reps) # Merging reps = np.delete(reps, auxiliary_axis) reps[axis] = rep reps = tf.constant(reps, dtype='int32') x_shape *= reps x_rep = tf.reshape(x_rep, x_shape) # Fix shape representation x_shape = x.shape.as_list() x_rep.set_shape(x_shape) x_rep._keras_shape = tuple(x_shape) return x_rep
def reset_uids()
-
Resets graph identifiers.
Expand source code
@keras_export('keras.backend.reset_uids')
def reset_uids():
  """Resets graph identifiers."""
  PER_GRAPH_OBJECT_NAME_UIDS.clear()
  OBSERVED_NAMES.clear()
def reshape(x, shape)
-
Reshapes a tensor to the specified shape.
Args
x
- Tensor or variable.
shape
- Target shape tuple.
Returns
A tensor.
Example
>>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]])
>>> a
<tf.Tensor: shape=(4, 3), dtype=int32, numpy=
array([[ 1,  2,  3],
       [ 4,  5,  6],
       [ 7,  8,  9],
       [10, 11, 12]], dtype=int32)>
>>> tf.keras.backend.reshape(a, shape=(2, 6))
<tf.Tensor: shape=(2, 6), dtype=int32, numpy=
array([[ 1,  2,  3,  4,  5,  6],
       [ 7,  8,  9, 10, 11, 12]], dtype=int32)>
Expand source code
@keras_export('keras.backend.reshape') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def reshape(x, shape): """Reshapes a tensor to the specified shape. Args: x: Tensor or variable. shape: Target shape tuple. Returns: A tensor. Example: >>> a = tf.constant([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]) >>> a <tf.Tensor: shape=(4, 3), dtype=int32, numpy= array([[ 1, 2, 3], [ 4, 5, 6], [ 7, 8, 9], [10, 11, 12]], dtype=int32)> >>> tf.keras.backend.reshape(a, shape=(2, 6)) <tf.Tensor: shape=(2, 6), dtype=int32, numpy= array([[ 1, 2, 3, 4, 5, 6], [ 7, 8, 9, 10, 11, 12]], dtype=int32)> """ return tf.reshape(x, shape)
def resize_images(x, height_factor, width_factor, data_format, interpolation='nearest')
-
Resizes the images contained in a 4D tensor.
Args
x
- Tensor or variable to resize.
height_factor
- Positive integer.
width_factor
- Positive integer.
data_format
- One of "channels_first", "channels_last".
interpolation
- A string, one of nearest or bilinear.
Returns
A tensor.
Raises
ValueError
- in case of incorrect value for data_format or interpolation.
Expand source code
@keras_export('keras.backend.resize_images') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def resize_images(x, height_factor, width_factor, data_format, interpolation='nearest'): """Resizes the images contained in a 4D tensor. Args: x: Tensor or variable to resize. height_factor: Positive integer. width_factor: Positive integer. data_format: One of `"channels_first"`, `"channels_last"`. interpolation: A string, one of `nearest` or `bilinear`. Returns: A tensor. Raises: ValueError: in case of incorrect value for `data_format` or `interpolation`. """ if data_format == 'channels_first': rows, cols = 2, 3 elif data_format == 'channels_last': rows, cols = 1, 2 else: raise ValueError('Invalid `data_format` argument: %s' % (data_format,)) new_shape = x.shape[rows:cols + 1] if new_shape.is_fully_defined(): new_shape = tf.constant(new_shape.as_list(), dtype='int32') else: new_shape = tf.shape(x)[rows:cols + 1] new_shape *= tf.constant( np.array([height_factor, width_factor], dtype='int32')) if data_format == 'channels_first': x = permute_dimensions(x, [0, 2, 3, 1]) if interpolation == 'nearest': x = tf.image.resize( x, new_shape, method=tf.image.ResizeMethod.NEAREST_NEIGHBOR) elif interpolation == 'bilinear': x = tf.image.resize(x, new_shape, method=tf.image.ResizeMethod.BILINEAR) else: raise ValueError('interpolation should be one ' 'of "nearest" or "bilinear".') if data_format == 'channels_first': x = permute_dimensions(x, [0, 3, 1, 2]) return x
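An illustrative sketch (not from the original docstring), doubling the spatial dimensions of a batch of 2x2 three-channel images:
>>> x = tf.ones((1, 2, 2, 3))
>>> y = tf.keras.backend.resize_images(x, height_factor=2, width_factor=2,
...                                    data_format='channels_last')
>>> y.shape
TensorShape([1, 4, 4, 3])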
def resize_volumes(x, depth_factor, height_factor, width_factor, data_format)
-
Resizes the volume contained in a 5D tensor.
Args
x
- Tensor or variable to resize.
depth_factor
- Positive integer.
height_factor
- Positive integer.
width_factor
- Positive integer.
data_format
- One of "channels_first", "channels_last".
Returns
A tensor.
Raises
ValueError
- if data_format is neither channels_last nor channels_first.
Expand source code
@keras_export('keras.backend.resize_volumes') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def resize_volumes(x, depth_factor, height_factor, width_factor, data_format): """Resizes the volume contained in a 5D tensor. Args: x: Tensor or variable to resize. depth_factor: Positive integer. height_factor: Positive integer. width_factor: Positive integer. data_format: One of `"channels_first"`, `"channels_last"`. Returns: A tensor. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. """ if data_format == 'channels_first': output = repeat_elements(x, depth_factor, axis=2) output = repeat_elements(output, height_factor, axis=3) output = repeat_elements(output, width_factor, axis=4) return output elif data_format == 'channels_last': output = repeat_elements(x, depth_factor, axis=1) output = repeat_elements(output, height_factor, axis=2) output = repeat_elements(output, width_factor, axis=3) return output else: raise ValueError('Invalid data_format: ' + str(data_format))
def reverse(x, axes)
-
Reverse a tensor along the specified axes.
Args
x
- Tensor to reverse.
axes
- Integer or iterable of integers. Axes to reverse.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.reverse')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def reverse(x, axes):
  """Reverse a tensor along the specified axes.

  Args:
      x: Tensor to reverse.
      axes: Integer or iterable of integers. Axes to reverse.

  Returns:
      A tensor.
  """
  if isinstance(axes, int):
    axes = [axes]
  return tf.reverse(x, axes)
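A brief illustrative example (not from the original docstring), reversing along the column axis:
>>> x = tf.constant([[1, 2, 3], [4, 5, 6]])
>>> tf.keras.backend.eval(tf.keras.backend.reverse(x, axes=1))
array([[3, 2, 1],
       [6, 5, 4]], dtype=int32)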
def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None, time_major=False, zero_output_for_mask=False)
-
Iterates over the time dimension of a tensor.
Args
step_function
- RNN step function. Args: input, a tensor with shape (samples, ...) (no time dimension), representing input for the batch of samples at a certain time step; states, a list of tensors. Returns: output, a tensor with shape (samples, output_dim) (no time dimension); new_states, a list of tensors, same length and shapes as 'states'. The first state in the list must be the output tensor at the previous timestep.
inputs
- Tensor of temporal data of shape (samples, time, ...) (at least 3D), or nested tensors, each of which has shape (samples, time, ...).
initial_states
- Tensor with shape (samples, state_size) (no time dimension), containing the initial values for the states used in the step function. If state_size is a nested shape, the shape of initial_states will follow the nested structure as well.
go_backwards
- Boolean. If True, do the iteration over the time dimension in reverse order and return the reversed sequence.
mask
- Binary tensor with shape (samples, time, 1), with a zero for every element that is masked.
constants
- List of constant values passed at each step.
unroll
- Whether to unroll the RNN or to use a symbolic while_loop.
input_length
- An integer or a 1-D Tensor, depending on whether the time dimension is fixed-length or not. In case of variable-length input, it is used for masking when no mask is specified.
time_major
- Boolean. If true, the inputs and outputs will be in shape (timesteps, batch, ...), whereas in the False case, they will be (batch, timesteps, ...). Using time_major = True is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form.
zero_output_for_mask
- Boolean. If True, the output for masked timesteps will be zeros, whereas in the False case, the output from the previous timestep is returned.
Returns
A tuple, (last_output, outputs, new_states).
last_output
- the latest output of the rnn, of shape (samples, ...).
outputs
- tensor with shape (samples, time, ...) where each entry outputs[s, t] is the output of the step function at time t for sample s.
new_states
- list of tensors, latest states returned by the step function, of shape (samples, ...).
Raises
ValueError
- if input dimension is less than 3.
ValueError
- if unroll is True but the input timestep is not a fixed number.
ValueError
- if mask is provided (not None) but states is not provided (len(states) == 0).
Expand source code
@keras_export('keras.backend.rnn') @tf.__internal__.dispatch.add_dispatch_support def rnn(step_function, inputs, initial_states, go_backwards=False, mask=None, constants=None, unroll=False, input_length=None, time_major=False, zero_output_for_mask=False): """Iterates over the time dimension of a tensor. Args: step_function: RNN step function. Args; input; Tensor with shape `(samples, ...)` (no time dimension), representing input for the batch of samples at a certain time step. states; List of tensors. Returns; output; Tensor with shape `(samples, output_dim)` (no time dimension). new_states; List of tensors, same length and shapes as 'states'. The first state in the list must be the output tensor at the previous timestep. inputs: Tensor of temporal data of shape `(samples, time, ...)` (at least 3D), or nested tensors, and each of which has shape `(samples, time, ...)`. initial_states: Tensor with shape `(samples, state_size)` (no time dimension), containing the initial values for the states used in the step function. In the case that state_size is in a nested shape, the shape of initial_states will also follow the nested structure. go_backwards: Boolean. If True, do the iteration over the time dimension in reverse order and return the reversed sequence. mask: Binary tensor with shape `(samples, time, 1)`, with a zero for every element that is masked. constants: List of constant values passed at each step. unroll: Whether to unroll the RNN or to use a symbolic `while_loop`. input_length: An integer or a 1-D Tensor, depending on whether the time dimension is fixed-length or not. In case of variable length input, it is used for masking in case there's no mask specified. time_major: Boolean. If true, the inputs and outputs will be in shape `(timesteps, batch, ...)`, whereas in the False case, it will be `(batch, timesteps, ...)`. Using `time_major = True` is a bit more efficient because it avoids transposes at the beginning and end of the RNN calculation. However, most TensorFlow data is batch-major, so by default this function accepts input and emits output in batch-major form. zero_output_for_mask: Boolean. If True, the output for masked timestep will be zeros, whereas in the False case, output from previous timestep is returned. Returns: A tuple, `(last_output, outputs, new_states)`. last_output: the latest output of the rnn, of shape `(samples, ...)` outputs: tensor with shape `(samples, time, ...)` where each entry `outputs[s, t]` is the output of the step function at time `t` for sample `s`. new_states: list of tensors, latest states returned by the step function, of shape `(samples, ...)`. Raises: ValueError: if input dimension is less than 3. ValueError: if `unroll` is `True` but input timestep is not a fixed number. ValueError: if `mask` is provided (not `None`) but states is not provided (`len(states)` == 0). """ def swap_batch_timestep(input_t): # Swap the batch and timestep dim for the incoming tensor. 
axes = list(range(len(input_t.shape))) axes[0], axes[1] = 1, 0 return tf.compat.v1.transpose(input_t, axes) if not time_major: inputs = tf.nest.map_structure(swap_batch_timestep, inputs) flatted_inputs = tf.nest.flatten(inputs) time_steps = flatted_inputs[0].shape[0] batch = flatted_inputs[0].shape[1] time_steps_t = tf.shape(flatted_inputs[0])[0] for input_ in flatted_inputs: input_.shape.with_rank_at_least(3) if mask is not None: if mask.dtype != tf.bool: mask = tf.cast(mask, tf.bool) if len(mask.shape) == 2: mask = expand_dims(mask) if not time_major: mask = swap_batch_timestep(mask) if constants is None: constants = [] # tf.where needs its condition tensor to be the same shape as its two # result tensors, but in our case the condition (mask) tensor is # (nsamples, 1), and inputs are (nsamples, ndimensions) or even more. # So we need to broadcast the mask to match the shape of inputs. # That's what the tile call does, it just repeats the mask along its # second dimension n times. def _expand_mask(mask_t, input_t, fixed_dim=1): if tf.nest.is_nested(mask_t): raise ValueError('mask_t is expected to be tensor, but got %s' % mask_t) if tf.nest.is_nested(input_t): raise ValueError('input_t is expected to be tensor, but got %s' % input_t) rank_diff = len(input_t.shape) - len(mask_t.shape) for _ in range(rank_diff): mask_t = tf.expand_dims(mask_t, -1) multiples = [1] * fixed_dim + input_t.shape.as_list()[fixed_dim:] return tf.tile(mask_t, multiples) if unroll: if not time_steps: raise ValueError('Unrolling requires a fixed number of timesteps.') states = tuple(initial_states) successive_states = [] successive_outputs = [] # Process the input tensors. The input tensor need to be split on the # time_step dim, and reverse if go_backwards is True. In the case of nested # input, the input is flattened and then transformed individually. 
# The result of this will be a tuple of lists, each of the item in tuple is # list of the tensor with shape (batch, feature) def _process_single_input_t(input_t): input_t = tf.unstack(input_t) # unstack for time_step dim if go_backwards: input_t.reverse() return input_t if tf.nest.is_nested(inputs): processed_input = tf.nest.map_structure(_process_single_input_t, inputs) else: processed_input = (_process_single_input_t(inputs),) def _get_input_tensor(time): inp = [t_[time] for t_ in processed_input] return tf.nest.pack_sequence_as(inputs, inp) if mask is not None: mask_list = tf.unstack(mask) if go_backwards: mask_list.reverse() for i in range(time_steps): inp = _get_input_tensor(i) mask_t = mask_list[i] output, new_states = step_function(inp, tuple(states) + tuple(constants)) tiled_mask_t = _expand_mask(mask_t, output) if not successive_outputs: prev_output = zeros_like(output) else: prev_output = successive_outputs[-1] output = tf.where(tiled_mask_t, output, prev_output) flat_states = tf.nest.flatten(states) flat_new_states = tf.nest.flatten(new_states) tiled_mask_t = tuple(_expand_mask(mask_t, s) for s in flat_states) flat_final_states = tuple( tf.where(m, s, ps) for m, s, ps in zip(tiled_mask_t, flat_new_states, flat_states)) states = tf.nest.pack_sequence_as(states, flat_final_states) successive_outputs.append(output) successive_states.append(states) last_output = successive_outputs[-1] new_states = successive_states[-1] outputs = tf.stack(successive_outputs) if zero_output_for_mask: last_output = tf.where( _expand_mask(mask_list[-1], last_output), last_output, zeros_like(last_output)) outputs = tf.where( _expand_mask(mask, outputs, fixed_dim=2), outputs, zeros_like(outputs)) else: # mask is None for i in range(time_steps): inp = _get_input_tensor(i) output, states = step_function(inp, tuple(states) + tuple(constants)) successive_outputs.append(output) successive_states.append(states) last_output = successive_outputs[-1] new_states = successive_states[-1] outputs = tf.stack(successive_outputs) else: # Unroll == False states = tuple(initial_states) # Create input tensor array, if the inputs is nested tensors, then it will # be flattened first, and tensor array will be created one per flattened # tensor. input_ta = tuple( tf.TensorArray( dtype=inp.dtype, size=time_steps_t, tensor_array_name='input_ta_%s' % i) for i, inp in enumerate(flatted_inputs)) input_ta = tuple( ta.unstack(input_) if not go_backwards else ta .unstack(reverse(input_, 0)) for ta, input_ in zip(input_ta, flatted_inputs)) # Get the time(0) input and compute the output for that, the output will be # used to determine the dtype of output tensor array. Don't read from # input_ta due to TensorArray clear_after_read default to True. input_time_zero = tf.nest.pack_sequence_as(inputs, [inp[0] for inp in flatted_inputs]) # output_time_zero is used to determine the cell output shape and its dtype. # the value is discarded. output_time_zero, _ = step_function( input_time_zero, tuple(initial_states) + tuple(constants)) output_ta = tuple( tf.TensorArray( dtype=out.dtype, size=time_steps_t, element_shape=out.shape, tensor_array_name='output_ta_%s' % i) for i, out in enumerate(tf.nest.flatten(output_time_zero))) time = tf.constant(0, dtype='int32', name='time') # We only specify the 'maximum_iterations' when building for XLA since that # causes slowdowns on GPU in TF. 
if (not tf.executing_eagerly() and control_flow_util.GraphOrParentsInXlaContext(tf.compat.v1.get_default_graph())): max_iterations = tf.reduce_max(input_length) else: max_iterations = None while_loop_kwargs = { 'cond': lambda time, *_: time < time_steps_t, 'maximum_iterations': max_iterations, 'parallel_iterations': 32, 'swap_memory': True, } if mask is not None: if go_backwards: mask = reverse(mask, 0) mask_ta = tf.TensorArray( dtype=tf.bool, size=time_steps_t, tensor_array_name='mask_ta') mask_ta = mask_ta.unstack(mask) def masking_fn(time): return mask_ta.read(time) def compute_masked_output(mask_t, flat_out, flat_mask): tiled_mask_t = tuple( _expand_mask(mask_t, o, fixed_dim=len(mask_t.shape)) for o in flat_out) return tuple( tf.where(m, o, fm) for m, o, fm in zip(tiled_mask_t, flat_out, flat_mask)) elif isinstance(input_length, tf.Tensor): if go_backwards: max_len = tf.reduce_max(input_length, axis=0) rev_input_length = tf.subtract(max_len - 1, input_length) def masking_fn(time): return tf.less(rev_input_length, time) else: def masking_fn(time): return tf.greater(input_length, time) def compute_masked_output(mask_t, flat_out, flat_mask): return tuple( tf.compat.v1.where(mask_t, o, zo) for (o, zo) in zip(flat_out, flat_mask)) else: masking_fn = None if masking_fn is not None: # Mask for the T output will be base on the output of T - 1. In the case # T = 0, a zero filled tensor will be used. flat_zero_output = tuple(tf.zeros_like(o) for o in tf.nest.flatten(output_time_zero)) def _step(time, output_ta_t, prev_output, *states): """RNN step function. Args: time: Current timestep value. output_ta_t: TensorArray. prev_output: tuple of outputs from time - 1. *states: List of states. Returns: Tuple: `(time + 1, output_ta_t, output) + tuple(new_states)` """ current_input = tuple(ta.read(time) for ta in input_ta) # maybe set shape. current_input = tf.nest.pack_sequence_as(inputs, current_input) mask_t = masking_fn(time) output, new_states = step_function(current_input, tuple(states) + tuple(constants)) # mask output flat_output = tf.nest.flatten(output) flat_mask_output = (flat_zero_output if zero_output_for_mask else tf.nest.flatten(prev_output)) flat_new_output = compute_masked_output(mask_t, flat_output, flat_mask_output) # mask states flat_state = tf.nest.flatten(states) flat_new_state = tf.nest.flatten(new_states) for state, new_state in zip(flat_state, flat_new_state): if isinstance(new_state, tf.Tensor): new_state.set_shape(state.shape) flat_final_state = compute_masked_output(mask_t, flat_new_state, flat_state) new_states = tf.nest.pack_sequence_as(new_states, flat_final_state) output_ta_t = tuple( ta.write(time, out) for ta, out in zip(output_ta_t, flat_new_output)) return (time + 1, output_ta_t, tuple(flat_new_output)) + tuple(new_states) final_outputs = tf.compat.v1.while_loop( body=_step, loop_vars=(time, output_ta, flat_zero_output) + states, **while_loop_kwargs) # Skip final_outputs[2] which is the output for final timestep. new_states = final_outputs[3:] else: def _step(time, output_ta_t, *states): """RNN step function. Args: time: Current timestep value. output_ta_t: TensorArray. *states: List of states. 
Returns: Tuple: `(time + 1,output_ta_t) + tuple(new_states)` """ current_input = tuple(ta.read(time) for ta in input_ta) current_input = tf.nest.pack_sequence_as(inputs, current_input) output, new_states = step_function(current_input, tuple(states) + tuple(constants)) flat_state = tf.nest.flatten(states) flat_new_state = tf.nest.flatten(new_states) for state, new_state in zip(flat_state, flat_new_state): if isinstance(new_state, tf.Tensor): new_state.set_shape(state.shape) flat_output = tf.nest.flatten(output) output_ta_t = tuple( ta.write(time, out) for ta, out in zip(output_ta_t, flat_output)) new_states = tf.nest.pack_sequence_as(initial_states, flat_new_state) return (time + 1, output_ta_t) + tuple(new_states) final_outputs = tf.compat.v1.while_loop( body=_step, loop_vars=(time, output_ta) + states, **while_loop_kwargs) new_states = final_outputs[2:] output_ta = final_outputs[1] outputs = tuple(o.stack() for o in output_ta) last_output = tuple(o[-1] for o in outputs) outputs = tf.nest.pack_sequence_as(output_time_zero, outputs) last_output = tf.nest.pack_sequence_as(output_time_zero, last_output) # static shape inference def set_shape(output_): if isinstance(output_, tf.Tensor): shape = output_.shape.as_list() shape[0] = time_steps shape[1] = batch output_.set_shape(shape) return output_ outputs = tf.nest.map_structure(set_shape, outputs) if not time_major: outputs = tf.nest.map_structure(swap_batch_timestep, outputs) return last_output, outputs, new_states
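As an illustrative sketch (not from the original docstring), a step function that accumulates a running sum over the time dimension; the step function and its single-state layout here are arbitrary choices for the example:
>>> import numpy as np
>>> inputs = tf.constant(np.arange(6, dtype='float32').reshape(1, 3, 2))
>>> initial_states = [tf.zeros((1, 2))]
>>> def step(inp, states):
...     out = inp + states[0]  # add the previous running sum to this timestep
...     return out, [out]
>>> last, outputs, states = tf.keras.backend.rnn(step, inputs, initial_states)
>>> tf.keras.backend.eval(last)
array([[6., 9.]], dtype=float32)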
def round(x)
-
Element-wise rounding to the closest integer.
In case of tie, the rounding mode used is "half to even".
Args
x
- Tensor or variable.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.round')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def round(x):
  """Element-wise rounding to the closest integer.

  In case of tie, the rounding mode used is "half to even".

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.round(x)
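A brief illustrative example (not from the original docstring) of the half-to-even tie-breaking:
>>> x = tf.constant([0.5, 1.5, 2.5, 3.5])
>>> tf.keras.backend.eval(tf.keras.backend.round(x))
array([0., 2., 2., 4.], dtype=float32)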
def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1))
-
2D convolution with separable filters.
Args
x
- input tensor
depthwise_kernel
- convolution kernel for the depthwise convolution.
pointwise_kernel
- kernel for the 1x1 convolution.
strides
- strides tuple (length 2).
padding
- string, "same" or "valid".
data_format
- string, "channels_last" or "channels_first".
dilation_rate
- tuple of integers, dilation rates for the separable convolution.
Returns
Output tensor.
Raises
ValueError
- if data_format is neither channels_last nor channels_first.
ValueError
- if strides is not a tuple of 2 integers.
Expand source code
@keras_export('keras.backend.separable_conv2d') @tf.__internal__.dispatch.add_dispatch_support @doc_controls.do_not_generate_docs def separable_conv2d(x, depthwise_kernel, pointwise_kernel, strides=(1, 1), padding='valid', data_format=None, dilation_rate=(1, 1)): """2D convolution with separable filters. Args: x: input tensor depthwise_kernel: convolution kernel for the depthwise convolution. pointwise_kernel: kernel for the 1x1 convolution. strides: strides tuple (length 2). padding: string, `"same"` or `"valid"`. data_format: string, `"channels_last"` or `"channels_first"`. dilation_rate: tuple of integers, dilation rates for the separable convolution. Returns: Output tensor. Raises: ValueError: if `data_format` is neither `channels_last` or `channels_first`. ValueError: if `strides` is not a tuple of 2 integers. """ if data_format is None: data_format = image_data_format() if data_format not in {'channels_first', 'channels_last'}: raise ValueError('Unknown data_format: ' + str(data_format)) if len(strides) != 2: raise ValueError('`strides` must be a tuple of 2 integers.') x, tf_data_format = _preprocess_conv2d_input(x, data_format) padding = _preprocess_padding(padding) if not isinstance(strides, tuple): strides = tuple(strides) if tf_data_format == 'NHWC': strides = (1,) + strides + (1,) else: strides = (1, 1) + strides x = tf.compat.v1.nn.separable_conv2d( x, depthwise_kernel, pointwise_kernel, strides=strides, padding=padding, rate=dilation_rate, data_format=tf_data_format) if data_format == 'channels_first' and tf_data_format == 'NHWC': x = tf.compat.v1.transpose(x, (0, 3, 1, 2)) # NHWC -> NCHW return x
def set_epsilon(value)
-
Sets the value of the fuzz factor used in numeric expressions.
Args
value
- float. New value of epsilon.
Example:
>>> tf.keras.backend.epsilon()
1e-07
>>> tf.keras.backend.set_epsilon(1e-5)
>>> tf.keras.backend.epsilon()
1e-05
>>> tf.keras.backend.set_epsilon(1e-7)
Expand source code
@keras_export('keras.backend.set_epsilon')
def set_epsilon(value):
  """Sets the value of the fuzz factor used in numeric expressions.

  Args:
      value: float. New value of epsilon.

  Example:
  >>> tf.keras.backend.epsilon()
  1e-07
  >>> tf.keras.backend.set_epsilon(1e-5)
  >>> tf.keras.backend.epsilon()
  1e-05
  >>> tf.keras.backend.set_epsilon(1e-7)
  """
  global _EPSILON
  _EPSILON = value
def set_floatx(value)
-
Sets the default float type.
Note: It is not recommended to set this to float16 for training, as this will likely cause numeric stability issues. Instead, mixed precision, which is using a mix of float16 and float32, can be used by calling tf.keras.mixed_precision.experimental.set_policy('mixed_float16'). See the mixed precision guide (https://www.tensorflow.org/guide/keras/mixed_precision) for details.
Args
value
- String; 'float16', 'float32', or 'float64'.
Example:
>>> tf.keras.backend.floatx()
'float32'
>>> tf.keras.backend.set_floatx('float64')
>>> tf.keras.backend.floatx()
'float64'
>>> tf.keras.backend.set_floatx('float32')
Raises
ValueError
- In case of invalid value.
Expand source code
@keras_export('keras.backend.set_floatx') def set_floatx(value): """Sets the default float type. Note: It is not recommended to set this to float16 for training, as this will likely cause numeric stability issues. Instead, mixed precision, which is using a mix of float16 and float32, can be used by calling `tf.keras.mixed_precision.experimental.set_policy('mixed_float16')`. See the [mixed precision guide]( https://www.tensorflow.org/guide/keras/mixed_precision) for details. Args: value: String; `'float16'`, `'float32'`, or `'float64'`. Example: >>> tf.keras.backend.floatx() 'float32' >>> tf.keras.backend.set_floatx('float64') >>> tf.keras.backend.floatx() 'float64' >>> tf.keras.backend.set_floatx('float32') Raises: ValueError: In case of invalid value. """ global _FLOATX if value not in {'float16', 'float32', 'float64'}: raise ValueError('Unknown floatx type: ' + str(value)) _FLOATX = str(value)
def set_image_data_format(data_format)
-
Sets the value of the image data format convention.
Args
data_format
- string. 'channels_first' or 'channels_last'.
Example:
>>> tf.keras.backend.image_data_format()
'channels_last'
>>> tf.keras.backend.set_image_data_format('channels_first')
>>> tf.keras.backend.image_data_format()
'channels_first'
>>> tf.keras.backend.set_image_data_format('channels_last')
Raises
ValueError
- In case of invalid data_format value.
Expand source code
@keras_export('keras.backend.set_image_data_format') def set_image_data_format(data_format): """Sets the value of the image data format convention. Args: data_format: string. `'channels_first'` or `'channels_last'`. Example: >>> tf.keras.backend.image_data_format() 'channels_last' >>> tf.keras.backend.set_image_data_format('channels_first') >>> tf.keras.backend.image_data_format() 'channels_first' >>> tf.keras.backend.set_image_data_format('channels_last') Raises: ValueError: In case of invalid `data_format` value. """ global _IMAGE_DATA_FORMAT if data_format not in {'channels_last', 'channels_first'}: raise ValueError('Unknown data_format: ' + str(data_format)) _IMAGE_DATA_FORMAT = str(data_format)
def set_learning_phase(value)
-
Sets the learning phase to a fixed value.
The backend learning phase affects any code that calls backend.learning_phase(). In particular, all Keras built-in layers use the learning phase as the default for the training arg to Layer.__call__.
User-written layers and models can achieve the same behavior with code that looks like:
def call(self, inputs, training=None):
    if training is None:
        training = backend.learning_phase()
Args
value
- Learning phase value, either 0 or 1 (integers). 0 = test, 1 = train
Raises
ValueError
- if value is neither 0 nor 1.
Expand source code
@keras_export('keras.backend.set_learning_phase')
@doc_controls.do_not_generate_docs
def set_learning_phase(value):
  """Sets the learning phase to a fixed value.

  The backend learning phase affects any code that calls
  `backend.learning_phase()`
  In particular, all Keras built-in layers use the learning phase as the
  default for the `training` arg to `Layer.__call__`.

  User-written layers and models can achieve the same behavior with code that
  looks like:

  ```python
    def call(self, inputs, training=None):
      if training is None:
        training = backend.learning_phase()
  ```

  Args:
      value: Learning phase value, either 0 or 1 (integers).
             0 = test, 1 = train

  Raises:
      ValueError: if `value` is neither `0` nor `1`.
  """
  warnings.warn('`tf.keras.backend.set_learning_phase` is deprecated and '
                'will be removed after 2020-10-11. To update it, simply '
                'pass a True/False value to the `training` argument of the '
                '`__call__` method of your layer or model.')
  deprecated_internal_set_learning_phase(value)
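The recommended pattern from the docstring can be fleshed out as a small custom layer (NoisyDense here is a hypothetical example, not a Keras class); in_train_phase then selects the training branch based on the resolved flag:
import tensorflow as tf
from tensorflow.keras import backend as K

class NoisyDense(tf.keras.layers.Layer):
    """Hypothetical layer: adds noise only while training."""

    def call(self, inputs, training=None):
        if training is None:
            training = K.learning_phase()
        return K.in_train_phase(
            lambda: inputs + K.random_normal(K.shape(inputs)),  # train branch
            inputs,                                             # test branch
            training=training)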
def set_session(session)
-
Sets the global TensorFlow session.
Args
session
- A TF Session.
Expand source code
@keras_export(v1=['keras.backend.set_session'])
def set_session(session):
  """Sets the global TensorFlow session.

  Args:
      session: A TF Session.
  """
  global _SESSION
  _SESSION.session = session
def set_value(x, value)
-
Sets the value of a variable, from a Numpy array.
backend.set_value is the complement of backend.get_value, and provides a generic interface for assigning to variables while abstracting away the differences between TensorFlow 1.x and 2.x semantics.
>>> K = tf.keras.backend  # Common keras convention
>>> v = K.variable(1.)
>>> # reassign
>>> K.set_value(v, 2.)
>>> print(K.get_value(v))
2.0
>>> # increment
>>> K.set_value(v, K.get_value(v) + 1)
>>> print(K.get_value(v))
3.0
Variable semantics in TensorFlow 2 are eager execution friendly. The above code is roughly equivalent to:
>>> v = tf.Variable(1.)
>>> v.assign(2.)
>>> print(v.numpy())
2.0
>>> v.assign_add(1.)
>>> print(v.numpy())
3.0
Args
x
- Variable to set to a new value.
value
- Value to set the tensor to, as a Numpy array (of the same shape).
Expand source code
@keras_export('keras.backend.set_value')
@doc_controls.do_not_generate_docs
def set_value(x, value):
  """Sets the value of a variable, from a Numpy array.

  `backend.set_value` is the complement of `backend.get_value`, and provides
  a generic interface for assigning to variables while abstracting away the
  differences between TensorFlow 1.x and 2.x semantics.

  {snippet}

  Args:
      x: Variable to set to a new value.
      value: Value to set the tensor to, as a Numpy array (of the same shape).
  """
  value = np.asarray(value, dtype=dtype_numpy(x))
  if tf.compat.v1.executing_eagerly_outside_functions():
    x.assign(value)
  else:
    with get_graph().as_default():
      tf_dtype = tf.as_dtype(x.dtype.name.split('_')[0])
      if hasattr(x, '_assign_placeholder'):
        assign_placeholder = x._assign_placeholder
        assign_op = x._assign_op
      else:
        # In order to support assigning weights to resizable variables in
        # Keras, we make a placeholder with the correct number of dimensions
        # but with None in each dimension. This way, we can assign weights
        # of any size (as long as they have the correct dimensionality).
        placeholder_shape = tf.TensorShape([None] * value.ndim)
        assign_placeholder = tf.compat.v1.placeholder(
            tf_dtype, shape=placeholder_shape)
        assign_op = x.assign(assign_placeholder)
        x._assign_placeholder = assign_placeholder
        x._assign_op = assign_op
      get_session().run(assign_op, feed_dict={assign_placeholder: value})
def shape(x)
-
Returns the symbolic shape of a tensor or variable.
Args
x
- A tensor or variable.
Returns
A symbolic shape (which is itself a tensor). Examples:
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = tf.keras.backend.variable(value=val)
>>> tf.keras.backend.shape(kvar)
<tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 2], dtype=int32)>
>>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
>>> tf.keras.backend.shape(input)
<KerasTensor: shape=(3,) dtype=int32 inferred_value=[2, 4, 5] ...>
Expand source code
@keras_export('keras.backend.shape')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def shape(x):
  """Returns the symbolic shape of a tensor or variable.

  Args:
      x: A tensor or variable.

  Returns:
      A symbolic shape (which is itself a tensor).

  Examples:

  >>> val = np.array([[1, 2], [3, 4]])
  >>> kvar = tf.keras.backend.variable(value=val)
  >>> tf.keras.backend.shape(kvar)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([2, 2], dtype=int32)>
  >>> input = tf.keras.backend.placeholder(shape=(2, 4, 5))
  >>> tf.keras.backend.shape(input)
  <KerasTensor: shape=(3,) dtype=int32 inferred_value=[2, 4, 5] ...>

  """
  return tf.shape(x)
def sigmoid(x)
-
Element-wise sigmoid.
Args
x
- A tensor or variable.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.sigmoid')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def sigmoid(x):
  """Element-wise sigmoid.

  Args:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  return tf.sigmoid(x)
def sign(x)
-
Element-wise sign.
Args
x
- Tensor or variable.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.sign')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def sign(x):
  """Element-wise sign.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.sign(x)
def sin(x)
-
Computes sin of x element-wise.
Args
x
- Tensor or variable.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.sin')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def sin(x):
  """Computes sin of x element-wise.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.sin(x)
def softmax(x, axis=-1)
-
Softmax of a tensor.
Args
x
- A tensor or variable.
axis
- The dimension softmax would be performed on. The default is -1 which indicates the last dimension.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.softmax')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def softmax(x, axis=-1):
  """Softmax of a tensor.

  Args:
      x: A tensor or variable.
      axis: The dimension softmax would be performed on.
          The default is -1 which indicates the last dimension.

  Returns:
      A tensor.
  """
  return tf.nn.softmax(x, axis=axis)
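A quick sanity check (a sketch with arbitrary example logits): softmax normalizes along the chosen axis, so the values along that axis sum to 1.
import tensorflow as tf
from tensorflow.keras import backend as K

logits = tf.constant([[1.0, 2.0, 3.0]])
probs = K.softmax(logits)              # axis=-1 by default
print(K.eval(K.sum(probs, axis=-1)))   # approximately [1.0]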
def softplus(x)
-
Softplus of a tensor.
Args
x
- A tensor or variable.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.softplus')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def softplus(x):
  """Softplus of a tensor.

  Args:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  return tf.math.softplus(x)
def softsign(x)
-
Softsign of a tensor.
Args
x
- A tensor or variable.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.softsign')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def softsign(x):
  """Softsign of a tensor.

  Args:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  return tf.math.softsign(x)
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1)
-
Categorical crossentropy with integer targets.
Args
target
- An integer tensor.
output
- A tensor resulting from a softmax (unless from_logits is True, in which case output is expected to be the logits).
from_logits
- Boolean, whether output is the result of a softmax, or is a tensor of logits.
axis
- Int specifying the channels axis. axis=-1 corresponds to data format channels_last, and axis=1 corresponds to data format channels_first.
Returns
Output tensor.
Raises
ValueError
- if axis is neither -1 nor one of the axes of output.
Expand source code
@keras_export('keras.backend.sparse_categorical_crossentropy')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def sparse_categorical_crossentropy(target, output, from_logits=False, axis=-1):
  """Categorical crossentropy with integer targets.

  Args:
      target: An integer tensor.
      output: A tensor resulting from a softmax
          (unless `from_logits` is True, in which
          case `output` is expected to be the logits).
      from_logits: Boolean, whether `output` is the
          result of a softmax, or is a tensor of logits.
      axis: Int specifying the channels axis. `axis=-1` corresponds to data
          format `channels_last`, and `axis=1` corresponds to data format
          `channels_first`.

  Returns:
      Output tensor.

  Raises:
      ValueError: if `axis` is neither -1 nor one of the axes of `output`.
  """
  target = tf.convert_to_tensor(target)
  output = tf.convert_to_tensor(output)

  # Use logits whenever they are available. `softmax` and `sigmoid`
  # activations cache logits on the `output` Tensor.
  if hasattr(output, '_keras_logits'):
    output = output._keras_logits  # pylint: disable=protected-access
    if from_logits:
      warnings.warn(
          '"`sparse_categorical_crossentropy` received `from_logits=True`, but '
          'the `output` argument was produced by a sigmoid or softmax '
          'activation and thus does not represent logits. Was this intended?"')
    from_logits = True
  elif (not from_logits and
        not isinstance(output, (tf.__internal__.EagerTensor, tf.Variable)) and
        output.op.type == 'Softmax') and not hasattr(output, '_keras_history'):
    # When softmax activation function is used for output operation, we
    # use logits from the softmax function directly to compute loss in order
    # to prevent collapsing zero when training.
    # See b/117284466
    assert len(output.op.inputs) == 1
    output = output.op.inputs[0]
    from_logits = True
  elif not from_logits:
    epsilon_ = _constant_to_tensor(epsilon(), output.dtype.base_dtype)
    output = tf.clip_by_value(output, epsilon_, 1 - epsilon_)
    output = tf.math.log(output)

  if isinstance(output.shape, (tuple, list)):
    output_rank = len(output.shape)
  else:
    output_rank = output.shape.ndims
  if output_rank is not None:
    axis %= output_rank
    if axis != output_rank - 1:
      permutation = list(
          itertools.chain(range(axis), range(axis + 1, output_rank), [axis]))
      output = tf.compat.v1.transpose(output, perm=permutation)
  elif axis != -1:
    raise ValueError(
        'Cannot compute sparse categorical crossentropy with `axis={}` on an '
        'output tensor with unknown rank'.format(axis))

  target = cast(target, 'int64')

  # Try to adjust the shape so that rank of labels = rank of logits - 1.
  output_shape = tf.shape(output)
  target_rank = target.shape.ndims

  update_shape = (
      target_rank is not None and output_rank is not None and
      target_rank != output_rank - 1)
  if update_shape:
    target = flatten(target)
    output = tf.reshape(output, [-1, output_shape[-1]])

  if py_any(_is_symbolic_tensor(v) for v in [target, output]):
    with get_graph().as_default():
      res = tf.nn.sparse_softmax_cross_entropy_with_logits(
          labels=target, logits=output)
  else:
    res = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=target, logits=output)

  if update_shape and output_rank >= 3:
    # If our output includes timesteps or spatial dimensions we need to reshape
    return tf.reshape(res, output_shape[:-1])
  else:
    return res
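A minimal usage sketch (the class ids and logits below are arbitrary example values): targets are integer class indices, so no one-hot encoding is needed, and one loss value is returned per sample.
import tensorflow as tf
from tensorflow.keras import backend as K

target = tf.constant([0, 2])                # integer class ids, one per sample
logits = tf.constant([[2.0, 0.1, 0.3],
                      [0.2, 0.4, 3.0]])
loss = K.sparse_categorical_crossentropy(target, logits, from_logits=True)
print(K.eval(loss))                         # shape (2,): one loss per sample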
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None)
-
Pads the 2nd and 3rd dimensions of a 4D tensor.
Args
x
- Tensor or variable.
padding
- Tuple of 2 tuples, padding pattern.
data_format
- One of channels_last or channels_first.
Returns
A padded 4D tensor.
Raises
ValueError
- if data_format is neither channels_last nor channels_first.
Expand source code
@keras_export('keras.backend.spatial_2d_padding')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def spatial_2d_padding(x, padding=((1, 1), (1, 1)), data_format=None):
  """Pads the 2nd and 3rd dimensions of a 4D tensor.

  Args:
      x: Tensor or variable.
      padding: Tuple of 2 tuples, padding pattern.
      data_format: One of `channels_last` or `channels_first`.

  Returns:
      A padded 4D tensor.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  assert len(padding) == 2
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  if data_format == 'channels_first':
    pattern = [[0, 0], [0, 0], list(padding[0]), list(padding[1])]
  else:
    pattern = [[0, 0], list(padding[0]), list(padding[1]), [0, 0]]
  return tf.compat.v1.pad(x, pattern)
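For instance (a sketch; the shapes are illustrative), each inner tuple gives the (before, after) zero counts for one spatial dimension:
from tensorflow.keras import backend as K

x = K.zeros((1, 2, 2, 1))                    # NHWC
y = K.spatial_2d_padding(x, padding=((1, 1), (2, 2)),
                         data_format='channels_last')
print(K.int_shape(y))                        # (1, 4, 6, 1): 1+2+1 rows, 2+2+2 cols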
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None)
-
Pads 5D tensor with zeros along the depth, height, width dimensions.
Pads these dimensions with padding[0], padding[1], and padding[2] zeros on the left and right, respectively.
For 'channels_last' data_format, the 2nd, 3rd and 4th dimension will be padded. For 'channels_first' data_format, the 3rd, 4th and 5th dimension will be padded.
Args
x
- Tensor or variable.
padding
- Tuple of 3 tuples, padding pattern.
data_format
- One of channels_last or channels_first.
Returns
A padded 5D tensor.
Raises
ValueError
- if data_format is neither channels_last nor channels_first.
Expand source code
@keras_export('keras.backend.spatial_3d_padding')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def spatial_3d_padding(x, padding=((1, 1), (1, 1), (1, 1)), data_format=None):
  """Pads 5D tensor with zeros along the depth, height, width dimensions.

  Pads these dimensions with respectively
  "padding[0]", "padding[1]" and "padding[2]" zeros left and right.

  For 'channels_last' data_format,
  the 2nd, 3rd and 4th dimension will be padded.
  For 'channels_first' data_format,
  the 3rd, 4th and 5th dimension will be padded.

  Args:
      x: Tensor or variable.
      padding: Tuple of 3 tuples, padding pattern.
      data_format: One of `channels_last` or `channels_first`.

  Returns:
      A padded 5D tensor.

  Raises:
      ValueError: if `data_format` is neither `channels_last` or
          `channels_first`.
  """
  assert len(padding) == 3
  assert len(padding[0]) == 2
  assert len(padding[1]) == 2
  assert len(padding[2]) == 2
  if data_format is None:
    data_format = image_data_format()
  if data_format not in {'channels_first', 'channels_last'}:
    raise ValueError('Unknown data_format: ' + str(data_format))

  if data_format == 'channels_first':
    pattern = [[0, 0], [0, 0], [padding[0][0], padding[0][1]],
               [padding[1][0], padding[1][1]], [padding[2][0], padding[2][1]]]
  else:
    pattern = [[0, 0], [padding[0][0], padding[0][1]],
               [padding[1][0], padding[1][1]], [padding[2][0],
                                                padding[2][1]], [0, 0]]
  return tf.compat.v1.pad(x, pattern)
def sqrt(x)
-
Element-wise square root.
This function clips negative tensor values to 0 before computing the square root.
Args
x
- Tensor or variable.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.sqrt')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def sqrt(x):
  """Element-wise square root.

  This function clips negative tensor values to 0 before computing the
  square root.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  zero = _constant_to_tensor(0., x.dtype.base_dtype)
  x = tf.maximum(x, zero)
  return tf.sqrt(x)
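The clipping is what distinguishes this from tf.sqrt: negative inputs yield 0 rather than NaN. A quick sketch with arbitrary values:
import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant([-4.0, 0.0, 9.0])
print(K.eval(K.sqrt(x)))   # [0. 0. 3.]: the -4.0 is clipped to 0, not NaN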
def square(x)
-
Element-wise square.
Args
x
- Tensor or variable.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.square')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def square(x):
  """Element-wise square.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.
  """
  return tf.square(x)
def squeeze(x, axis)
-
Removes a 1-dimension from the tensor at index "axis".
Args
x
- A tensor or variable.
axis
- Axis to drop.
Returns
A tensor with the same data as x but reduced dimensions.
Expand source code
@keras_export('keras.backend.squeeze')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def squeeze(x, axis):
  """Removes a 1-dimension from the tensor at index "axis".

  Args:
      x: A tensor or variable.
      axis: Axis to drop.

  Returns:
      A tensor with the same data as `x` but reduced dimensions.
  """
  return tf.squeeze(x, [axis])
def stack(x, axis=0)
-
Stacks a list of rank R tensors into a rank R+1 tensor.
Args
x
- List of tensors.
axis
- Axis along which to perform stacking.
Returns
A tensor.
Example
>>> a = tf.constant([[1, 2],[3, 4]])
>>> b = tf.constant([[10, 20],[30, 40]])
>>> tf.keras.backend.stack((a, b))
<tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
array([[[ 1,  2],
        [ 3,  4]],
       [[10, 20],
        [30, 40]]], dtype=int32)>
Expand source code
@keras_export('keras.backend.stack')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def stack(x, axis=0):
  """Stacks a list of rank `R` tensors into a rank `R+1` tensor.

  Args:
      x: List of tensors.
      axis: Axis along which to perform stacking.

  Returns:
      A tensor.

  Example:

  >>> a = tf.constant([[1, 2],[3, 4]])
  >>> b = tf.constant([[10, 20],[30, 40]])
  >>> tf.keras.backend.stack((a, b))
  <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
  array([[[ 1,  2],
          [ 3,  4]],
         [[10, 20],
          [30, 40]]], dtype=int32)>

  """
  return tf.stack(x, axis=axis)
def std(x, axis=None, keepdims=False)
-
Standard deviation of a tensor, alongside the specified axis.
It is an alias to tf.math.reduce_std.
Args
x
- A tensor or variable. It should have numerical dtypes. Boolean type inputs will be converted to float.
axis
- An integer, the axis to compute the standard deviation. If None (the default), reduces all dimensions. Must be in the range [-rank(x), rank(x)).
keepdims
- A boolean, whether to keep the dimensions or not. If keepdims is False, the rank of the tensor is reduced by 1. If keepdims is True, the reduced dimension is retained with length 1.
Returns
A tensor with the standard deviation of elements of x with the same dtype. Boolean type input will be converted to float.
Expand source code
@keras_export('keras.backend.std')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def std(x, axis=None, keepdims=False):
  """Standard deviation of a tensor, alongside the specified axis.

  It is an alias to `tf.math.reduce_std`.

  Args:
      x: A tensor or variable. It should have numerical dtypes. Boolean type
        inputs will be converted to float.
      axis: An integer, the axis to compute the standard deviation. If `None`
        (the default), reduces all dimensions. Must be in the range
        `[-rank(x), rank(x))`.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`, the reduced dimension is retained
          with length 1.

  Returns:
      A tensor with the standard deviation of elements of `x` with same
      dtype. Boolean type input will be converted to float.
  """
  if x.dtype.base_dtype == tf.bool:
    x = tf.cast(x, floatx())
  return tf.math.reduce_std(x, axis=axis, keepdims=keepdims)
def stop_gradient(variables)
-
Returns variables but with zero gradient w.r.t. every other variable.
Args
variables
- Tensor or list of tensors to consider constant with respect to any other variable.
Returns
A single tensor or a list of tensors (depending on the passed argument) that has no gradient with respect to any other variable.
Expand source code
@keras_export('keras.backend.stop_gradient')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def stop_gradient(variables):
  """Returns `variables` but with zero gradient w.r.t. every other variable.

  Args:
      variables: Tensor or list of tensors to consider constant with respect
        to any other variable.

  Returns:
      A single tensor or a list of tensors (depending on the passed argument)
      that has no gradient with respect to any other variable.
  """
  if isinstance(variables, (list, tuple)):
    return map(tf.stop_gradient, variables)
  return tf.stop_gradient(variables)
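To see the effect on differentiation (a sketch with an arbitrary value), wrap one factor so the tape treats it as a constant:
import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.Variable(3.0)
with tf.GradientTape() as tape:
    y = x * K.stop_gradient(x)    # second factor is treated as a constant
grad = tape.gradient(y, x)
print(grad.numpy())               # 3.0, not 6.0: d/dx (x * const) = const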
def sum(x, axis=None, keepdims=False)
-
Sum of the values in a tensor, alongside the specified axis.
Args
x
- A tensor or variable.
axis
- An integer, the axis to sum over.
keepdims
- A boolean, whether to keep the dimensions or not. If keepdims is False, the rank of the tensor is reduced by 1. If keepdims is True, the reduced dimension is retained with length 1.
Returns
A tensor with sum of x.
Expand source code
@keras_export('keras.backend.sum')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def sum(x, axis=None, keepdims=False):
  """Sum of the values in a tensor, alongside the specified axis.

  Args:
      x: A tensor or variable.
      axis: An integer, the axis to sum over.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with sum of `x`.
  """
  return tf.reduce_sum(x, axis, keepdims)
def switch(condition, then_expression, else_expression)
-
Switches between two operations depending on a scalar value.
Note that both then_expression and else_expression should be symbolic tensors of the same shape.
Args
condition
- tensor (int or bool).
then_expression
- either a tensor, or a callable that returns a tensor.
else_expression
- either a tensor, or a callable that returns a tensor.
Returns
The selected tensor.
Raises
ValueError
- If rank of condition is greater than rank of expressions.
Expand source code
@keras_export('keras.backend.switch')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def switch(condition, then_expression, else_expression):
  """Switches between two operations depending on a scalar value.

  Note that both `then_expression` and `else_expression`
  should be symbolic tensors of the *same shape*.

  Args:
      condition: tensor (`int` or `bool`).
      then_expression: either a tensor, or a callable that returns a tensor.
      else_expression: either a tensor, or a callable that returns a tensor.

  Returns:
      The selected tensor.

  Raises:
      ValueError: If rank of `condition` is greater than rank of expressions.
  """
  if condition.dtype != tf.bool:
    condition = tf.cast(condition, 'bool')
  cond_ndim = ndim(condition)
  if not cond_ndim:
    if not callable(then_expression):

      def then_expression_fn():
        return then_expression
    else:
      then_expression_fn = then_expression
    if not callable(else_expression):

      def else_expression_fn():
        return else_expression
    else:
      else_expression_fn = else_expression
    x = tf.compat.v1.cond(condition, then_expression_fn, else_expression_fn)
  else:
    # tf.where needs its condition tensor
    # to be the same shape as its two
    # result tensors
    if callable(then_expression):
      then_expression = then_expression()
    if callable(else_expression):
      else_expression = else_expression()
    expr_ndim = ndim(then_expression)
    if cond_ndim > expr_ndim:
      raise ValueError('Rank of `condition` should be less than or'
                       ' equal to rank of `then_expression` and '
                       '`else_expression`. ndim(condition)=' + str(cond_ndim) +
                       ', ndim(then_expression)'
                       '=' + str(expr_ndim))
    if cond_ndim > 1:
      ndim_diff = expr_ndim - cond_ndim
      cond_shape = tf.concat(
          [tf.shape(condition), [1] * ndim_diff], axis=0)
      condition = tf.reshape(condition, cond_shape)
      expr_shape = tf.shape(then_expression)
      shape_diff = expr_shape - cond_shape
      tile_shape = tf.where(shape_diff > 0, expr_shape,
                            tf.ones_like(expr_shape))
      condition = tf.tile(condition, tile_shape)
    x = tf.where(condition, then_expression, else_expression)
  return x
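With a scalar condition the branches may be callables, which defers their evaluation (a sketch with arbitrary values):
import tensorflow as tf
from tensorflow.keras import backend as K

cond = tf.constant(True)
out = K.switch(cond,
               lambda: tf.constant([1.0, 2.0]),   # taken when cond is true
               lambda: tf.constant([3.0, 4.0]))
print(K.eval(out))                                # [1. 2.]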
def tanh(x)
-
Element-wise tanh.
Args
x
- A tensor or variable.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.tanh')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def tanh(x):
  """Element-wise tanh.

  Args:
      x: A tensor or variable.

  Returns:
      A tensor.
  """
  return tf.tanh(x)
def temporal_padding(x, padding=(1, 1))
-
Pads the middle dimension of a 3D tensor.
Args
x
- Tensor or variable.
padding
- Tuple of 2 integers, how many zeros to add at the start and end of dim 1.
Returns
A padded 3D tensor.
Expand source code
@keras_export('keras.backend.temporal_padding')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def temporal_padding(x, padding=(1, 1)):
  """Pads the middle dimension of a 3D tensor.

  Args:
      x: Tensor or variable.
      padding: Tuple of 2 integers, how many zeros to
          add at the start and end of dim 1.

  Returns:
      A padded 3D tensor.
  """
  assert len(padding) == 2
  pattern = [[0, 0], [padding[0], padding[1]], [0, 0]]
  return tf.compat.v1.pad(x, pattern)
def tile(x, n)
-
Creates a tensor by tiling x by n.
Args
x
- A tensor or variable.
n
- A list of integers. The length must be the same as the number of dimensions in x.
Returns
A tiled tensor.
Expand source code
@keras_export('keras.backend.tile')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def tile(x, n):
  """Creates a tensor by tiling `x` by `n`.

  Args:
      x: A tensor or variable
      n: A list of integer. The length must be the same as the number of
          dimensions in `x`.

  Returns:
      A tiled tensor.
  """
  if isinstance(n, int):
    n = [n]
  return tf.tile(x, n)
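For instance (a sketch with arbitrary values), each entry of n gives the repeat count for the corresponding dimension:
import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant([[1, 2]])          # shape (1, 2)
print(K.eval(K.tile(x, [2, 3])))   # shape (2, 6)
# [[1 2 1 2 1 2]
#  [1 2 1 2 1 2]]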
def to_dense(tensor)
-
Converts a sparse tensor into a dense tensor and returns it.
Args
tensor
- A tensor instance (potentially sparse).
Returns
A dense tensor.
Examples
>>> b = tf.keras.backend.placeholder((2, 2), sparse=True)
>>> print(tf.keras.backend.is_sparse(b))
True
>>> c = tf.keras.backend.to_dense(b)
>>> print(tf.keras.backend.is_sparse(c))
False
Expand source code
@keras_export('keras.backend.to_dense')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def to_dense(tensor):
  """Converts a sparse tensor into a dense tensor and returns it.

  Args:
      tensor: A tensor instance (potentially sparse).

  Returns:
      A dense tensor.

  Examples:

  >>> b = tf.keras.backend.placeholder((2, 2), sparse=True)
  >>> print(tf.keras.backend.is_sparse(b))
  True
  >>> c = tf.keras.backend.to_dense(b)
  >>> print(tf.keras.backend.is_sparse(c))
  False

  """
  if is_sparse(tensor):
    return tf.sparse.to_dense(tensor)
  else:
    return tensor
def transpose(x)
-
Transposes a tensor and returns it.
Args
x
- Tensor or variable.
Returns
A tensor. Examples:
>>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])
>>> tf.keras.backend.eval(var)
array([[1., 2., 3.],
       [4., 5., 6.]], dtype=float32)
>>> var_transposed = tf.keras.backend.transpose(var)
>>> tf.keras.backend.eval(var_transposed)
array([[1., 4.],
       [2., 5.],
       [3., 6.]], dtype=float32)
>>> input = tf.keras.backend.placeholder((2, 3))
>>> input
<KerasTensor: shape=(2, 3) dtype=float32 ...>
>>> input_transposed = tf.keras.backend.transpose(input)
>>> input_transposed
<KerasTensor: shape=(3, 2) dtype=float32 ...>
Expand source code
@keras_export('keras.backend.transpose')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def transpose(x):
  """Transposes a tensor and returns it.

  Args:
      x: Tensor or variable.

  Returns:
      A tensor.

  Examples:

  >>> var = tf.keras.backend.variable([[1, 2, 3], [4, 5, 6]])
  >>> tf.keras.backend.eval(var)
  array([[1., 2., 3.],
         [4., 5., 6.]], dtype=float32)
  >>> var_transposed = tf.keras.backend.transpose(var)
  >>> tf.keras.backend.eval(var_transposed)
  array([[1., 4.],
         [2., 5.],
         [3., 6.]], dtype=float32)
  >>> input = tf.keras.backend.placeholder((2, 3))
  >>> input
  <KerasTensor: shape=(2, 3) dtype=float32 ...>
  >>> input_transposed = tf.keras.backend.transpose(input)
  >>> input_transposed
  <KerasTensor: shape=(3, 2) dtype=float32 ...>

  """
  return tf.compat.v1.transpose(x)
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None)
-
Returns a tensor with truncated random normal distribution of values.
The generated values follow a normal distribution with specified mean and standard deviation, except that values whose magnitude is more than two standard deviations from the mean are dropped and re-picked.
Args
shape
- A tuple of integers, the shape of tensor to create.
mean
- Mean of the values.
stddev
- Standard deviation of the values.
dtype
- String, dtype of returned tensor.
seed
- Integer, random seed.
Returns
A tensor.
Expand source code
@keras_export('keras.backend.truncated_normal')
@tf.__internal__.dispatch.add_dispatch_support
@doc_controls.do_not_generate_docs
def truncated_normal(shape, mean=0.0, stddev=1.0, dtype=None, seed=None):
  """Returns a tensor with truncated random normal distribution of values.

  The generated values follow a normal distribution
  with specified mean and standard deviation,
  except that values whose magnitude is more than
  two standard deviations from the mean are dropped and re-picked.

  Args:
      shape: A tuple of integers, the shape of tensor to create.
      mean: Mean of the values.
      stddev: Standard deviation of the values.
      dtype: String, dtype of returned tensor.
      seed: Integer, random seed.

  Returns:
      A tensor.
  """
  if dtype is None:
    dtype = floatx()
  if seed is None:
    seed = np.random.randint(10e6)
  return tf.random.truncated_normal(
      shape, mean, stddev, dtype=dtype, seed=seed)
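The truncation can be observed empirically (a sketch; the sample size and seed are arbitrary):
from tensorflow.keras import backend as K

t = K.truncated_normal((10000,), mean=0.0, stddev=1.0, seed=42)
vals = K.eval(t)
print(vals.min(), vals.max())   # both within (-2, 2): tails beyond 2*stddev are re-drawn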
def update(x, new_x)
-
Updates the value of x to new_x by assigning new_x to the variable x.
Expand source code
@keras_export('keras.backend.update')
@doc_controls.do_not_generate_docs
def update(x, new_x):
  return tf.compat.v1.assign(x, new_x)
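Together with update_add and update_sub (described in the entries that follow), this gives in-place variable arithmetic; a minimal sketch with arbitrary values:
from tensorflow.keras import backend as K

v = K.variable(10.0)
K.update(v, 1.0)        # v := 1.0
K.update_add(v, 5.0)    # v := v + 5.0
K.update_sub(v, 2.0)    # v := v - 2.0
print(K.get_value(v))   # 4.0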
def update_add(x, increment)
-
Update the value of x by adding increment.
Args
x
- A Variable.
increment
- A tensor of same shape as
x
.
Returns
The variable x updated.
Expand source code
@keras_export('keras.backend.update_add')
@doc_controls.do_not_generate_docs
def update_add(x, increment):
  """Update the value of `x` by adding `increment`.

  Args:
      x: A Variable.
      increment: A tensor of same shape as `x`.

  Returns:
      The variable `x` updated.
  """
  return tf.compat.v1.assign_add(x, increment)
def update_sub(x, decrement)
-
Update the value of x by subtracting decrement.
Args
x
- A Variable.
decrement
- A tensor of same shape as
x
.
Returns
The variable x updated.
Expand source code
@keras_export('keras.backend.update_sub')
@doc_controls.do_not_generate_docs
def update_sub(x, decrement):
  """Update the value of `x` by subtracting `decrement`.

  Args:
      x: A Variable.
      decrement: A tensor of same shape as `x`.

  Returns:
      The variable `x` updated.
  """
  return tf.compat.v1.assign_sub(x, decrement)
def var(x, axis=None, keepdims=False)
-
Variance of a tensor, alongside the specified axis.
Args
x
- A tensor or variable.
axis
- An integer, the axis to compute the variance.
keepdims
- A boolean, whether to keep the dimensions or not. If keepdims is False, the rank of the tensor is reduced by 1. If keepdims is True, the reduced dimension is retained with length 1.
Returns
A tensor with the variance of elements of x.
Expand source code
@keras_export('keras.backend.var')
@doc_controls.do_not_generate_docs
def var(x, axis=None, keepdims=False):
  """Variance of a tensor, alongside the specified axis.

  Args:
      x: A tensor or variable.
      axis: An integer, the axis to compute the variance.
      keepdims: A boolean, whether to keep the dimensions or not.
          If `keepdims` is `False`, the rank of the tensor is reduced
          by 1. If `keepdims` is `True`,
          the reduced dimension is retained with length 1.

  Returns:
      A tensor with the variance of elements of `x`.
  """
  if x.dtype.base_dtype == tf.bool:
    x = tf.cast(x, floatx())
  return tf.math.reduce_variance(x, axis=axis, keepdims=keepdims)
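Both var and std (documented above) reduce with the population convention, and std is the square root of var; a sketch with arbitrary values:
import tensorflow as tf
from tensorflow.keras import backend as K

x = tf.constant([1.0, 2.0, 3.0, 4.0])
print(K.eval(K.var(x)))        # 1.25 (population variance, divisor N)
print(K.eval(K.std(x)) ** 2)   # approximately 1.25: std equals sqrt(var)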
def variable(value, dtype=None, name=None, constraint=None)
-
Instantiates a variable and returns it.
Args
value
- Numpy array, initial value of the tensor.
dtype
- Tensor type.
name
- Optional name string for the tensor.
constraint
- Optional projection function to be applied to the variable after an optimizer update.
Returns
A variable instance (with Keras metadata included). Examples:
>>> val = np.array([[1, 2], [3, 4]])
>>> kvar = tf.keras.backend.variable(value=val, dtype='float64',
...                                  name='example_var')
>>> tf.keras.backend.dtype(kvar)
'float64'
>>> print(kvar)
<tf.Variable 'example_var:...' shape=(2, 2) dtype=float64, numpy=
  array([[1., 2.],
         [3., 4.]])>
Expand source code
@keras_export('keras.backend.variable')
@doc_controls.do_not_generate_docs
def variable(value, dtype=None, name=None, constraint=None):
  """Instantiates a variable and returns it.

  Args:
      value: Numpy array, initial value of the tensor.
      dtype: Tensor type.
      name: Optional name string for the tensor.
      constraint: Optional projection function to be
          applied to the variable after an optimizer update.

  Returns:
      A variable instance (with Keras metadata included).

  Examples:

  >>> val = np.array([[1, 2], [3, 4]])
  >>> kvar = tf.keras.backend.variable(value=val, dtype='float64',
  ...                                  name='example_var')
  >>> tf.keras.backend.dtype(kvar)
  'float64'
  >>> print(kvar)
  <tf.Variable 'example_var:...' shape=(2, 2) dtype=float64, numpy=
    array([[1., 2.],
           [3., 4.]])>

  """
  if dtype is None:
    dtype = floatx()
  if hasattr(value, 'tocoo'):
    sparse_coo = value.tocoo()
    indices = np.concatenate((np.expand_dims(sparse_coo.row, 1), np.expand_dims(
        sparse_coo.col, 1)), 1)
    v = tf.SparseTensor(
        indices=indices, values=sparse_coo.data, dense_shape=sparse_coo.shape)
    v._keras_shape = sparse_coo.shape
    return v
  v = tf.Variable(
      value, dtype=tf.as_dtype(dtype), name=name, constraint=constraint)
  if isinstance(value, np.ndarray):
    v._keras_shape = value.shape
  elif hasattr(value, 'shape'):
    v._keras_shape = int_shape(value)
  track_variable(v)
  return v
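Note the hasattr(value, 'tocoo') branch: a SciPy sparse matrix is converted to a tf.SparseTensor rather than a dense Variable. A sketch (assumes SciPy is installed):
import numpy as np
from scipy import sparse
from tensorflow.keras import backend as K

m = sparse.csr_matrix(np.eye(3))
v = K.variable(m)          # returns a tf.SparseTensor, not a tf.Variable
print(K.is_sparse(v))      # True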
def zeros(shape, dtype=None, name=None)
-
Instantiates an all-zeros variable and returns it.
Args
shape
- Tuple or list of integers, shape of returned Keras variable
dtype
- data type of returned Keras variable
name
- name of returned Keras variable
Returns
A variable (including Keras metadata), filled with 0.0. Note that if shape was symbolic, we cannot return a variable, and will return a dynamically-shaped tensor instead.
Example:
>>> kvar = tf.keras.backend.zeros((3,4))
>>> tf.keras.backend.eval(kvar)
array([[0., 0., 0., 0.],
       [0., 0., 0., 0.],
       [0., 0., 0., 0.]], dtype=float32)
>>> A = tf.constant([1,2,3])
>>> kvar2 = tf.keras.backend.zeros(A.shape) # [0., 0., 0.]
>>> tf.keras.backend.eval(kvar2)
array([0., 0., 0.], dtype=float32)
>>> kvar3 = tf.keras.backend.zeros(A.shape,dtype=tf.int32)
>>> tf.keras.backend.eval(kvar3)
array([0, 0, 0], dtype=int32)
>>> kvar4 = tf.keras.backend.zeros([2,3])
>>> tf.keras.backend.eval(kvar4)
array([[0., 0., 0.],
       [0., 0., 0.]], dtype=float32)
Expand source code
@keras_export('keras.backend.zeros')
@doc_controls.do_not_generate_docs
def zeros(shape, dtype=None, name=None):
  """Instantiates an all-zeros variable and returns it.

  Args:
      shape: Tuple or list of integers, shape of returned Keras variable
      dtype: data type of returned Keras variable
      name: name of returned Keras variable

  Returns:
      A variable (including Keras metadata), filled with `0.0`.
      Note that if `shape` was symbolic, we cannot return a variable,
      and will return a dynamically-shaped tensor instead.

  Example:

  >>> kvar = tf.keras.backend.zeros((3,4))
  >>> tf.keras.backend.eval(kvar)
  array([[0., 0., 0., 0.],
         [0., 0., 0., 0.],
         [0., 0., 0., 0.]], dtype=float32)
  >>> A = tf.constant([1,2,3])
  >>> kvar2 = tf.keras.backend.zeros(A.shape) # [0., 0., 0.]
  >>> tf.keras.backend.eval(kvar2)
  array([0., 0., 0.], dtype=float32)
  >>> kvar3 = tf.keras.backend.zeros(A.shape,dtype=tf.int32)
  >>> tf.keras.backend.eval(kvar3)
  array([0, 0, 0], dtype=int32)
  >>> kvar4 = tf.keras.backend.zeros([2,3])
  >>> tf.keras.backend.eval(kvar4)
  array([[0., 0., 0.],
         [0., 0., 0.]], dtype=float32)

  """
  with tf.init_scope():
    if dtype is None:
      dtype = floatx()
    tf_dtype = tf.as_dtype(dtype)
    v = tf.zeros(shape=shape, dtype=tf_dtype, name=name)
    if py_all(v.shape.as_list()):
      return variable(v, dtype=dtype, name=name)
    return v
def zeros_like(x, dtype=None, name=None)
-
Instantiates an all-zeros variable of the same shape as another tensor.
Args
x
- Keras variable or Keras tensor.
dtype
- dtype of returned Keras variable. None uses the dtype of x.
name
- name for the variable to create.
Returns
A Keras variable with the shape of x filled with zeros.
Example:
from tensorflow.keras import backend as K
kvar = K.variable(np.random.random((2,3)))
kvar_zeros = K.zeros_like(kvar)
K.eval(kvar_zeros)
# array([[ 0.,  0.,  0.], [ 0.,  0.,  0.]], dtype=float32)
Expand source code
@keras_export('keras.backend.zeros_like')
@doc_controls.do_not_generate_docs
def zeros_like(x, dtype=None, name=None):
  """Instantiates an all-zeros variable of the same shape as another tensor.

  Args:
      x: Keras variable or Keras tensor.
      dtype: dtype of returned Keras variable.
             `None` uses the dtype of `x`.
      name: name for the variable to create.

  Returns:
      A Keras variable with the shape of `x` filled with zeros.

  Example:

  ```python
  from tensorflow.keras import backend as K
  kvar = K.variable(np.random.random((2,3)))
  kvar_zeros = K.zeros_like(kvar)
  K.eval(kvar_zeros)
  # array([[ 0.,  0.,  0.], [ 0.,  0.,  0.]], dtype=float32)
  ```
  """
  return tf.zeros_like(x, dtype=dtype, name=name)
Classes
class name_scope (name, default_name=None, values=None)
-
A context manager for use when defining a Python op.
This context manager validates that the given values are from the same graph, makes that graph the default graph, and pushes a name scope in that graph (see tf.Graph.name_scope for more details on that).
For example, to define a new Python op called my_op:
def my_op(a, b, c, name=None):
    with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
        a = tf.convert_to_tensor(a, name="a")
        b = tf.convert_to_tensor(b, name="b")
        c = tf.convert_to_tensor(c, name="c")
        # Define some computation that uses `a`, `b`, and `c`.
        return foo_op(..., name=scope)
Initialize the context manager.
Args
name
- The name argument that is passed to the op function.
default_name
- The default name to use if the name argument is None.
values
- The list of Tensor arguments that are passed to the op function.
Raises
TypeError
- if default_name is passed in but not a string.
Expand source code
class name_scope_v1(object):  # pylint: disable=invalid-name
  """A context manager for use when defining a Python op.

  This context manager validates that the given `values` are from the
  same graph, makes that graph the default graph, and pushes a
  name scope in that graph (see
  `tf.Graph.name_scope`
  for more details on that).

  For example, to define a new Python op called `my_op`:

  ```python
  def my_op(a, b, c, name=None):
    with tf.name_scope(name, "MyOp", [a, b, c]) as scope:
      a = tf.convert_to_tensor(a, name="a")
      b = tf.convert_to_tensor(b, name="b")
      c = tf.convert_to_tensor(c, name="c")
      # Define some computation that uses `a`, `b`, and `c`.
      return foo_op(..., name=scope)
  ```
  """

  __slots__ = ["_name", "_name_scope"]

  @property
  def name(self):
    return self._name

  def __init__(self, name, default_name=None, values=None):
    """Initialize the context manager.

    Args:
      name: The name argument that is passed to the op function.
      default_name: The default name to use if the `name` argument is `None`.
      values: The list of `Tensor` arguments that are passed to the op
        function.

    Raises:
      TypeError: if `default_name` is passed in but not a string.
    """
    self._name_scope = name_scope(
        name, default_name, values, skip_on_eager=False)
    self._name = default_name if name is None else name

  def __enter__(self):
    return self._name_scope.__enter__()

  def __exit__(self, *exc_info):
    return self._name_scope.__exit__(*exc_info)
Instance variables
var name
-
Expand source code
@property
def name(self):
  return self._name