# Source code for tensorflow.python.ops.math_ops

# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Math Operations.

Note: Functions taking Tensor arguments can also take anything accepted by
tf.convert_to_tensor.

Note: Elementwise binary operations in TensorFlow follow [numpy-style

TensorFlow provides a variety of math functions including:

* Basic arithmetic operators and trigonometric functions.
* Special math functions (like: tf.math.igamma and tf.math.zeta)
* Complex number functions (like: tf.math.imag and tf.math.angle)
* Reductions and scans (like: tf.math.reduce_mean and tf.math.cumsum)
* Segment functions (like: tf.math.segment_sum)

See: tf.linalg for matrix and tensor functions.

<a id=Segmentation></a>

TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it  defines a mapping from the first dimension onto
segment_ids. The segment_ids tensor should be the size of
the first dimension, d0, with consecutive IDs in the range 0 to k,
where k<d0.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.

For example:

```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.segment_sum(c, tf.constant([0, 0, 1]))
#  ==>  [[0 0 0 0]
#        [5 6 7 8]]
```

The standard segment_* functions assert that the segment indices are sorted.
If you have unsorted indices use the equivalent unsorted_segment_ function.
These functions take an additional argument num_segments so that the output
tensor can be efficiently allocated.

```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
# ==> [[ 6,  8, 10, 12],
#       [-1, -2, -3, -4]]
```

"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import numpy as np
import six
from six.moves import builtins
from six.moves import xrange  # pylint: disable=redefined-builtin

from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import graph_util
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import tf_export

# Aliases for some automatically-generated names.
linspace = gen_math_ops.lin_space
nextafter = gen_math_ops.next_after

# `arg_max` and `arg_min` come from the wildcard import of gen_math_ops above;
# wrap them with deprecation notices and export them only in the v1 namespace.
arg_max = deprecation.deprecated(None, "Use tf.math.argmax instead")(arg_max)  # pylint: disable=used-before-assignment
arg_min = deprecation.deprecated(None, "Use tf.math.argmin instead")(arg_min)  # pylint: disable=used-before-assignment
tf_export(v1=["arg_max"])(arg_max)
tf_export(v1=["arg_min"])(arg_min)

# This is set by resource_variable_ops.py. It is included in this way since
# there is a circular dependency between math_ops and resource_variable_ops
_resource_variable_type = None

def _set_doc(doc):

def _decorator(func):
func.__doc__ = doc
return func

return _decorator

# pylint: disable=redefined-builtin
@tf_export(v1=["math.argmax", "argmax"])
@deprecation.deprecated_args(None, "Use the axis argument instead",
                             "dimension")
@_set_doc(
    gen_math_ops.arg_max.__doc__.replace("dimensions",
                                         "axes").replace("dimension", "axis"))
def argmax(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  # V1 endpoint: resolve the deprecated `dimension` argument against `axis`,
  # then delegate to the v2 implementation. The docstring is installed by
  # `_set_doc` above from the generated op's documentation.
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
                                                dimension)
  return argmax_v2(input, axis, output_type, name)

@tf_export("math.argmax", "argmax", v1=[])
def argmax_v2(input, axis=None, output_type=dtypes.int64, name=None):
"""Returns the index with the largest value across axes of a tensor.

Note that in case of ties the identity of the return value is not guaranteed.

For example:

>>> A = tf.constant([2, 20, 30, 3, 6])
>>> tf.math.argmax(A)  # A[2] is maximum in tensor A
<tf.Tensor: shape=(), dtype=int64, numpy=2>
>>> B = tf.constant([[2, 20, 30, 3, 6], [3, 11, 16, 1, 8],
...                  [14, 45, 23, 5, 27]])
>>> tf.math.argmax(B, 0)
<tf.Tensor: shape=(5,), dtype=int64, numpy=array([2, 2, 0, 2, 2])>
>>> tf.math.argmax(B, 1)
<tf.Tensor: shape=(3,), dtype=int64, numpy=array([2, 2, 1])>

Args:
input: A Tensor.
axis: An integer, the axis to reduce across. Default to 0.
output_type: An optional output dtype (tf.int32 or tf.int64). Defaults
to tf.int64.
name: An optional name for the operation.

Returns:
A Tensor of type output_type.
"""
if axis is None:
axis = 0
return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)

@tf_export(v1=["math.argmin", "argmin"])
@deprecation.deprecated_args(None, "Use the axis argument instead",
                             "dimension")
@_set_doc(
    gen_math_ops.arg_min.__doc__.replace("dimensions",
                                         "axes").replace("dimension", "axis"))
def argmin(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  # V1 endpoint: resolve the deprecated `dimension` argument against `axis`,
  # then delegate to the v2 implementation. The docstring is installed by
  # `_set_doc` above from the generated op's documentation.
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
                                                dimension)
  return argmin_v2(input, axis, output_type, name)

@tf_export("math.argmin", "argmin", v1=[])
def argmin_v2(input, axis=None, output_type=dtypes.int64, name=None):
"""Returns the index with the smallest value across axes of a tensor.

Note that in case of ties the identity of the return value is not guaranteed.

Args:
input: A Tensor. Must be one of the following types: float32, float64,
int32, uint8, int16, int8, complex64, int64, qint8,
quint8, qint32, bfloat16, uint16, complex128, half, uint32,
uint64.
axis: A Tensor. Must be one of the following types: int32, int64.
int32 or int64, must be in the range -rank(input), rank(input)).
Describes which axis of the input Tensor to reduce across. For vectors,
use axis = 0.
output_type: An optional tf.DType from: tf.int32, tf.int64. Defaults to
tf.int64.
name: A name for the operation (optional).

Returns:
A Tensor of type output_type.

Usage:
python
import tensorflow as tf
a = [1, 10, 26.9, 2.8, 166.32, 62.3]
b = tf.math.argmin(input = a)
c = tf.keras.backend.eval(b)
# c = 0
# here a[0] = 1 which is the smallest element of a across axis 0

"""
if axis is None:
axis = 0
return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type)

# pylint: enable=redefined-builtin

# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
@tf_export("math.abs", "abs")
def abs(x, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the absolute value of a tensor.

  Given a tensor of integer or floating-point values, this operation returns a
  tensor of the same type, where each element contains the absolute value of
  the corresponding element in the input.

  Given a tensor `x` of complex numbers, this operation returns a tensor of
  type `float32` or `float64` that is the absolute value of each element in
  `x`. For a complex number \(a + bj\), its absolute value is computed as
  \(\sqrt{a^2 + b^2}\).  For example:

  >>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])
  >>> tf.abs(x)
  <tf.Tensor: shape=(2, 1), dtype=float64, numpy=
  array([[5.25594901],
         [6.60492241]])>

  Args:
    x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,
      `int32`, `int64`, `complex64` or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` of the same size, type and sparsity as `x`,
    with absolute values. Note, for `complex64` or `complex128` input, the
    returned `Tensor` will be of type `float32` or `float64`, respectively.
  """
  with ops.name_scope(name, "Abs", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.is_complex:
      # Complex inputs map to their real magnitude dtype.
      return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
    return gen_math_ops._abs(x, name=name)

# pylint: enable=g-docstring-has-escape

# pylint: disable=redefined-builtin
def _bucketize(input, boundaries, name=None):
  """Bucketizes `input` by `boundaries` (thin wrapper over the generated op)."""
  return gen_math_ops.bucketize(input=input, boundaries=boundaries, name=name)

# pylint: enable=redefined-builtin

class DivideDelegateWithName(object):
  """Use Python2/Python3 division delegation to implement divide for tensors."""

  def __init__(self, x, name):
    """Construct DivideDelegateWithName.

    Args:
      x: Tensor to use as left operand in operator overloads
      name: The name that is preferred for the op created.
    """
    self.x = x
    self.name = name

  def __truediv__(self, y):
    # Python 3 `/` semantics (always true division).
    return _truediv_python3(self.x, y, self.name)

  def __floordiv__(self, y):
    # `//` semantics.
    return floordiv(self.x, y, self.name)

  def __div__(self, y):
    # Python 2 `/` semantics (floor for integers, true for floats).
    return _div_python2(self.x, y, self.name)

[文档]@tf_export("math.divide", "divide")
def divide(x, y, name=None):
"""Computes Python style division of x by y.

For example:

>>> x = tf.constant([16, 12, 11])
>>> y = tf.constant([4, 6, 2])
>>> tf.divide(x,y)
<tf.Tensor: shape=(3,), dtype=float64,
numpy=array([4. , 2. , 5.5])>

Args:
x: A Tensor
y: A Tensor
name: A name for the operation (optional).

Returns:
A Tensor with same shape as input
"""

if name is not None:
# Cannot use tensors operator overload, because it has no way to track
# override names. Use a dummy class to track the runtime division behavior
return DivideDelegateWithName(x, name) / y
else:
return x / y

@tf_export("math.multiply", "multiply")
def multiply(x, y, name=None):
  """Returns an element-wise x * y.

  For example:

  >>> x = tf.constant(([1, 2, 3, 4]))
  >>> tf.math.multiply(x, x)
  <tf.Tensor: shape=(4,), dtype=..., numpy=array([ 1,  4,  9, 16], dtype=int32)>

  Since `tf.math.multiply` will convert its arguments to `Tensor`s, you can
  also pass in non-`Tensor` arguments:

  >>> tf.math.multiply(7,6)
  <tf.Tensor: shape=(), dtype=int32, numpy=42>

  If `x.shape` is not the same as `y.shape`, they will be broadcast to a
  compatible shape.

  For example:

  >>> x = tf.ones([1, 2]);
  >>> y = tf.ones([2, 1]);
  >>> x * y  # Taking advantage of operator overriding
  <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
  array([[1., 1.],
         [1., 1.]], dtype=float32)>

  Args:
    x: A `Tensor`. Must be one of the following types: `bfloat16`,
      `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`,
      `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:

  A `Tensor`.  Has the same type as `x`.

  Raises:

   * InvalidArgumentError: When `x` and `y` have incompatible shapes or types.
  """

  return gen_math_ops.mul(x, y, name)

# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
    "2016-12-30",
    "tf.mul(x, y) is deprecated; use tf.math.multiply(x, y) or x * y")
def _mul(x, y, name=None):
  # Deprecated alias for `tf.math.multiply`. No docstring here: `__doc__` is
  # composed below from the generated op's docs plus any deprecation notice.
  return gen_math_ops.mul(x, y, name)


_mul.__doc__ = (
    gen_math_ops.mul.__doc__ + ("" if _mul.__doc__ is None else _mul.__doc__))

@tf_export("math.subtract", "subtract")
def subtract(x, y, name=None):
  # Thin wrapper over the generated op; `__doc__` is installed just below.
  return gen_math_ops.sub(x, y, name)


subtract.__doc__ = gen_math_ops.sub.__doc__.replace("Sub", "tf.subtract")

# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
    "2016-12-30",
    "tf.sub(x, y) is deprecated, please use tf.subtract(x, y) or x - y")
def _sub(x, y, name=None):
  # Deprecated alias for `tf.subtract`. No docstring here: `__doc__` is
  # composed below from the generated op's docs plus any deprecation notice.
  return gen_math_ops.sub(x, y, name)


_sub.__doc__ = (
    gen_math_ops.sub.__doc__ + ("" if _sub.__doc__ is None else _sub.__doc__))

# Public alias for the generated negation op.
negative = gen_math_ops.neg


# pylint: disable=g-docstring-has-escape
@deprecation.deprecated(
    "2016-12-30",
    "tf.neg(x) is deprecated, please use tf.negative(x) or -x")
def _neg(x, name=None):
  r"""Computes numerical negative value element-wise.

  I.e., \(y = -x\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types:
      `half`, `float32`, `float64`, `int32`, `int64`, `complex64`,
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  return negative(x, name)

# pylint: enable=g-docstring-has-escape

@tf_export(v1=["math.scalar_mul", "scalar_mul"])
def scalar_mul(scalar, x, name=None):
  """Multiplies a scalar times a `Tensor` or `IndexedSlices` object.

  Intended for use in gradient code which might deal with `IndexedSlices`
  objects, which are easy to multiply by a scalar but more expensive to
  multiply with arbitrary tensors.

  Args:
    scalar: A 0-D scalar `Tensor`. Must have known shape.
    x: A `Tensor` or `IndexedSlices` to be scaled.
    name: A name for the operation (optional).

  Returns:
    `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.

  Raises:
    ValueError: if scalar is not a 0-D `scalar`.
  """
  scalar = ops.convert_to_tensor(
      scalar, dtype=x.dtype.base_dtype, name="scalar")
  shape = scalar.get_shape()
  if shape.ndims == 0:
    if isinstance(x, ops.IndexedSlices):
      # Scale only the values; indices and dense_shape are unchanged.
      return ops.IndexedSlices(
          gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape)
    else:
      return gen_math_ops.mul(scalar, x, name)
  else:
    raise ValueError("Only scalar multiply works, got shape %s" % shape)

@tf_export("math.scalar_mul", "scalar_mul", v1=[])
@_set_doc(scalar_mul.__doc__)
def scalar_mul_v2(scalar, x, name=None):
with ops.name_scope(name, "scalar_mul", [x]) as name:
return scalar_mul(scalar, x, name)

@tf_export("math.pow", "pow")
def pow(x, y, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the power of one value to another.

  Given a tensor `x` and a tensor `y`, this operation computes \(x^y\) for
  corresponding elements in `x` and `y`. For example:

  ```python
  x = tf.constant([[2, 2], [3, 3]])
  y = tf.constant([[8, 16], [2, 3]])
  tf.pow(x, y)  # [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  """
  with ops.name_scope(name, "Pow", [x]) as name:
    return gen_math_ops._pow(x, y, name=name)

# pylint: disable=redefined-builtin,redefined-outer-name
@tf_export("dtypes.complex", "complex")
def complex(real, imag, name=None):
  r"""Converts two real numbers to a complex number.

  Given a tensor `real` representing the real part of a complex number, and a
  tensor `imag` representing the imaginary part of a complex number, this
  operation returns complex numbers elementwise of the form \(a + bj\), where
  *a* represents the `real` part and *b* represents the `imag` part.

  The input tensors `real` and `imag` must have the same shape.

  For example:

  ```python
  real = tf.constant([2.25, 3.25])
  imag = tf.constant([4.75, 5.75])
  tf.complex(real, imag)  # [[2.25 + 4.75j], [3.25 + 5.75j]]
  ```

  Args:
    real: A `Tensor`. Must be one of the following types: `float32`,
      `float64`.
    imag: A `Tensor`. Must have the same type as `real`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64` or `complex128`.

  Raises:
    TypeError: Real and imag must be correct types
  """
  real = ops.convert_to_tensor(real, name="real")
  imag = ops.convert_to_tensor(imag, name="imag")
  with ops.name_scope(name, "Complex", [real, imag]) as name:
    input_types = (real.dtype, imag.dtype)
    if input_types == (dtypes.float64, dtypes.float64):
      Tout = dtypes.complex128
    elif input_types == (dtypes.float32, dtypes.float32):
      Tout = dtypes.complex64
    else:
      raise TypeError("real and imag have incorrect types: "
                      "{} {}".format(real.dtype.name, imag.dtype.name))
    return gen_math_ops._complex(real, imag, Tout=Tout, name=name)

@tf_export("math.sign", "sign")
def sign(x, name=None):
  """Returns an element-wise indication of the sign of a number.

  `y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0`.

  For complex numbers, `y = sign(x) = x / |x| if x != 0, otherwise y = 0`.

  Example usage:

  >>> tf.math.sign([0., 2., -3.])
  <tf.Tensor: ... numpy=array([ 0.,  1., -1.], dtype=float32)>

  Args:
    x: A Tensor. Must be one of the following types: bfloat16, half, float32,
      float64, int32, int64, complex64, complex128.
    name: A name for the operation (optional).

  Returns:
    A Tensor. Has the same type as x.

    If x is a SparseTensor, returns SparseTensor(x.indices,
      tf.math.sign(x.values, ...), x.dense_shape).
  """
  x = ops.convert_to_tensor(x)
  if x.dtype in (dtypes.complex64, dtypes.complex128):
    # Complex sign is x / |x|; div_no_nan yields 0 where |x| == 0.
    return gen_math_ops.div_no_nan(
        x,
        cast(
            gen_math_ops.complex_abs(
                x,
                Tout=dtypes.float32
                if x.dtype == dtypes.complex64 else dtypes.float64),
            dtype=x.dtype),
        name=name)
  return gen_math_ops.sign(x, name=name)

@tf_export("math.real", v1=["math.real", "real"])
@deprecation.deprecated_endpoints("real")
def real(input, name=None):
r"""Returns the real part of a complex (or real) tensor.

Given a tensor input, this operation returns a tensor of type float that
is the real part of each element in input considered as a complex number.

For example:

python
x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
tf.math.real(x)  # [-2.25, 3.25]


If input is already real, it is returned unchanged.

Args:
input: A Tensor. Must have numeric type.
name: A name for the operation (optional).

Returns:
A Tensor of type float32 or float64.
"""
with ops.name_scope(name, "Real", [input]) as name:
input = ops.convert_to_tensor(input, name="input")
if input.dtype.is_complex:
real_dtype = input.dtype.real_dtype
return gen_math_ops.real(input, Tout=real_dtype, name=name)
else:
return input

@tf_export("math.imag", v1=["math.imag", "imag"])
@deprecation.deprecated_endpoints("imag")
def imag(input, name=None):
r"""Returns the imaginary part of a complex (or real) tensor.

Given a tensor input, this operation returns a tensor of type float that
is the imaginary part of each element in input considered as a complex
number. If input is real, a tensor of all zeros is returned.

For example:

python
x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
tf.math.imag(x)  # [4.75, 5.75]


Args:
input: A Tensor. Must be one of the following types: float, double,
complex64, complex128.
name: A name for the operation (optional).

Returns:
A Tensor of type float32 or float64.
"""
with ops.name_scope(name, "Imag", [input]) as name:
input = ops.convert_to_tensor(input, name="input")
if input.dtype.is_complex:
return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
else:
return array_ops.zeros_like(input)

@tf_export("math.angle", v1=["math.angle", "angle"])
@deprecation.deprecated_endpoints("angle")
def angle(input, name=None):
r"""Returns the element-wise argument of a complex (or real) tensor.

Given a tensor input, this operation returns a tensor of type float that
is the argument of each element in input considered as a complex number.

The elements in input are considered to be complex numbers of the form
\$$a + bj\$$, where *a* is the real part and *b* is the imaginary part.
If input is real then *b* is zero by definition.

The argument returned by this function is of the form \$$atan2(b, a)\$$.
If input is real, a tensor of all zeros is returned.

For example:


input = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j], dtype=tf.complex64)
tf.math.angle(input).numpy()
# ==> array([2.0131705, 1.056345 ], dtype=float32)


Args:
input: A Tensor. Must be one of the following types: float, double,
complex64, complex128.
name: A name for the operation (optional).

Returns:
A Tensor of type float32 or float64.
"""
with ops.name_scope(name, "Angle", [input]) as name:
input = ops.convert_to_tensor(input, name="input")
if input.dtype.is_complex:
return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)
else:
return array_ops.zeros_like(input)

# pylint: enable=redefined-outer-name,redefined-builtin

@tf_export("math.round", "round")
def round(x, name=None):  # pylint: disable=redefined-builtin
  """Rounds the values of a tensor to the nearest integer, element-wise.

  Rounds half to even.  Also known as bankers rounding. If you want to round
  according to the current system rounding mode use tf::cint.
  For example:

  ```python
  x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])
  tf.round(x)  # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or
      `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  if x.dtype.is_integer:
    # Integers are already "rounded"; avoid an unnecessary op.
    return x
  else:
    return gen_math_ops.round(x, name=name)

@tf_export("cast", "dtypes.cast")
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor` or `IndexedSlices`) to `dtype`.

  For example:

  >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
  >>> tf.dtypes.cast(x, tf.int32)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>

  The operation supports data types (for `x` and `dtype`) of
  `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,
  `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.
  In case of casting from complex types (`complex64`, `complex128`) to real
  types, only the real part of `x` is returned. In case of casting from real
  types to complex types (`complex64`, `complex128`), the imaginary part of
  the returned value is set to `0`. The handling of complex types here matches
  the behavior of numpy.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It
      could be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`,
      `int32`, `int64`, `float16`, `float32`, `float64`, `complex64`,
      `complex128`, `bfloat16`.
    dtype: The destination type. The list of supported dtypes is the same as
      `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and
    same type as `dtype`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  base_type = dtypes.as_dtype(dtype).base_dtype
  # NOTE(review): `_resource_variable_type` is None until
  # resource_variable_ops is imported; this isinstance call presumably relies
  # on that import having happened first — confirm against module init order.
  if isinstance(x,
                (ops.Tensor, _resource_variable_type)) and base_type == x.dtype:
    return x
  with ops.name_scope(name, "Cast", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      values_cast = cast(x.values, base_type, name=name)
      x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
    elif isinstance(x, ops.IndexedSlices):
      values_cast = cast(x.values, base_type, name=name)
      x = ops.IndexedSlices(values_cast, x.indices, x.dense_shape)
    else:
      # TODO(josh11b): If x is not already a Tensor, we could return
      # ops.convert_to_tensor(x, dtype=dtype, ...)  here, but that
      # allows some conversions that cast() can't do, e.g. casting numbers to
      # strings.
      x = ops.convert_to_tensor(x, name="x")
      if x.dtype.base_dtype != base_type:
        x = gen_math_ops.cast(x, base_type, name=name)
    if x.dtype.is_complex and base_type.is_floating:
      logging.warn("Casting complex to real discards imaginary part.")
    return x

@tf_export("dtypes.saturate_cast", "saturate_cast")
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.

  This function casts the input to `dtype` without applying any scaling.  If
  there is a danger that values would over or underflow in the cast, this op
  applies the appropriate clamping before the cast.

  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  """
  # When casting to a type with smaller representable range, clamp.
  # Note that this covers casting to unsigned types as well.
  with ops.name_scope(name, "saturate_cast", [value]) as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype
    if value.dtype.min < dtype.min:
      value = gen_math_ops.maximum(
          value,
          ops.convert_to_tensor(dtype.min, dtype=value.dtype, name="min"))
    if value.dtype.max > dtype.max:
      value = gen_math_ops.minimum(
          value,
          ops.convert_to_tensor(dtype.max, dtype=value.dtype, name="max"))
    return cast(value, dtype, name=name)

@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_float"])
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x`
    with type `float32`.

  Raises:
    TypeError: If `x` cannot be cast to the `float32`.
  """
  return cast(x, dtypes.float32, name=name)

@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_double"])
def to_double(x, name="ToDouble"):
  """Casts a tensor to type `float64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x`
    with type `float64`.

  Raises:
    TypeError: If `x` cannot be cast to the `float64`.
  """
  return cast(x, dtypes.float64, name=name)

@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_int32"])
def to_int32(x, name="ToInt32"):
  """Casts a tensor to type `int32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x`
    with type `int32`.

  Raises:
    TypeError: If `x` cannot be cast to the `int32`.
  """
  return cast(x, dtypes.int32, name=name)

@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_int64"])
def to_int64(x, name="ToInt64"):
  """Casts a tensor to type `int64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x`
    with type `int64`.

  Raises:
    TypeError: If `x` cannot be cast to the `int64`.
  """
  return cast(x, dtypes.int64, name=name)

@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_bfloat16"])
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x`
    with type `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to the `bfloat16`.
  """
  return cast(x, dtypes.bfloat16, name=name)

@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_complex64"])
def to_complex64(x, name="ToComplex64"):
  """Casts a tensor to type `complex64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x`
    with type `complex64`.

  Raises:
    TypeError: If `x` cannot be cast to the `complex64`.
  """
  return cast(x, dtypes.complex64, name=name)

@deprecation.deprecated(date=None, instructions="Use tf.cast instead.")
@tf_export(v1=["to_complex128"])
def to_complex128(x, name="ToComplex128"):
  """Casts a tensor to type `complex128`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x`
    with type `complex128`.

  Raises:
    TypeError: If `x` cannot be cast to the `complex128`.
  """
  return cast(x, dtypes.complex128, name=name)

# Unary operator overloads on Tensor: `-x` dispatches to the generated neg op
# and `abs(x)` to the module-level `abs` defined above.
ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
ops.Tensor._override_operator("__abs__", abs)
# __invert__ corresponds to the ~ operator.  Here we follow the numpy convention
# ~ marks an elementwise bit-wise inverse.  This is only implemented for boolean
# tensors and will throw a TypeError if used on nonboolean arrays
ops.Tensor._override_operator("__invert__", gen_math_ops.logical_not)

def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
  """Register operators with different tensor and scalar versions.

  If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
  sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.

  Args:
    func: the operator
    op_name: name of the operator being overridden
    clazz_object: class to override for.  Either `Tensor` or `SparseTensor`.
  """

  def binary_op_wrapper(x, y):
    # Dense `x <op> y` path: converts a non-tensor RHS, deferring to the
    # RHS's reflected operator when conversion fails.
    with ops.name_scope(None, op_name, [x, y]) as name:
      if isinstance(x, ops.Tensor) and isinstance(y, ops.Tensor):
        return func(x, y, name=name)
      elif not isinstance(y, sparse_tensor.SparseTensor):
        try:
          y = ops.convert_to_tensor_v2(
              y, dtype_hint=x.dtype.base_dtype, name="y")
        except TypeError:
          # If the RHS is not a tensor, it might be a tensor aware object
          # that can implement the operator with knowledge of itself
          # and the tensor.
          if hasattr(type(y), "__r%s__" % op_name):
            return NotImplemented
          else:
            raise
      return func(x, y, name=name)

  def binary_op_wrapper_sparse(sp_x, y):
    # Sparse LHS path: applies `func` to the values only and rebuilds the
    # SparseTensor with the original indices and shape.
    with ops.name_scope(None, op_name, [sp_x, y]) as name:
      y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
      return sparse_tensor.SparseTensor(
          sp_x.indices,
          func(sp_x.indices, sp_x.values, sp_x.dense_shape, y, name=name),
          sp_x.dense_shape)

  def r_binary_op_wrapper(y, x):
    # Reflected (`__r<op>__`) path: LHS is the non-tensor operand.
    with ops.name_scope(None, op_name, [x, y]) as name:
      x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
      return func(x, y, name=name)

  # Propagate func.__doc__ to the wrappers.
  try:
    doc = func.__doc__
  except AttributeError:
    doc = None
  binary_op_wrapper.__doc__ = doc
  r_binary_op_wrapper.__doc__ = doc
  binary_op_wrapper_sparse.__doc__ = doc

  if clazz_object is ops.Tensor:
    clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
    del binary_op_wrapper
    clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
    del r_binary_op_wrapper
  else:
    clazz_object._override_operator("__%s__" % op_name,
                                    binary_op_wrapper_sparse)
    del binary_op_wrapper_sparse

# Conversion table for __truediv__.  None entries mean no conversion required.
# Integer dtypes are promoted to a float dtype wide enough to represent the
# quotient; float/complex dtypes divide in place.
_TRUEDIV_TABLE = {
    dtypes.uint8: dtypes.float32,
    dtypes.int8: dtypes.float32,
    dtypes.uint16: dtypes.float32,
    dtypes.int16: dtypes.float32,
    dtypes.int32: dtypes.float64,
    dtypes.int64: dtypes.float64,
    dtypes.bfloat16: None,
    dtypes.float16: None,
    dtypes.float32: None,
    dtypes.float64: None,
    dtypes.complex64: None,
    dtypes.complex128: None,
}

# NOTE: the support of "sparse (true)div dense" is currently not baked in into
# "tf.(true_)div()".  Until such an API decision is made, the supported usage is
# to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
  """Internal helper function for 'sp_t / dense_t'."""
  with ops.name_scope(name, "truediv",
                      [sp_indices, sp_values, sp_shape, y]) as name:
    sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = sp_values.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    try:
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    if dtype is not None:
      # Promote integer inputs to float so the division is "true" division.
      sp_values = cast(sp_values, dtype)
      y = cast(y, dtype)
    return gen_sparse_ops.sparse_dense_cwise_div(
        sp_indices, sp_values, sp_shape, y, name=name)

def _truediv_python3(x, y, name=None):
  """Implements Python 3 `/` semantics: integer inputs are promoted to float.

  Args:
    x: `Tensor` numerator.
    y: `Tensor` denominator; must have the same base dtype as `x`.
    name: A name for the operation (optional).

  Returns:
    `x / y` as true (floating-point) division.

  Raises:
    TypeError: If the dtypes differ or are not in `_TRUEDIV_TABLE`.
  """
  with ops.name_scope(name, "truediv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (x_dtype, y_dtype))
    try:
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError("Invalid dtype %r in __truediv__" % x_dtype)
    if dtype is not None:
      # Promote integer inputs so the result is floating point.
      x = cast(x, dtype)
      y = cast(y, dtype)
    return gen_math_ops.real_div(x, y, name=name)

def _div_python2(x, y, name=None):
  """Divide two values using Python 2 semantics.

  Used for Tensor.__div__.

  Args:
    x: Tensor numerator of real numeric type.
    y: Tensor denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    x / y returns the quotient of x and y.

  Raises:
    TypeError: If x and y have different dtypes after conversion.
  """
  with ops.name_scope(name, "div", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # Convert y using x's dtype so compatible Python scalars inherit it.
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    lhs_dtype = x.dtype.base_dtype
    rhs_dtype = y.dtype.base_dtype
    if lhs_dtype != rhs_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (lhs_dtype, rhs_dtype))
    # Python 2 semantics: integer inputs floor-divide, real/complex inputs
    # divide exactly.
    if not (lhs_dtype.is_floating or lhs_dtype.is_complex):
      return gen_math_ops.floor_div(x, y, name=name)
    return gen_math_ops.real_div(x, y, name=name)

@tf_export("math.truediv", "truediv")
def truediv(x, y, name=None):
  """Divides x / y elementwise (using Python 3 division operator semantics).

  NOTE: Prefer using the Tensor operator or tf.divide which obey Python
  division operator semantics.

  This function forces Python 3 division operator semantics, where all
  integer arguments are cast to floating types first.  This op is generated
  by normal x / y division in Python 3 and in Python 2.7 with
  from __future__ import division.  If you want integer division that
  rounds down, use x // y or tf.math.floordiv.

  x and y must have the same numeric type.  If the inputs are floating
  point, the output will have the same type.  If the inputs are integral,
  the inputs are cast to float32 for int8 and int16 and float64 for
  int32 and int64 (matching the behavior of NumPy).

  Args:
    x: Tensor numerator of numeric type.
    y: Tensor denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    x / y evaluated in floating point.

  Raises:
    TypeError: If x and y have different dtypes.
  """
  # Thin public wrapper; the casting logic lives in _truediv_python3.
  return _truediv_python3(x, y, name)

@deprecation.deprecated(
    date=None,
    instructions="Deprecated in favor of operator or tf.math.divide.")
@tf_export(v1=["div"])
def div(x, y, name=None):
  """Divides x / y elementwise (using Python 2 division operator semantics).

  NOTE: Prefer using the Tensor division operator or tf.divide which obey
  Python 3 division operator semantics.

  This function divides x and y, forcing Python 2 semantics: if x and
  y are both integers, the result is an integer.  This is in contrast to
  Python 3, where division with / is always a float and division with //
  is always an integer.

  Args:
    x: Tensor numerator of real numeric type.
    y: Tensor denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    x / y returns the quotient of x and y.
  """
  # Thin deprecated wrapper around the Python 2 division helper.
  return _div_python2(x, y, name)

@tf_export("math.divide_no_nan", v1=["math.divide_no_nan", "div_no_nan"])
@deprecation.deprecated_endpoints("div_no_nan")
def div_no_nan(x, y, name=None):
  """Computes a safe divide which returns 0 if the y is zero.

  Args:
    x: A Tensor. Must be one of the following types: float32, float64.
    y: A Tensor whose dtype is compatible with x.
    name: A name for the operation (optional).

  Returns:
    The element-wise value of the x divided by y.
  """
  with ops.name_scope(name, "div_no_nan", [x, y]) as name:
    numerator = ops.convert_to_tensor(x, name="x")
    # Convert the denominator with the numerator's dtype so compatible
    # Python scalars inherit it.
    denominator = ops.convert_to_tensor(
        y, name="y", dtype=numerator.dtype.base_dtype)
    return gen_math_ops.div_no_nan(numerator, denominator, name=name)

@tf_export("math.multiply_no_nan")
def multiply_no_nan(x, y, name=None):
  """Computes the product of x and y and returns 0 if the y is zero, even if x is NaN or infinite.

  Args:
    x: A Tensor. Must be one of the following types: float32, float64.
    y: A Tensor whose dtype is compatible with x.
    name: A name for the operation (optional).

  Returns:
    The element-wise value of the x times y.

  Raises:
    TypeError: If x and y have different dtypes after conversion.
  """
  with ops.name_scope(name, "multiply_no_nan", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    lhs_dtype = x.dtype.base_dtype
    rhs_dtype = y.dtype.base_dtype
    if lhs_dtype != rhs_dtype:
      raise TypeError("x and y must have the same dtype, got %r != %r" %
                      (lhs_dtype, rhs_dtype))
    return gen_math_ops.mul_no_nan(x, y, name=name)

# TODO(aselle): This should be removed
# Module-level alias for the generated floor_mod op.
mod = gen_math_ops.floor_mod

# TODO(aselle): Deprecate this once all internal functionality uses
# tf.truncatediv
@tf_export("math.floordiv", v1=["math.floordiv", "floordiv"])
@deprecation.deprecated_endpoints("floordiv")
def floordiv(x, y, name=None):
  """Divides x / y elementwise, rounding toward the most negative integer.

  The same as tf.compat.v1.div(x,y) for integers, but uses
  tf.floor(tf.compat.v1.div(x,y)) for floating point arguments, so that
  the result is always an integer (though possibly an integer represented
  as floating point).  This op is generated by x // y floor division in
  Python 3 and in Python 2.7 with from __future__ import division.

  x and y must have the same type, and the result will have the same
  type as well.

  Args:
    x: Tensor numerator of real numeric type.
    y: Tensor denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    x / y rounded down.

  Raises:
    TypeError: If the inputs are complex.
  """
  with ops.name_scope(name, "floordiv", [x, y]) as name:
    # The generated floor_div op implements the rounding behavior directly.
    return gen_math_ops.floor_div(x, y, name=name)

# Module-level aliases for the generated elementwise division/modulus ops.
realdiv = gen_math_ops.real_div
truncatediv = gen_math_ops.truncate_div
# TODO(aselle): Rename this to floordiv when we can.
floor_div = gen_math_ops.floor_div
truncatemod = gen_math_ops.truncate_mod
floormod = gen_math_ops.floor_mod

if x.dtype == dtypes.string:
else:

def _mul_dispatch(x, y, name=None):
  """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
  if isinstance(y, ops.Tensor):
    # Case: Dense * Dense.
    return gen_math_ops.mul(x, y, name=name)
  # Case: Dense * Sparse.  Multiply the sparse values against the dense
  # operand and rebuild a SparseTensor with the original index structure.
  assert isinstance(y, sparse_tensor.SparseTensor)
  new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
                                                   y.dense_shape, x, name)
  return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)

# NOTE(aselle): When integer division is added for sparse_dense_cwise,
# div, truediv, and floordiv should be delegated appropriately for
# Python semantics, analogous to dense cwise tensor operations.
# Sparse-operand arithmetic overloads (registered for SparseTensor).
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
                              sparse_tensor.SparseTensor)
_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
                              sparse_tensor.SparseTensor)

# Dense-tensor arithmetic overloads; the helper (defined earlier in this
# file) presumably wires each named op into the matching dunder method
# (e.g. "sub" -> __sub__) — verify against its definition.
_OverrideBinaryOperatorHelper(gen_math_ops.sub, "sub")
_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
_OverrideBinaryOperatorHelper(_div_python2, "div")
_OverrideBinaryOperatorHelper(_truediv_python3, "truediv")
_OverrideBinaryOperatorHelper(floordiv, "floordiv")
_OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
_OverrideBinaryOperatorHelper(pow, "pow")

@tf_export("math.logical_xor", v1=["math.logical_xor", "logical_xor"])
@deprecation.deprecated_endpoints("logical_xor")
def logical_xor(x, y, name="LogicalXor"):
  """Logical XOR function.

  x ^ y = (x | y) & ~(x & y)

  The operation works for the following input types:

  - Two single elements of type bool
  - One tf.Tensor of type bool and one single bool, where the result will
    be calculated by applying logical XOR with the single element to each
    element in the larger Tensor.
  - Two tf.Tensor objects of type bool of the same shape. In this case,
    the result will be the element-wise logical XOR of the two input
    tensors.

  Usage:

  >>> a = tf.constant([True])
  >>> b = tf.constant([False])
  >>> tf.math.logical_xor(a, b)
  <tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>

  >>> c = tf.constant([True])
  >>> x = tf.constant([False, True, True, False])
  >>> tf.math.logical_xor(c, x)
  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([ True, False, False,  True])>

  >>> y = tf.constant([False, False, True, True])
  >>> z = tf.constant([False, True, False, True])
  >>> tf.math.logical_xor(y, z)
  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False,  True,  True, False])>

  Args:
    x: A tf.Tensor type bool.
    y: A tf.Tensor of type bool.
    name: A name for the operation (optional).

  Returns:
    A tf.Tensor of type bool with the same size as that of x or y.
  """
  # TODO(alemi) Make this a cwise op if people end up relying on it.
  # Composed from primitives: x ^ y == (x | y) & ~(x & y).
  either = gen_math_ops.logical_or(x, y)
  both = gen_math_ops.logical_and(x, y)
  return gen_math_ops.logical_and(
      either, gen_math_ops.logical_not(both), name=name)

@tf_export("math.logical_and", "logical_and")
def logical_and(x, y, name=None):
  """Logical AND function.

  The operation works for the following input types:

  - Two single elements of type bool
  - One tf.Tensor of type bool and one single bool, where the result will
    be calculated by applying logical AND with the single element to each
    element in the larger Tensor.
  - Two tf.Tensor objects of type bool of the same shape. In this case,
    the result will be the element-wise logical AND of the two input
    tensors.

  Usage:

  >>> a = tf.constant([True])
  >>> b = tf.constant([False])
  >>> tf.math.logical_and(a, b)
  <tf.Tensor: shape=(1,), dtype=bool, numpy=array([False])>

  >>> c = tf.constant([True])
  >>> x = tf.constant([False, True, True, False])
  >>> tf.math.logical_and(c, x)
  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False,  True,  True, False])>

  >>> y = tf.constant([False, False, True, True])
  >>> z = tf.constant([False, True, False, True])
  >>> tf.math.logical_and(y, z)
  <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, False, False,  True])>

  Args:
    x: A tf.Tensor type bool.
    y: A tf.Tensor of type bool.
    name: A name for the operation (optional).

  Returns:
    A tf.Tensor of type bool with the same size as that of x or y.
  """
  # Direct pass-through to the generated cwise op.
  return gen_math_ops.logical_and(x, y, name)

# Boolean operator overloads for dense tensors.
_OverrideBinaryOperatorHelper(gen_math_ops.logical_and, "and")
_OverrideBinaryOperatorHelper(gen_math_ops.logical_or, "or")
_OverrideBinaryOperatorHelper(logical_xor, "xor")

# Comparison operators dispatch directly to the generated cwise ops.
ops.Tensor._override_operator("__lt__", gen_math_ops.less)
ops.Tensor._override_operator("__le__", gen_math_ops.less_equal)
ops.Tensor._override_operator("__gt__", gen_math_ops.greater)
ops.Tensor._override_operator("__ge__", gen_math_ops.greater_equal)

@tf_export("math.equal", "equal")
def equal(x, y, name=None):
  """Returns the truth value of (x == y) element-wise.

  Performs a broadcast with the arguments and then an element-wise equality
  comparison, returning a Tensor of boolean values.

  For example:

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant(2)
  >>> tf.math.equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True,  False])>

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant([2, 4])
  >>> tf.math.equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True,  True])>

  Args:
    x: A tf.Tensor or tf.SparseTensor or tf.IndexedSlices.
    y: A tf.Tensor or tf.SparseTensor or tf.IndexedSlices.
    name: A name for the operation (optional).

  Returns:
    A tf.Tensor of type bool with the same size as that of x or y.

  Raises:
    tf.errors.InvalidArgumentError: If shapes of arguments are incompatible
  """
  # Direct pass-through to the generated cwise op.
  return gen_math_ops.equal(x, y, name=name)

@tf_export("math.not_equal", "not_equal")
def not_equal(x, y, name=None):
  """Returns the truth value of (x != y) element-wise.

  Performs a broadcast with the arguments and then an element-wise
  inequality comparison, returning a Tensor of boolean values.

  For example:

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant(2)
  >>> tf.math.not_equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  True])>

  >>> x = tf.constant([2, 4])
  >>> y = tf.constant([2, 4])
  >>> tf.math.not_equal(x, y)
  <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False,  False])>

  Args:
    x: A tf.Tensor or tf.SparseTensor or tf.IndexedSlices.
    y: A tf.Tensor or tf.SparseTensor or tf.IndexedSlices.
    name: A name for the operation (optional).

  Returns:
    A tf.Tensor of type bool with the same size as that of x or y.

  Raises:
    tf.errors.InvalidArgumentError: If shapes of arguments are incompatible
  """
  # Direct pass-through to the generated cwise op.
  return gen_math_ops.not_equal(x, y, name=name)

def tensor_equals(self, other):
  """Compares two tensors element-wise for equality.

  Installed as Tensor.__eq__ below.  Comparing against None always
  returns False; otherwise, when the _USE_EQUALITY switch is on and we are
  executing eagerly (or building a function graph), this returns an
  element-wise comparison tensor; in legacy graph mode it falls back to
  object identity.
  """
  if other is None:
    return False
  g = getattr(self, "graph", None)
  elementwise_ok = (
      ops.Tensor._USE_EQUALITY and
      ops.executing_eagerly_outside_functions() and
      (g is None or g.building_function))
  if elementwise_ok:
    return gen_math_ops.equal(self, other, incompatible_shape_error=False)
  # In legacy graph mode, tensor equality is object equality.
  return self is other

def tensor_not_equals(self, other):
  """Compares two tensors element-wise for inequality.

  Installed as Tensor.__ne__ below.  Comparing against None always
  returns True; with the _USE_EQUALITY switch on while executing eagerly
  outside functions, this returns an element-wise comparison tensor; in
  legacy graph mode it falls back to object identity.
  """
  if other is None:
    return True
  elementwise_ok = (
      ops.Tensor._USE_EQUALITY and
      ops.executing_eagerly_outside_functions())
  if elementwise_ok:
    return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)
  # In legacy graph mode, tensor equality is object equality.
  return self is not other

# Install the equality semantics defined above on the Tensor class.
ops.Tensor._override_operator("__eq__", tensor_equals)
ops.Tensor._override_operator("__ne__", tensor_not_equals)

@tf_export("range")
def range(start, limit=None, delta=1, dtype=None, name="range"):  # pylint: disable=redefined-builtin
  """Creates a sequence of numbers.

  Creates a sequence of numbers that begins at start and extends by
  increments of delta up to but not including limit.

  The dtype of the resulting tensor is inferred from the inputs unless
  it is provided explicitly.

  Like the Python builtin range, start defaults to 0, so that
  range(n) = range(0, n).

  For example:

  >>> start = 3
  >>> limit = 18
  >>> delta = 3
  >>> tf.range(start, limit, delta)
  <tf.Tensor: shape=(5,), dtype=int32,
  numpy=array([ 3,  6,  9, 12, 15], dtype=int32)>

  >>> start = 3
  >>> limit = 1
  >>> delta = -0.5
  >>> tf.range(start, limit, delta)
  <tf.Tensor: shape=(4,), dtype=float32,
  numpy=array([3. , 2.5, 2. , 1.5], dtype=float32)>

  >>> limit = 5
  >>> tf.range(limit)
  <tf.Tensor: shape=(5,), dtype=int32,
  numpy=array([0, 1, 2, 3, 4], dtype=int32)>

  Args:
    start: A 0-D Tensor (scalar). Acts as first entry in the range if
      limit is not None; otherwise, acts as range limit and first entry
      defaults to 0.
    limit: A 0-D Tensor (scalar). Upper limit of sequence, exclusive. If
      None, defaults to the value of start while the first entry of the
      range defaults to 0.
    delta: A 0-D Tensor (scalar). Number that increments start. Defaults
      to 1.
    dtype: The type of the elements of the resulting tensor.
    name: A name for the operation. Defaults to "range".

  Returns:
    An 1-D Tensor of type dtype.

  @compatibility(numpy)
  Equivalent to np.arange
  @end_compatibility
  """
  if limit is None:
    # Mirror the Python builtin: range(n) == range(0, n).
    start, limit = 0, start

  with ops.name_scope(name, "Range", [start, limit, delta]) as name:
    if not isinstance(start, ops.Tensor):
      start = ops.convert_to_tensor(start, dtype=dtype, name="start")
    if not isinstance(limit, ops.Tensor):
      limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
    if not isinstance(delta, ops.Tensor):
      delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")

    if dtype is not None:
      inferred_dtype = dtype
    else:
      # Infer the widest input dtype, where the widening order is
      # int32 < int64 < float32 < float64.
      dtype_hierarchy = [
          dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
      ]
      assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
      inferred_dtype = max([arg.dtype for arg in [start, limit, delta]],
                           key=dtype_hierarchy.index)
    # Always cast, even when start/limit/delta arrived as tensors: this
    # resolves the case where a tensor argument's original dtype differs
    # from the provided/inferred dtype.
    start = cast(start, inferred_dtype)
    limit = cast(limit, inferred_dtype)
    delta = cast(delta, inferred_dtype)

    return gen_math_ops._range(start, limit, delta, name=name)

def _range_tensor_conversion_function(value, dtype=None, name=None,
                                      as_ref=False):
  """Converts a builtins.range object to a 1-D Tensor via tf.range."""
  del as_ref  # Unused: ranges are always converted by value.
  return range(value.start, value.stop, value.step, dtype=dtype, name=name)

# Register the conversion only on Python 3, where builtins.range is a type
# whose instances carry start/stop/step attributes.
if not six.PY2:
  ops.register_tensor_conversion_function(builtins.range,
                                          _range_tensor_conversion_function)

# Reduction operations
def _ReductionDims(x, axis, reduction_indices=None):  # pylint: disable=invalid-name
"""Returns range(0, rank(x)) if reduction_indices is None."""
# TODO(aselle): Remove this after deprecation
if reduction_indices is not None:
if axis is not None:
raise ValueError("Can't specify both axis' and 'reduction_indices'.")
axis = reduction_indices
if axis is not None:
return axis
else:
# Fast path: avoid creating Rank and Range ops if ndims is known.
if isinstance(x, ops.Tensor):
rank = x.shape.rank
if rank is not None:
return constant_op.constant(np.arange(rank, dtype=np.int32))
elif (isinstance(x, sparse_tensor.SparseTensor) and
x.dense_shape.shape.is_fully_defined()):
rank = x.dense_shape.shape.dims[0].value  # sparse.dense_shape is 1-D.
return constant_op.constant(np.arange(rank, dtype=np.int32))

# Otherwise, we rely on Range and Rank to do the right thing at run-time.
return range(0, array_ops.rank(x))

def _has_fully_defined_shape(tensor):
  """Returns true if tensor has a fully defined shape."""
  # EagerTensors are treated as always fully defined.
  if isinstance(tensor, ops.EagerTensor):
    return True
  return tensor.shape.is_fully_defined()

def _may_reduce_to_scalar(keepdims, axis, output):
  """Set a reduction's output shape to be a scalar if we are certain."""
  # Reducing over all dimensions without keepdims yields a scalar, but
  # only pin the static shape when it isn't already fully defined.
  certain_scalar = (not _has_fully_defined_shape(output) and
                    not keepdims and axis is None)
  if certain_scalar:
    output.set_shape(())
  return output

@tf_export(v1=["math.reduce_sum", "reduce_sum"])
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
def reduce_sum_v1(input_tensor,
                  axis=None,
                  keepdims=None,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None):
  """Computes the sum of elements across dimensions of a tensor.

  Reduces input_tensor along the dimensions given in axis.  Unless
  keepdims is true, the rank of the tensor is reduced by 1 for each entry
  in axis; if keepdims is true, the reduced dimensions are retained with
  length 1.  If axis is None, all dimensions are reduced, and a tensor
  with a single element is returned.

  For example:

  ```python
  x = tf.constant([[1, 1, 1], [1, 1, 1]])
  tf.reduce_sum(x)  # 6
  tf.reduce_sum(x, 0)  # [2, 2, 2]
  tf.reduce_sum(x, 1)  # [3, 3]
  tf.reduce_sum(x, 1, keepdims=True)  # [[3], [3]]
  tf.reduce_sum(x, [0, 1])  # 6
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If None (the default), reduces all
      dimensions. Must be in the range [-rank(input_tensor),
      rank(input_tensor)).
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for keepdims.

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to
  int64 while tensorflow returns the same dtype as the input.
  @end_compatibility
  """
  # Resolve the deprecated argument aliases before delegating to the v2 op.
  resolved_axis = deprecation.deprecated_argument_lookup(
      "axis", axis, "reduction_indices", reduction_indices)
  resolved_keepdims = deprecation.deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  return reduce_sum(input_tensor, resolved_axis, resolved_keepdims, name)

@tf_export("math.reduce_sum", "reduce_sum", v1=[])
def reduce_sum(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the sum of elements across dimensions of a tensor.

  Reduces input_tensor along the dimensions given in axis.  Unless
  keepdims is true, the rank of the tensor is reduced by 1 for each entry
  in axis; if keepdims is true, the reduced dimensions are retained with
  length 1.  If axis is None, all dimensions are reduced, and a tensor
  with a single element is returned.

  For example:

  ```python
  x = tf.constant([[1, 1, 1], [1, 1, 1]])
  tf.reduce_sum(x)  # 6
  tf.reduce_sum(x, 0)  # [2, 2, 2]
  tf.reduce_sum(x, 1)  # [3, 3]
  tf.reduce_sum(x, 1, keepdims=True)  # [[3], [3]]
  tf.reduce_sum(x, [0, 1])  # 6
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If None (the default), reduces all
      dimensions. Must be in the range [-rank(input_tensor),
      rank(input_tensor)).
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.sum apart the fact that numpy upcast uint8 and int32 to
  int64 while tensorflow returns the same dtype as the input.
  @end_compatibility
  """
  # Resolve the reduction dims eagerly, then delegate to the dims variant.
  dims = _ReductionDims(input_tensor, axis)
  return reduce_sum_with_dims(input_tensor, axis, keepdims, name, dims)

def reduce_sum_with_dims(input_tensor,
                         axis=None,
                         keepdims=False,
                         name=None,
                         dims=None):
  """Sums input_tensor over pre-resolved reduction dims.

  dims is expected to be the already-computed result of
  _ReductionDims(input_tensor, axis); axis itself is only consulted to
  decide whether the output's static shape may be set to scalar.
  """
  if keepdims is None:
    keepdims = False
  summed = gen_math_ops._sum(input_tensor, dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, summed)

@tf_export("math.reduce_euclidean_norm")
def reduce_euclidean_norm(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the Euclidean norm of elements across dimensions of a tensor.

  Reduces input_tensor along the dimensions given in axis.  Unless
  keepdims is true, the rank of the tensor is reduced by 1 for each entry
  in axis; if keepdims is true, the reduced dimensions are retained with
  length 1.  If axis is None, all dimensions are reduced, and a tensor
  with a single element is returned.

  For example:

  ```python
  x = tf.constant([[1, 2, 3], [1, 1, 1]]) # x.dtype is tf.int32
  tf.math.reduce_euclidean_norm(x)  # returns 4 as dtype is tf.int32
  y = tf.constant([[1, 2, 3], [1, 1, 1]], dtype = tf.float32)
  tf.math.reduce_euclidean_norm(y)  # returns 4.1231055 which is sqrt(17)
  tf.math.reduce_euclidean_norm(y, 0)  # [sqrt(2), sqrt(5), sqrt(10)]
  tf.math.reduce_euclidean_norm(y, 1)  # [sqrt(14), sqrt(3)]
  tf.math.reduce_euclidean_norm(y, 1, keepdims=True)  # [[sqrt(14)], [sqrt(3)]]
  tf.math.reduce_euclidean_norm(y, [0, 1])  # sqrt(17)
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If None (the default), reduces all
      dimensions. Must be in the range [-rank(input_tensor),
      rank(input_tensor)).
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.
  """
  dims = _ReductionDims(input_tensor, axis)
  norm = gen_math_ops.euclidean_norm(input_tensor, dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, norm)

@tf_export(v1=["math.count_nonzero", "count_nonzero"])
@deprecation.deprecated_args(None,
                             "keep_dims is deprecated, use keepdims instead",
                             "keep_dims")
@deprecation.deprecated_args(
    None, "reduction_indices is deprecated, use axis instead",
    "reduction_indices")
def count_nonzero(input_tensor=None,
                  axis=None,
                  keepdims=None,
                  dtype=dtypes.int64,
                  name=None,
                  reduction_indices=None,
                  keep_dims=None,
                  input=None):  # pylint: disable=redefined-builtin
  """Computes number of nonzero elements across dimensions of a tensor.

  Reduces input_tensor along the dimensions given in axis.  Unless
  keepdims is true, the rank of the tensor is reduced by 1 for each entry
  in axis; if keepdims is true, the reduced dimensions are retained with
  length 1.  If axis has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check.  Small values are **not** rounded to zero for purposes of
  the nonzero check.

  For example:

  ```python
  x = tf.constant([[0, 1, 0], [1, 1, 0]])
  tf.math.count_nonzero(x)  # 3
  tf.math.count_nonzero(x, 0)  # [1, 2, 0]
  tf.math.count_nonzero(x, 1)  # [1, 2]
  tf.math.count_nonzero(x, 1, keepdims=True)  # [[1], [2]]
  tf.math.count_nonzero(x, [0, 1])  # 3
  ```

  **NOTE** Strings are compared against zero-length empty string "". Any
  string with a size greater than zero is already considered as nonzero.

  For example:
  ```python
  x = tf.constant(["", "a", "  ", "b", ""])
  tf.math.count_nonzero(x) # 3, with "a", "  ", and "b" as nonzero strings.
  ```

  Args:
    input_tensor: The tensor to reduce. Should be of numeric type, bool,
      or string.
    axis: The dimensions to reduce. If None (the default), reduces all
      dimensions. Must be in the range [-rank(input_tensor),
      rank(input_tensor)).
    keepdims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to tf.int64.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for keepdims.
    input: Overrides input_tensor. For compatibility.

  Returns:
    The reduced tensor (number of nonzero values).
  """
  # Resolve all deprecated aliases (in the original lookup order, so any
  # double-specification errors surface identically), then delegate to v2.
  resolved_keepdims = deprecation.deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  resolved_input = deprecation.deprecated_argument_lookup(
      "input", input, "input_tensor", input_tensor)
  resolved_axis = deprecation.deprecated_argument_lookup(
      "axis", axis, "reduction_indices", reduction_indices)

  return count_nonzero_v2(resolved_input, resolved_axis, resolved_keepdims,
                          dtype, name)

@tf_export("math.count_nonzero", v1=[])
def count_nonzero_v2(
    input,  # pylint: disable=redefined-builtin
    axis=None,
    keepdims=None,
    dtype=dtypes.int64,
    name=None):
  """Computes number of nonzero elements across dimensions of a tensor.

  Reduces input along the dimensions given in axis.  Unless keepdims
  is true, the rank of the tensor is reduced by 1 for each entry in
  axis; if keepdims is true, the reduced dimensions are retained with
  length 1.  If axis has no entries, all dimensions are reduced, and a
  tensor with a single element is returned.

  **NOTE** Floating point comparison to zero is done by exact floating point
  equality check.  Small values are **not** rounded to zero for purposes of
  the nonzero check.

  For example:

  ```python
  x = tf.constant([[0, 1, 0], [1, 1, 0]])
  tf.math.count_nonzero(x)  # 3
  tf.math.count_nonzero(x, 0)  # [1, 2, 0]
  tf.math.count_nonzero(x, 1)  # [1, 2]
  tf.math.count_nonzero(x, 1, keepdims=True)  # [[1], [2]]
  tf.math.count_nonzero(x, [0, 1])  # 3
  ```

  **NOTE** Strings are compared against zero-length empty string "". Any
  string with a size greater than zero is already considered as nonzero.

  For example:
  ```python
  x = tf.constant(["", "a", "  ", "b", ""])
  tf.math.count_nonzero(x) # 3, with "a", "  ", and "b" as nonzero strings.
  ```

  Args:
    input: The tensor to reduce. Should be of numeric type, bool, or
      string.
    axis: The dimensions to reduce. If None (the default), reduces all
      dimensions. Must be in the range [-rank(input), rank(input)).
    keepdims: If true, retains reduced dimensions with length 1.
    dtype: The output dtype; defaults to tf.int64.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor (number of nonzero values).
  """
  if keepdims is None:
    keepdims = False
  with ops.name_scope(name, "count_nonzero", [input]):
    input = ops.convert_to_tensor(input, name="input")
    # A scalar of 'zero' is enough as not_equal will broadcast.
    zero = array_ops.zeros([], dtype=input.dtype)
    # int64 reduction happens on GPU.
    is_nonzero = cast(gen_math_ops.not_equal(input, zero), dtypes.int64)
    nonzero_count = reduce_sum(is_nonzero, axis=axis, keepdims=keepdims)
    return cast(nonzero_count, dtype=dtype)

@tf_export(v1=["math.reduce_mean", "reduce_mean"])
def reduce_mean_v1(input_tensor,
                   axis=None,
                   keepdims=None,
                   name=None,
                   reduction_indices=None,
                   keep_dims=None):
  """Computes the mean of elements across dimensions of a tensor.

  Reduces input_tensor along the dimensions given in axis by computing
  the mean of elements across the dimensions in axis.  Unless keepdims
  is true, the rank of the tensor is reduced by 1 for each entry in axis;
  if keepdims is true, the reduced dimensions are retained with length 1.
  If axis is None, all dimensions are reduced, and a tensor with a single
  element is returned.

  For example:

  >>> x = tf.constant([[1., 1.], [2., 2.]])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.5>
  >>> tf.reduce_mean(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
  >>> tf.reduce_mean(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If None (the default), reduces all
      dimensions. Must be in the range [-rank(input_tensor),
      rank(input_tensor)).
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).
    reduction_indices: The old (deprecated) name for axis.
    keep_dims: Deprecated alias for keepdims.

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean

  Please note that np.mean has a dtype parameter that could be used to
  specify the output type. By default this is dtype=float64. On the other
  hand, tf.reduce_mean has an aggressive type inference from
  input_tensor, for example:

  >>> x = tf.constant([1, 0, 1, 0])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=0>
  >>> y = tf.constant([1., 0., 1., 0.])
  >>> tf.reduce_mean(y)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.5>

  @end_compatibility
  """
  # Resolve the deprecated argument aliases before delegating to the v2 op.
  resolved_axis = deprecation.deprecated_argument_lookup(
      "axis", axis, "reduction_indices", reduction_indices)
  resolved_keepdims = deprecation.deprecated_argument_lookup(
      "keepdims", keepdims, "keep_dims", keep_dims)
  return reduce_mean(input_tensor, resolved_axis, resolved_keepdims, name)

@tf_export("math.reduce_mean", "reduce_mean", v1=[])
def reduce_mean(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the mean of elements across dimensions of a tensor.

  Reduces input_tensor along the dimensions given in axis by computing
  the mean of elements across the dimensions in axis.  Unless keepdims
  is true, the rank of the tensor is reduced by 1 for each entry in axis;
  if keepdims is true, the reduced dimensions are retained with length 1.
  If axis is None, all dimensions are reduced, and a tensor with a single
  element is returned.

  For example:

  >>> x = tf.constant([[1., 1.], [2., 2.]])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=1.5>
  >>> tf.reduce_mean(x, 0)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
  >>> tf.reduce_mean(x, 1)
  <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If None (the default), reduces all
      dimensions. Must be in the range [-rank(input_tensor),
      rank(input_tensor)).
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    The reduced tensor.

  @compatibility(numpy)
  Equivalent to np.mean

  Please note that np.mean has a dtype parameter that could be used to
  specify the output type. By default this is dtype=float64. On the other
  hand, tf.reduce_mean has an aggressive type inference from
  input_tensor, for example:

  >>> x = tf.constant([1, 0, 1, 0])
  >>> tf.reduce_mean(x)
  <tf.Tensor: shape=(), dtype=int32, numpy=0>
  >>> y = tf.constant([1., 0., 1., 0.])
  >>> tf.reduce_mean(y)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.5>

  @end_compatibility
  """
  if keepdims is None:
    keepdims = False
  dims = _ReductionDims(input_tensor, axis)
  mean = gen_math_ops.mean(input_tensor, dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, mean)

@tf_export("math.reduce_variance")
def reduce_variance(input_tensor, axis=None, keepdims=False, name=None):
  """Computes the variance of elements across dimensions of a tensor.

  Reduces input_tensor along the dimensions given in axis.  Unless
  keepdims is true, the rank of the tensor is reduced by 1 for each entry
  in axis; if keepdims is true, the reduced dimensions are retained with
  length 1.  If axis is None, all dimensions are reduced, and a tensor
  with a single element is returned.

  For example:

  ```python
  x = tf.constant([[1., 2.], [3., 4.]])
  tf.reduce_variance(x)  # 1.25
  tf.reduce_variance(x, 0)  # [1., 1.]
  tf.reduce_variance(x, 1)  # [0.25,  0.25]
  ```

  Args:
    input_tensor: The tensor to reduce. Should have numeric type.
    axis: The dimensions to reduce. If None (the default), reduces all
      dimensions. Must be in the range [-rank(input_tensor),
      rank(input_tensor)).
    keepdims: If true, retains reduced dimensions with length 1.
    name: A name scope for the associated operations (optional).

  Returns:
    The reduced tensor, of the same dtype as the input_tensor.

  @compatibility(numpy)
  Equivalent to np.var

  Please note that np.var has a dtype parameter that could be used to
  specify the output type. By default this is dtype=float64. On the other
  hand, tf.reduce_variance has an aggressive type inference from
  input_tensor,
  @end_compatibility
  """
  with ops.name_scope(name or "reduce_variance"):
    # Var[X] = E[(X - E[X])^2]; keepdims=True keeps the means broadcastable
    # against input_tensor.
    means = reduce_mean(input_tensor, axis=axis, keepdims=True)
    squared_deviations = gen_math_ops.square(input_tensor - means)
    return reduce_mean(squared_deviations, axis=axis, keepdims=keepdims)

@tf_export("math.reduce_std")
def reduce_std(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the standard deviation of elements across dimensions of a tensor.

Reduces input_tensor along the dimensions given in axis.
Unless keepdims is true, the rank of the tensor is reduced by 1 for each
entry in axis. If keepdims is true, the reduced dimensions
are retained with length 1.

If axis is None, all dimensions are reduced, and a
tensor with a single element is returned.

For example:

python
x = tf.constant([[1., 2.], [3., 4.]])
tf.reduce_std(x)  # 1.1180339887498949
tf.reduce_std(x, 0)  # [1., 1.]
tf.reduce_std(x, 1)  # [0.5,  0.5]


Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If None (the default), reduces all
dimensions. Must be in the range [-rank(input_tensor),
rank(input_tensor)).
keepdims: If true, retains reduced dimensions with length 1.
name: A name scope for the associated operations (optional).

Returns:
The reduced tensor, of the same dtype as the input_tensor.

@compatibility(numpy)
Equivalent to np.std

Please note that np.std has a dtype parameter that could be used to
specify the output type. By default this is dtype=float64. On the other
hand, tf.reduce_std has an aggressive type inference from input_tensor,
@end_compatibility
"""
name = name if name else "reduce_std"
with ops.name_scope(name):
variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims)
return gen_math_ops.sqrt(variance)

[文档]@tf_export("math.reduce_prod", "reduce_prod", v1=[])
def reduce_prod(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the product of elements across dimensions of a tensor.

Reduces input_tensor along the dimensions given in axis.
Unless keepdims is true, the rank of the tensor is reduced by 1 for each
entry in axis. If keepdims is true, the reduced dimensions
are retained with length 1.

If axis is None, all dimensions are reduced, and a
tensor with a single element is returned.

Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If None (the default), reduces all
dimensions. Must be in the range [-rank(input_tensor),
rank(input_tensor)).
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).

Returns:
The reduced tensor.

@compatibility(numpy)
Equivalent to np.prod
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops.prod(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))

@tf_export(v1=["math.reduce_prod", "reduce_prod"])
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_prod_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the product of elements across dimensions of a tensor.

Reduces input_tensor along the dimensions given in axis.
Unless keepdims is true, the rank of the tensor is reduced by 1 for each
entry in axis. If keepdims is true, the reduced dimensions
are retained with length 1.

If axis is None, all dimensions are reduced, and a
tensor with a single element is returned.

Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If None (the default), reduces all
dimensions. Must be in the range [-rank(input_tensor),
rank(input_tensor)).
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for keepdims.

Returns:
The reduced tensor.

@compatibility(numpy)
Equivalent to np.prod
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_prod(input_tensor, axis, keepdims, name)

@tf_export(v1=["math.reduce_min", "reduce_min"])
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_min_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the minimum of elements across dimensions of a tensor.

Reduces input_tensor along the dimensions given in axis.
Unless keepdims is true, the rank of the tensor is reduced by 1 for each
entry in axis. If keepdims is true, the reduced dimensions
are retained with length 1.

If axis is None, all dimensions are reduced, and a
tensor with a single element is returned.

Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If None (the default), reduces all
dimensions. Must be in the range [-rank(input_tensor),
rank(input_tensor)).
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for keepdims.

Returns:
The reduced tensor.

@compatibility(numpy)
Equivalent to np.min
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_min(input_tensor, axis, keepdims, name)

[文档]@tf_export("math.reduce_min", "reduce_min", v1=[])
def reduce_min(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the minimum of elements across dimensions of a tensor.

Reduces input_tensor along the dimensions given in axis.
Unless keepdims is true, the rank of the tensor is reduced by 1 for each
entry in axis. If keepdims is true, the reduced dimensions
are retained with length 1.

If axis is None, all dimensions are reduced, and a
tensor with a single element is returned.

Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If None (the default), reduces all
dimensions. Must be in the range [-rank(input_tensor),
rank(input_tensor)).
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).

Returns:
The reduced tensor.

For example:
>>> a = tf.constant([[1, 2], [3, 4]])
>>> tf.reduce_min(a)
<tf.Tensor: shape=(), dtype=int32, numpy=1>

@compatibility(numpy)
Equivalent to np.min
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops._min(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))

@tf_export(v1=["math.reduce_max", "reduce_max"])
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_max_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the maximum of elements across dimensions of a tensor.

Reduces input_tensor along the dimensions given in axis.
Unless keepdims is true, the rank of the tensor is reduced by 1 for each
entry in axis. If keepdims is true, the reduced dimensions
are retained with length 1.

If axis is None, all dimensions are reduced, and a
tensor with a single element is returned.

Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If None (the default), reduces all
dimensions. Must be in the range [-rank(input_tensor),
rank(input_tensor)).
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for keepdims.

Returns:
The reduced tensor.

@compatibility(numpy)
Equivalent to np.max
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_max(input_tensor, axis, keepdims, name)

[文档]@tf_export("math.reduce_max", "reduce_max", v1=[])
def reduce_max(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the maximum of elements across dimensions of a tensor.

Reduces input_tensor along the dimensions given in axis.
Unless keepdims is true, the rank of the tensor is reduced by 1 for each
entry in axis. If keepdims is true, the reduced dimensions
are retained with length 1.

If axis is None, all dimensions are reduced, and a
tensor with a single element is returned.

Usage example:

>>> x = tf.constant([5, 1, 2, 4])
>>> print(tf.reduce_max(x))
tf.Tensor(5, shape=(), dtype=int32)
>>> x = tf.constant([-5, -1, -2, -4])
>>> print(tf.reduce_max(x))
tf.Tensor(-1, shape=(), dtype=int32)
>>> x = tf.constant([4, float('nan')])
>>> print(tf.reduce_max(x))
tf.Tensor(4.0, shape=(), dtype=float32)
>>> x = tf.constant([float('nan'), float('nan')])
>>> print(tf.reduce_max(x))
tf.Tensor(-inf, shape=(), dtype=float32)
>>> x = tf.constant([float('-inf'), float('inf')])
>>> print(tf.reduce_max(x))
tf.Tensor(inf, shape=(), dtype=float32)

See the numpy docs for np.amax and np.nanmax behavior.

Args:
input_tensor: The tensor to reduce. Should have real numeric type.
axis: The dimensions to reduce. If None (the default), reduces all
dimensions. Must be in the range [-rank(input_tensor),
rank(input_tensor)).
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).

Returns:
The reduced tensor.
"""
return reduce_max_with_dims(input_tensor, axis, keepdims, name,
_ReductionDims(input_tensor, axis))

def reduce_max_with_dims(input_tensor,
                         axis=None,
                         keepdims=False,
                         name=None,
                         dims=None):
  """Shared max-reduction helper taking precomputed reduction axes `dims`."""
  # Treat a keepdims of None the same as the default False.
  if keepdims is None:
    keepdims = False
  maximum = gen_math_ops._max(input_tensor, dims, keepdims, name=name)
  return _may_reduce_to_scalar(keepdims, axis, maximum)

@tf_export(v1=["math.reduce_all", "reduce_all"])
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_all_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the "logical and" of elements across dimensions of a tensor.

Reduces input_tensor along the dimensions given in axis.
Unless keepdims is true, the rank of the tensor is reduced by 1 for each
entry in axis. If keepdims is true, the reduced dimensions
are retained with length 1.

If axis is None, all dimensions are reduced, and a
tensor with a single element is returned.

For example:

python
x = tf.constant([[True,  True], [False, False]])
tf.reduce_all(x)  # False
tf.reduce_all(x, 0)  # [False, False]
tf.reduce_all(x, 1)  # [True, False]


Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If None (the default), reduces all
dimensions. Must be in the range [-rank(input_tensor),
rank(input_tensor)).
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for keepdims.

Returns:
The reduced tensor.

@compatibility(numpy)
Equivalent to np.all
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_all(input_tensor, axis, keepdims, name)

[文档]@tf_export("reduce_all", "math.reduce_all", v1=[])
def reduce_all(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the "logical and" of elements across dimensions of a tensor.

Reduces input_tensor along the dimensions given in axis.
Unless keepdims is true, the rank of the tensor is reduced by 1 for each
entry in axis. If keepdims is true, the reduced dimensions
are retained with length 1.

If axis is None, all dimensions are reduced, and a
tensor with a single element is returned.

For example:

python
x = tf.constant([[True,  True], [False, False]])
tf.reduce_all(x)  # False
tf.reduce_all(x, 0)  # [False, False]
tf.reduce_all(x, 1)  # [True, False]


Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If None (the default), reduces all
dimensions. Must be in the range [-rank(input_tensor),
rank(input_tensor)).
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).

Returns:
The reduced tensor.

@compatibility(numpy)
Equivalent to np.all
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops._all(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))

@tf_export(v1=["math.reduce_any", "reduce_any"])
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_any_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes the "logical or" of elements across dimensions of a tensor.

Reduces input_tensor along the dimensions given in axis.
Unless keepdims is true, the rank of the tensor is reduced by 1 for each
entry in axis. If keepdims is true, the reduced dimensions
are retained with length 1.

If axis is None, all dimensions are reduced, and a
tensor with a single element is returned.

For example:

python
x = tf.constant([[True,  True], [False, False]])
tf.reduce_any(x)  # True
tf.reduce_any(x, 0)  # [True, True]
tf.reduce_any(x, 1)  # [True, False]


Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If None (the default), reduces all
dimensions. Must be in the range [-rank(input_tensor),
rank(input_tensor)).
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for keepdims.

Returns:
The reduced tensor.

@compatibility(numpy)
Equivalent to np.any
@end_compatibility
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_any(input_tensor, axis, keepdims, name)

[文档]@tf_export("math.reduce_any", "reduce_any", v1=[])
def reduce_any(input_tensor, axis=None, keepdims=False, name=None):
"""Computes the "logical or" of elements across dimensions of a tensor.

Reduces input_tensor along the dimensions given in axis.
Unless keepdims is true, the rank of the tensor is reduced by 1 for each
entry in axis. If keepdims is true, the reduced dimensions
are retained with length 1.

If axis is None, all dimensions are reduced, and a
tensor with a single element is returned.

For example:

python
x = tf.constant([[True,  True], [False, False]])
tf.reduce_any(x)  # True
tf.reduce_any(x, 0)  # [True, True]
tf.reduce_any(x, 1)  # [True, False]


Args:
input_tensor: The boolean tensor to reduce.
axis: The dimensions to reduce. If None (the default), reduces all
dimensions. Must be in the range [-rank(input_tensor),
rank(input_tensor)).
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).

Returns:
The reduced tensor.

@compatibility(numpy)
Equivalent to np.any
@end_compatibility
"""
keepdims = False if keepdims is None else keepdims
return _may_reduce_to_scalar(
keepdims, axis,
gen_math_ops._any(
input_tensor, _ReductionDims(input_tensor, axis), keepdims,
name=name))

@tf_export(v1=["math.reduce_logsumexp", "reduce_logsumexp"])
@deprecation.deprecated_args(None,
"keep_dims is deprecated, use keepdims instead",
"keep_dims")
def reduce_logsumexp_v1(input_tensor,
axis=None,
keepdims=None,
name=None,
reduction_indices=None,
keep_dims=None):
"""Computes log(sum(exp(elements across dimensions of a tensor))).

Reduces input_tensor along the dimensions given in axis.
Unless keepdims is true, the rank of the tensor is reduced by 1 for each
entry in axis. If keepdims is true, the reduced dimensions
are retained with length 1.

If axis has no entries, all dimensions are reduced, and a
tensor with a single element is returned.

This function is more numerically stable than log(sum(exp(input))). It avoids
overflows caused by taking the exp of large inputs and underflows caused by
taking the log of small inputs.

For example:

python
x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
tf.reduce_logsumexp(x)  # log(6)
tf.reduce_logsumexp(x, 0)  # [log(2), log(2), log(2)]
tf.reduce_logsumexp(x, 1)  # [log(3), log(3)]
tf.reduce_logsumexp(x, 1, keepdims=True)  # [[log(3)], [log(3)]]
tf.reduce_logsumexp(x, [0, 1])  # log(6)


Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If None (the default), reduces all
dimensions. Must be in the range [-rank(input_tensor),
rank(input_tensor)).
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).
reduction_indices: The old (deprecated) name for axis.
keep_dims: Deprecated alias for keepdims.

Returns:
The reduced tensor.
"""
axis = deprecation.deprecated_argument_lookup("axis", axis,
"reduction_indices",
reduction_indices)
keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
"keep_dims", keep_dims)
return reduce_logsumexp(input_tensor, axis, keepdims, name)

[文档]@tf_export("math.reduce_logsumexp", "reduce_logsumexp", v1=[])
def reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):
"""Computes log(sum(exp(elements across dimensions of a tensor))).

Reduces input_tensor along the dimensions given in axis.
Unless keepdims is true, the rank of the tensor is reduced by 1 for each
entry in axis. If keepdims is true, the reduced dimensions
are retained with length 1.

If axis has no entries, all dimensions are reduced, and a
tensor with a single element is returned.

This function is more numerically stable than log(sum(exp(input))). It avoids
overflows caused by taking the exp of large inputs and underflows caused by
taking the log of small inputs.

For example:

python
x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
tf.reduce_logsumexp(x)  # log(6)
tf.reduce_logsumexp(x, 0)  # [log(2), log(2), log(2)]
tf.reduce_logsumexp(x, 1)  # [log(3), log(3)]
tf.reduce_logsumexp(x, 1, keepdims=True)  # [[log(3)], [log(3)]]
tf.reduce_logsumexp(x, [0, 1])  # log(6)


Args:
input_tensor: The tensor to reduce. Should have numeric type.
axis: The dimensions to reduce. If None (the default), reduces all
dimensions. Must be in the range [-rank(input_tensor),
rank(input_tensor)).
keepdims: If true, retains reduced dimensions with length 1.
name: A name for the operation (optional).

Returns:
The reduced tensor.
"""
keepdims = False if keepdims is None else keepdims
input_tensor = ops.convert_to_tensor(input_tensor)
with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
reduce_dim = _ReductionDims(input_tensor, axis)
raw_max = reduce_max_with_dims(
input_tensor, axis=axis, keepdims=True, dims=reduce_dim)
gen_math_ops.select(
gen_math_ops.is_finite(raw_max), raw_max,
gen_array_ops.zeros_like(raw_max)))
result = gen_math_ops.log(
reduce_sum_with_dims(
gen_math_ops.exp(gen_math_ops.sub(input_tensor, my_max)),
axis=axis,
keepdims=keepdims,
dims=reduce_dim))
if not keepdims:
my_max = array_ops.reshape(my_max, gen_array_ops.shape(result))
return _may_reduce_to_scalar(keepdims, axis, result)

@tf_export("linalg.trace", v1=["linalg.trace", "trace"])
@deprecation.deprecated_endpoints("trace")
def trace(x, name=None):
"""Compute the trace of a tensor x.

trace(x) returns the sum along the main diagonal of each inner-most matrix
in x. If x is of rank k with shape [I, J, K, ..., L, M, N], then output
is a tensor of rank k-2 with dimensions [I, J, K, ..., L] where

output[i, j, k, ..., l] = trace(x[i, j, i, ..., l, :, :])

For example:

python
x = tf.constant([[1, 2], [3, 4]])
tf.linalg.trace(x)  # 5

x = tf.constant([[1, 2, 3],
[4, 5, 6],
[7, 8, 9]])
tf.linalg.trace(x)  # 15

x = tf.constant([[[1, 2, 3],
[4, 5, 6],
[7, 8, 9]],
[[-1, -2, -3],
[-4, -5, -6],
[-7, -8, -9]]])
tf.linalg.trace(x)  # [15, -15]


Args:
x: tensor.
name: A name for the operation (optional).

Returns:
The trace of input tensor.
"""
with ops.name_scope(name, "Trace", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)

[文档]@tf_export("linalg.matmul", "matmul")
def matmul(a,
b,
transpose_a=False,
transpose_b=False,
a_is_sparse=False,
b_is_sparse=False,
name=None):
"""Multiplies matrix a by matrix b, producing a * b.

The inputs must, following any transpositions, be tensors of rank >= 2
where the inner 2 dimensions specify valid matrix multiplication dimensions,
and any further outer dimensions specify matching batch size.

Both matrices must be of the same type. The supported types are:
float16, float32, float64, int32, complex64, complex128.

Either matrix can be transposed or adjointed (conjugated and transposed) on
the fly by setting one of the corresponding flag to True. These are False
by default.

If one or both of the matrices contain a lot of zeros, a more efficient
multiplication algorithm can be used by setting the corresponding
a_is_sparse or b_is_sparse flag to True. These are False by default.
This optimization is only available for plain matrices (rank-2 tensors) with
datatypes bfloat16 or float32.

A simple 2-D tensor matrix multiplication:

>>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
>>> a  # 2-D tensor
<tf.Tensor: shape=(2, 3), dtype=int32, numpy=
array([[1, 2, 3],
[4, 5, 6]], dtype=int32)>
>>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])
>>> b  # 2-D tensor
<tf.Tensor: shape=(3, 2), dtype=int32, numpy=
array([[ 7,  8],
[ 9, 10],
[11, 12]], dtype=int32)>
>>> c = tf.matmul(a, b)
>>> c  # a * b
<tf.Tensor: shape=(2, 2), dtype=int32, numpy=
array([[ 58,  64],
[139, 154]], dtype=int32)>

A batch matrix multiplication with batch shape [2]:

>>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])
>>> a  # 3-D tensor
<tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy=
array([[[ 1,  2,  3],
[ 4,  5,  6]],
[[ 7,  8,  9],
[10, 11, 12]]], dtype=int32)>
>>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2])
>>> b  # 3-D tensor
<tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=
array([[[13, 14],
[15, 16],
[17, 18]],
[[19, 20],
[21, 22],
[23, 24]]], dtype=int32)>
>>> c = tf.matmul(a, b)
>>> c  # a * b
<tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
array([[[ 94, 100],
[229, 244]],
[[508, 532],
[697, 730]]], dtype=int32)>

Since python >= 3.5 the @ operator is supported
(see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). In TensorFlow,
it simply calls the tf.matmul() function, so the following lines are
equivalent:

>>> d = a @ b @ [[10], [11]]
>>> d = tf.matmul(tf.matmul(a, b), [[10], [11]])

Args:
a: tf.Tensor of type float16, float32, float64, int32,
complex64, complex128 and rank > 1.
b: tf.Tensor with same type and rank as a.
transpose_a: If True, a is transposed before multiplication.
transpose_b: If True, b is transposed before multiplication.
adjoint_a: If True, a is conjugated and transposed before
multiplication.
adjoint_b: If True, b is conjugated and transposed before
multiplication.
a_is_sparse: If True, a is treated as a sparse matrix. Notice, this
**does not support tf.sparse.SparseTensor**, it just makes optimizations
that assume most values in a are zero.
See tf.sparse.sparse_dense_matmul
for some support for tf.SparseTensor multiplication.
b_is_sparse: If True, b is treated as a sparse matrix. Notice, this
**does not support tf.sparse.SparseTensor**, it just makes optimizations
that assume most values in a are zero.
See tf.sparse.sparse_dense_matmul
for some support for tf.SparseTensor multiplication.
name: Name for the operation (optional).

Returns:
A tf.Tensor of the same type as a and b where each inner-most matrix
is the product of the corresponding matrices in a and b, e.g. if all
transpose or adjoint attributes are False:

output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j]),
for all indices i, j.

Note: This is matrix product, not element-wise product.

Raises:
ValueError: If transpose_a and adjoint_a, or transpose_b and
adjoint_b are both set to True.
"""
with ops.name_scope(name, "MatMul", [a, b]) as name:
raise ValueError("Only one of transpose_a and adjoint_a can be True.")
raise ValueError("Only one of transpose_b and adjoint_b can be True.")

if context.executing_eagerly():
if not isinstance(a, (ops.EagerTensor, _resource_variable_type)):
a = ops.convert_to_tensor(a, name="a")
if not isinstance(b, (ops.EagerTensor, _resource_variable_type)):
b = ops.convert_to_tensor(b, name="b")
else:
a = ops.convert_to_tensor(a, name="a")
b = ops.convert_to_tensor(b, name="b")

# TODO(apassos) remove _shape_tuple here when it is not needed.
a_shape = a._shape_tuple()  # pylint: disable=protected-access
b_shape = b._shape_tuple()  # pylint: disable=protected-access

output_may_have_non_empty_batch_shape = (
(a_shape is None or len(a_shape) > 2) or
(b_shape is None or len(b_shape) > 2))

if (not a_is_sparse and
not b_is_sparse) and output_may_have_non_empty_batch_shape:
# BatchMatmul does not support transpose, so we conjugate the matrix and
if transpose_a:
a = conj(a)
if transpose_b:
b = conj(b)
return gen_math_ops.batch_mat_mul_v2(

# Neither matmul nor sparse_matmul support adjoint, so we conjugate
# the matrix and use transpose instead. Conj() is a noop for real
# matrices.
a = conj(a)
transpose_a = True
b = conj(b)
transpose_b = True

use_sparse_matmul = False
if a_is_sparse or b_is_sparse:
sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
use_sparse_matmul = (
a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types)
if ((a.dtype == dtypes.bfloat16 or b.dtype == dtypes.bfloat16) and
a.dtype != b.dtype):
# matmul currently doesn't handle mixed-precision inputs.
use_sparse_matmul = True
if use_sparse_matmul:
ret = sparse_matmul(
a,
b,
transpose_a=transpose_a,
transpose_b=transpose_b,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse,
name=name)
# sparse_matmul always returns float32, even with
# bfloat16 inputs. This prevents us from configuring bfloat16 training.
# casting to bfloat16 also matches non-sparse matmul behavior better.
if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16:
ret = cast(ret, dtypes.bfloat16)
return ret
else:
return gen_math_ops.mat_mul(
a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)

@tf_export("linalg.matvec")
def matvec(a,
b,
transpose_a=False,
a_is_sparse=False,
b_is_sparse=False,
name=None):
"""Multiplies matrix a by vector b, producing a * b.

The matrix a must, following any transpositions, be a tensor of rank >= 2,
with shape(a)[-1] == shape(b)[-1], and shape(a)[:-2] able to broadcast
with shape(b)[:-1].

Both a and b must be of the same type. The supported types are:
float16, float32, float64, int32, complex64, complex128.

Matrix a can be transposed or adjointed (conjugated and transposed) on
the fly by setting one of the corresponding flag to True. These are False
by default.

If one or both of the inputs contain a lot of zeros, a more efficient
multiplication algorithm can be used by setting the corresponding
a_is_sparse or b_is_sparse flag to True. These are False by default.
This optimization is only available for plain matrices/vectors (rank-2/1
tensors) with datatypes bfloat16 or float32.

For example:

python
# 2-D tensor a
# [[1, 2, 3],
#  [4, 5, 6]]
a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])

# 1-D tensor b
# [7, 9, 11]
b = tf.constant([7, 9, 11], shape=[3])

# a * b
# [ 58,  64]
c = tf.linalg.matvec(a, b)

# 3-D tensor a
# [[[ 1,  2,  3],
#   [ 4,  5,  6]],
#  [[ 7,  8,  9],
#   [10, 11, 12]]]
a = tf.constant(np.arange(1, 13, dtype=np.int32),
shape=[2, 2, 3])

# 2-D tensor b
# [[13, 14, 15],
#  [16, 17, 18]]
b = tf.constant(np.arange(13, 19, dtype=np.int32),
shape=[2, 3])

# a * b
# [[ 86, 212],
#  [410, 563]]
c = tf.linalg.matvec(a, b)


Args:
a: Tensor of type float16, float32, float64, int32, complex64,
complex128 and rank > 1.
b: Tensor with same type as a and compatible dimensions.
transpose_a: If True, a is transposed before multiplication.
adjoint_a: If True, a is conjugated and transposed before
multiplication.
a_is_sparse: If True, a is treated as a sparse matrix.
b_is_sparse: If True, b is treated as a sparse matrix.
name: Name for the operation (optional).

Returns:
A Tensor of the same type as a and b where each inner-most vector is
the product of the corresponding matrices in a and vectors in b, e.g. if
all transpose or adjoint attributes are False:

output[..., i] = sum_k (a[..., i, k] * b[..., k]), for all indices i.

Note: This is matrix-vector product, not element-wise product.

Raises:
ValueError: If transpose_a and adjoint_a are both set to True.
"""
with ops.name_scope(name, "MatVec", [a, b]) as name:
output = matmul(
a,
array_ops.expand_dims(b, axis=-1),
transpose_a=transpose_a,
a_is_sparse=a_is_sparse,
b_is_sparse=b_is_sparse)
return array_ops.squeeze(output, axis=-1)

# Wire `matmul` into Tensor's binary-operator machinery (enables `a @ b`).
_OverrideBinaryOperatorHelper(matmul, "matmul")

# Deprecated v1-only alias for the raw sparse matmul op; directs users to
# tf.linalg.matmul instead.
sparse_matmul = deprecation.deprecated(None, "Use tf.linalg.matmul instead")(
    gen_math_ops.sparse_mat_mul)
tf_export(v1=["sparse_matmul"])(sparse_matmul)

@ops.RegisterStatistics("MatMul", "flops")
def _calc_mat_mul_flops(graph, node):
"""Calculates the compute resources needed for MatMul."""
transpose_a = node.attr["transpose_a"].b
a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
a_shape.assert_is_fully_defined()
if transpose_a:
k = int(a_shape[0])
else:
k = int(a_shape[1])
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (k * output_count * 2))

@ops.RegisterStatistics("BatchMatMul", "flops")
@ops.RegisterStatistics("BatchMatMulV2", "flops")
def _calc_batch_mat_mul_flops(graph, node):
"""Calculates the compute resources needed for BatchMatMul."""
transpose_a = node.attr["transpose_a"].b
a_shape = graph_util.tensor_shape_from_node_def_name(graph, node.input[0])
a_shape.assert_is_fully_defined()
if transpose_a:
k = int(a_shape[-2])
else:
k = int(a_shape[-1])
output_shape = graph_util.tensor_shape_from_node_def_name(graph, node.name)
output_shape.assert_is_fully_defined()
output_count = np.prod(output_shape.as_list())
return ops.OpStats("flops", (k * output_count * 2))

def _as_indexed_slices(x, optimize=True):
  """Convert 'x' to IndexedSlices.

  Convert a dense Tensor to a block-sparse IndexedSlices.

  Args:
    x: Either a Tensor object, or an IndexedSlices object.
    optimize: if true, attempt to optimize the conversion of 'x'.

  Returns:
    An IndexedSlices object.

  Raises:
    TypeError: If 'x' is not a Tensor or an IndexedSlices object.
  """
  # TODO(touts): op_scope
  # Already sliced: pass it straight through.
  if isinstance(x, ops.IndexedSlices):
    return x
  if not isinstance(x, ops.Tensor):
    raise TypeError("Not a Tensor or IndexedSlices: %s" % type(x))
  # Dense tensor: every row becomes its own slice.
  x_shape = array_ops.shape_internal(x, optimize=optimize)
  return ops.IndexedSlices(x, range(0, x_shape[0]), x_shape)

def _as_indexed_slices_list(inputs, optimize=True):
  """Convert all elements of 'inputs' to IndexedSlices.

  Additionally, homogenize the types of all the indices to
  either int32 or int64.

  Args:
    inputs: List containing either Tensor or IndexedSlices objects.
    optimize: if true, attempt to optimize the conversion of each input.

  Returns:
    A list of IndexedSlices objects.

  Raises:
    TypeError: If 'inputs' is not a list or a tuple.
  """
  if not isinstance(inputs, (list, tuple)):
    raise TypeError("Expected a list or tuple, not a %s" % type(inputs))
  outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
  num_int32 = sum(1 for o in outputs if o.indices.dtype == dtypes.int32)
  # All-int32 or all-int64 indices are already homogeneous.
  if num_int32 in (0, len(outputs)):
    return outputs
  # Mixed index dtypes: promote every int32 index tensor to int64.
  casted_outputs = []
  for o in outputs:
    if o.indices.dtype == dtypes.int32:
      o = ops.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
                            o.dense_shape)
    casted_outputs.append(o)
  return casted_outputs

@tf_export("math.add_n", "add_n")
def add_n(inputs, name=None):
  """Adds all input tensors element-wise.

  `tf.math.add_n` performs the same operation as `tf.math.accumulate_n`, but
  it waits for all of its inputs to be ready before beginning to sum.
  This buffering can result in higher memory consumption when inputs are ready
  at different times, since the minimum temporary storage required is
  proportional to the input size rather than the output size.

  This op does not [broadcast](https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html)
  its inputs. If you need broadcasting, use `tf.math.add` (or the `+` operator)
  instead.

  For example:

  >>> a = tf.constant([[3, 5], [4, 8]])
  >>> b = tf.constant([[1, 6], [2, 9]])
  >>> tf.math.add_n([a, b, a])
  <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
  array([[ 7, 16],
         [10, 25]], dtype=int32)>

  Args:
    inputs: A list of `tf.Tensor` or `tf.IndexedSlices` objects, each with the
      same shape and type. `tf.IndexedSlices` objects will be converted into
      dense tensors prior to adding.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of the same shape and type as the elements of `inputs`.

  Raises:
    ValueError: If `inputs` don't all have same shape and dtype or the shape
    cannot be inferred.
  """
  if not inputs or not isinstance(inputs, (list, tuple)):
    raise ValueError("inputs must be a list of at least one "
                     "Tensor/IndexedSlices with the same dtype and shape")
  inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
  if not all(isinstance(x, (ops.Tensor, ops.IndexedSlices)) for x in inputs):
    raise ValueError("inputs must be a list of at least one "
                     "Tensor/IndexedSlices with the same dtype and shape")

  if len(inputs) == 1:
    # Single input: densify IndexedSlices if needed; only emit an identity op
    # when the caller asked for a specific name.
    if isinstance(inputs[0], ops.IndexedSlices):
      values = ops.convert_to_tensor(inputs[0])
    else:
      values = inputs[0]
    if name:
      return array_ops.identity(values, name=name)
    return values
  # NOTE(review): the docs-page extraction dropped the decorator/def header
  # and this final call; both restored from the upstream TensorFlow source.
  return gen_math_ops.add_n(inputs, name=name)

@tf_export("math.accumulate_n", v1=["math.accumulate_n", "accumulate_n"])
@deprecation.deprecated_endpoints("accumulate_n")
def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
"""Returns the element-wise sum of a list of tensors.

Optionally, pass shape and tensor_dtype for shape and type checking,
otherwise, these are inferred.

accumulate_n performs the same operation as tf.math.add_n.

For example:

python
a = tf.constant([[1, 2], [3, 4]])
b = tf.constant([[5, 0], [0, 6]])
tf.math.accumulate_n([a, b, a])  # [[7, 4], [6, 14]]

# Explicitly pass shape and type
tf.math.accumulate_n([a, b, a], shape=[2, 2], tensor_dtype=tf.int32)
# [[7,  4],
#  [6, 14]]


Args:
inputs: A list of Tensor objects, each with same shape and type.
shape: Expected shape of elements of inputs (optional). Also controls the
output shape of this op, which may affect type inference in other ops. A
value of None means "infer the input shape from the shapes in inputs".
tensor_dtype: Expected data type of inputs (optional). A value of None
means "infer the input dtype from inputs[0]".
name: A name for the operation (optional).

Returns:
A Tensor of same shape and type as the elements of inputs.

Raises:
ValueError: If inputs don't all have same shape and dtype or the shape
cannot be inferred.
"""

def _input_error():
return ValueError("inputs must be a list of at least one Tensor with the "
"same dtype and shape")

if not inputs or not isinstance(inputs, (list, tuple)):
raise _input_error()
inputs = ops.convert_n_to_tensor_or_indexed_slices(inputs)
if not all(isinstance(x, ops.Tensor) for x in inputs):
raise _input_error()
if not all(x.dtype == inputs[0].dtype for x in inputs):
raise _input_error()
if shape is not None:
shape = tensor_shape.as_shape(shape)
else:
shape = tensor_shape.unknown_shape()
for input_tensor in inputs:
if isinstance(input_tensor, ops.Tensor):
shape = shape.merge_with(input_tensor.get_shape())

# tensor_dtype is for safety only; operator's output type computed in C++
if tensor_dtype is not None and tensor_dtype != inputs[0].dtype:
raise TypeError("tensor_dtype is {}, but input is of type {}".format(
tensor_dtype, inputs[0].dtype))

if len(inputs) == 1 and name is None:
return inputs[0]
elif len(inputs) == 1 and name is not None:
return array_ops.identity(inputs[0], name=name)

@tf_export("math.sigmoid", "nn.sigmoid", "sigmoid")
def sigmoid(x, name=None):
  r"""Computes sigmoid of `x` element-wise.

  The sigmoid is computed as `y = 1 / (1 + exp(-x))`, mapping
  `x` in `(-inf, inf)` to `y` in `(0, 1)`.

  Example Usage:

  >>> x = tf.constant([0.0, 1.0, 50.0, 100.0])
  >>> tf.math.sigmoid(x)
  <tf.Tensor: shape=(4,), dtype=float32,
  numpy=array([0.5      , 0.7310586, 1.       , 1.       ], dtype=float32)>

  Large positive inputs saturate toward 1 and large negative inputs toward 0:

  >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])
  >>> tf.math.sigmoid(x)
  <tf.Tensor: shape=(4,), dtype=float32, numpy=
  array([0.0000000e+00, 1.9287499e-22, 2.6894143e-01, 0.5],
        dtype=float32)>

  Args:
    x: A Tensor with type `float16`, `float32`, `float64`, `complex64`, or
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A Tensor with the same type as `x`.

  @compatibility(scipy)
  Equivalent to scipy.special.expit
  @end_compatibility
  """
  with ops.name_scope(name, "Sigmoid", [x]) as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.sigmoid(tensor, name=name)

@tf_export("math.log_sigmoid", v1=["math.log_sigmoid", "log_sigmoid"])
@deprecation.deprecated_endpoints("log_sigmoid")
def log_sigmoid(x, name=None):
"""Computes log sigmoid of x element-wise.

Specifically, y = log(1 / (1 + exp(-x))).  For numerical stability,
we use y = -tf.nn.softplus(-x).

Args:
x: A Tensor with type float32 or float64.
name: A name for the operation (optional).

Returns:
A Tensor with the same type as x.
"""
with ops.name_scope(name, "LogSigmoid", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.neg(gen_nn_ops.softplus(-x), name=name)

@tf_export("math.bincount", v1=[])
def bincount(arr,
weights=None,
minlength=None,
maxlength=None,
dtype=dtypes.int32,
name=None):
"""Counts the number of occurrences of each value in an integer array.

If minlength and maxlength are not given, returns a vector with length
tf.reduce_max(arr) + 1 if arr is non-empty, and length 0 otherwise.
If weights are non-None, then index i of the output stores the sum of the
value in weights at each index where the corresponding value in arr is
i.

python
values = tf.constant([1,1,2,3,2,4,4,5])
tf.math.bincount(values) #[0 2 2 1 2 1]

Vector length = Maximum element in vector values is 5. Adding 1, which is 6
will be the vector length.

Each bin value in the output indicates number of occurrences of the particular
index. Here, index 1 in output has a value 2. This indicates value 1 occurs
two times in values.

python
values = tf.constant([1,1,2,3,2,4,4,5])
weights = tf.constant([1,5,0,1,0,5,4,5])
tf.math.bincount(values, weights=weights) #[0 6 0 1 9 5]

Bin will be incremented by the corresponding weight instead of 1.
Here, index 1 in output has a value 6. This is the summation of weights
corresponding to the value in values.

Args:
arr: An int32 tensor of non-negative values.
weights: If non-None, must be the same shape as arr. For each value in
arr, the bin will be incremented by the corresponding weight instead of
1.
minlength: If given, ensures the output has length at least minlength,
padding with zeros at the end if necessary.
maxlength: If given, skips values in arr that are equal or greater than
maxlength, ensuring that the output has length at most maxlength.
dtype: If weights is None, determines the type of the output bins.
name: A name scope for the associated operations (optional).

Returns:
A vector with the same dtype as weights or the given dtype. The bin
values.

Raises:
InvalidArgumentError if negative values are provided as an input.

"""
name = "bincount" if name is None else name
with ops.name_scope(name):
arr = ops.convert_to_tensor(arr, name="arr", dtype=dtypes.int32)
array_is_nonempty = reduce_prod(array_ops.shape(arr)) > 0
output_size = cast(array_is_nonempty, dtypes.int32) * (reduce_max(arr) + 1)
if minlength is not None:
minlength = ops.convert_to_tensor(
minlength, name="minlength", dtype=dtypes.int32)
output_size = gen_math_ops.maximum(minlength, output_size)
if maxlength is not None:
maxlength = ops.convert_to_tensor(
maxlength, name="maxlength", dtype=dtypes.int32)
output_size = gen_math_ops.minimum(maxlength, output_size)
if weights is not None:
weights = ops.convert_to_tensor(weights, name="weights")
return gen_math_ops.unsorted_segment_sum(weights, arr, output_size)
weights = constant_op.constant([], dtype)
return gen_math_ops.bincount(arr, output_size, weights)

@tf_export(v1=["math.bincount", "bincount"])
@deprecation.deprecated_endpoints("bincount")
def bincount_v1(arr,
weights=None,
minlength=None,
maxlength=None,
dtype=dtypes.int32):
"""Counts the number of occurrences of each value in an integer array.

If minlength and maxlength are not given, returns a vector with length
tf.reduce_max(arr) + 1 if arr is non-empty, and length 0 otherwise.
If weights are non-None, then index i of the output stores the sum of the
value in weights at each index where the corresponding value in arr is
i.

Args:
arr: An int32 tensor of non-negative values.
weights: If non-None, must be the same shape as arr. For each value in
arr, the bin will be incremented by the corresponding weight instead of
1.
minlength: If given, ensures the output has length at least minlength,
padding with zeros at the end if necessary.
maxlength: If given, skips values in arr that are equal or greater than
maxlength, ensuring that the output has length at most maxlength.
dtype: If weights is None, determines the type of the output bins.

Returns:
A vector with the same dtype as weights or the given dtype. The bin
values.
"""
return bincount(arr, weights, minlength, maxlength, dtype)

@tf_export("math.cumsum", "cumsum")
def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
  """Compute the cumulative sum of the tensor `x` along `axis`.

  By default, this op performs an inclusive cumsum, which means that the first
  element of the input is identical to the first element of the output:
  For example:

  >>> # tf.cumsum([a, b, c])   # [a, a + b, a + b + c]
  >>> x = tf.constant([2, 4, 6, 8])
  >>> tf.cumsum(x)
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([ 2,  6, 12, 20], dtype=int32)>

  >>> # using varying axis values
  >>> y = tf.constant([[2, 4, 6, 8], [1,3,5,7]])
  >>> tf.cumsum(y, axis=0)
  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
  array([[ 2,  4,  6,  8],
         [ 3,  7, 11, 15]], dtype=int32)>
  >>> tf.cumsum(y, axis=1)
  <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
  array([[ 2,  6, 12, 20],
         [ 1,  4,  9, 16]], dtype=int32)>

  Setting `exclusive=True` shifts the sums so each output element excludes
  its own input:

  >>> # tf.cumsum([a, b, c], exclusive=True)  => [0, a, a + b]
  >>> x = tf.constant([2, 4, 6, 8])
  >>> tf.cumsum(x, exclusive=True)
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([ 0,  2,  6, 12], dtype=int32)>

  Setting `reverse=True` accumulates in the opposite direction (more
  efficient than composing with separate `tf.reverse` ops):

  >>> # tf.cumsum([a, b, c], reverse=True)  # [a + b + c, b + c, c]
  >>> x = tf.constant([2, 4, 6, 8])
  >>> tf.cumsum(x, reverse=True)
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([20, 18, 14,  8], dtype=int32)>

  The `reverse` and `exclusive` kwargs can also be combined:

  >>> # tf.cumsum([a, b, c], exclusive=True, reverse=True)  # [b + c, c, 0]
  >>> x = tf.constant([2, 4, 6, 8])
  >>> tf.cumsum(x, exclusive=True, reverse=True)
  <tf.Tensor: shape=(4,), dtype=int32,
  numpy=array([18, 14,  8,  0], dtype=int32)>

  Args:
    x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
      `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
      `complex128`, `qint8`, `quint8`, `qint32`, `half`.
    axis: A `Tensor` of type `int32` (default: 0). Must be in the range
      `[-rank(x), rank(x))`.
    exclusive: If `True`, perform exclusive cumsum.
    reverse: A `bool` (default: False).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
  """
  with ops.name_scope(name, "Cumsum", [x]) as name:
    tensor = ops.convert_to_tensor(x, name="x")
    return gen_math_ops.cumsum(
        tensor, axis, exclusive=exclusive, reverse=reverse, name=name)

@tf_export("math.cumprod", v1=["math.cumprod", "cumprod"])
@deprecation.deprecated_endpoints("cumprod")
def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative product of the tensor x along axis.

By default, this op performs an inclusive cumprod, which means that the
first element of the input is identical to the first element of the output:

python
tf.math.cumprod([a, b, c])  # [a, a * b, a * b * c]


By setting the exclusive kwarg to True, an exclusive cumprod is
performed

python
tf.math.cumprod([a, b, c], exclusive=True)  # [1, a, a * b]


By setting the reverse kwarg to True, the cumprod is performed in the
opposite direction:

python
tf.math.cumprod([a, b, c], reverse=True)  # [a * b * c, b * c, c]


This is more efficient than using separate tf.reverse ops.
The reverse and exclusive kwargs can also be combined:

python
tf.math.cumprod([a, b, c], exclusive=True, reverse=True)  # [b * c, c, 1]


Args:
x: A Tensor. Must be one of the following types: float32, float64,
int64, int32, uint8, uint16, int16, int8, complex64,
complex128, qint8, quint8, qint32, half.
axis: A Tensor of type int32 (default: 0). Must be in the range
[-rank(x), rank(x)).
exclusive: If True, perform exclusive cumprod.
reverse: A bool (default: False).
name: A name for the operation (optional).

Returns:
A Tensor. Has the same type as x.
"""
with ops.name_scope(name, "Cumprod", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumprod(
x, axis, exclusive=exclusive, reverse=reverse, name=name)

@tf_export("math.cumulative_logsumexp", v1=["math.cumulative_logsumexp"])
def cumulative_logsumexp(x, axis=0, exclusive=False, reverse=False, name=None):
"""Compute the cumulative log-sum-exp of the tensor x along axis.

By default, this op performs an inclusive cumulative log-sum-exp, which means
that the first element of the input is identical to the first element of
the output.

This operation is significantly more numerically stable than the equivalent
tensorflow operation tf.math.log(tf.math.cumsum(tf.math.exp(x))), although
computes the same result given infinite numerical precision. However, note
that in some cases, it may be less stable than tf.math.reduce_logsumexp
for a given element, as it applies the "log-sum-exp trick" in a different
way.

More precisely, where tf.math.reduce_logsumexp uses the following trick:


log(sum(exp(x))) == log(sum(exp(x - max(x)))) + max(x)


it cannot be directly used here as there is no fast way of applying it
to each prefix x[:i]. Instead, this function implements a prefix
scan using pairwise log-add-exp, which is a commutative and associative
(up to floating point precision) operator:


log_add_exp(x, y) = log(exp(x) + exp(y))
= log(1 + exp(min(x, y) - max(x, y))) + max(x, y)


However, reducing using the above operator leads to a different computation
tree (logs are taken repeatedly instead of only at the end), and the maximum
is only computed pairwise instead of over the entire prefix. In general, this
leads to a different and slightly less precise computation.

Args:
x: A Tensor. Must be one of the following types: float16, float32,
float64.
axis: A Tensor of type int32 or int64 (default: 0). Must be in the
range [-rank(x), rank(x)).
exclusive: If True, perform exclusive cumulative log-sum-exp.
reverse: If True, performs the cumulative log-sum-exp in the reverse
direction.
name: A name for the operation (optional).

Returns:
A Tensor. Has the same shape and type as x.
"""
with ops.name_scope(name, "CumulativeLogsumexp", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
return gen_math_ops.cumulative_logsumexp(
x, axis, exclusive=exclusive, reverse=reverse, name=name)

@tf_export("math.conj", v1=["math.conj", "conj"])
@deprecation.deprecated_endpoints("conj")
def conj(x, name=None):
r"""Returns the complex conjugate of a complex number.

Given a tensor input of complex numbers, this operation returns a tensor of
complex numbers that are the complex conjugate of each element in input. The
complex numbers in input must be of the form \$$a + bj\$$, where *a* is the
real part and *b* is the imaginary part.

The complex conjugate returned by this operation is of the form \$$a - bj\$$.

For example:

# tensor 'input' is [-2.25 + 4.75j, 3.25 + 5.75j]
tf.math.conj(input) ==> [-2.25 - 4.75j, 3.25 - 5.75j]

If x is real, it is returned unchanged.

Args:
x: Tensor to conjugate.  Must have numeric or variant type.
name: A name for the operation (optional).

Returns:
A Tensor that is the conjugate of x (with the same type).

Raises:
TypeError: If x is not a numeric tensor.
"""
if isinstance(x, ops.Tensor):
dt = x.dtype
if dt.is_floating or dt.is_integer:
return x
with ops.name_scope(name, "Conj", [x]) as name:
x = ops.convert_to_tensor(x, name="x")
if x.dtype.is_complex or x.dtype == dtypes.variant:
return gen_math_ops.conj(x, name=name)
elif x.dtype.is_floating or x.dtype.is_integer:
return x
else:
raise TypeError("Expected numeric or variant tensor, got dtype %r" %
x.dtype)

def reduced_shape(input_shape, axes):
  """Helper function for reduction ops.

  Args:
    input_shape: 1-D Tensor, the shape of the Tensor being reduced.
    axes: 1-D Tensor, the reduction axes.

  Returns:
    A 1-D Tensor, the output shape as if keepdims were set to True.
  """
  if context.executing_eagerly():
    # Eager mode: shapes are concrete ndarrays, so write 1s in place.
    input_shape = input_shape.numpy()
    axes = axes.numpy()
    input_shape[axes] = 1
    return input_shape

  # Graph mode: build the shape symbolically.  Inline comments trace an
  # example where input_shape=[2, 3, 5, 7] and axes=[1, 2].
  # Example:
  # cast needed for SparseTensor reductions
  input_shape = cast(input_shape, dtypes.int32)  # [2, 3, 5, 7]
  axes = cast(axes, dtypes.int32)  # [1, 2]

  input_rank = array_ops.size(input_shape)  # 4
  # Normalize negative axes into [0, rank).
  axes = (axes + input_rank) % input_rank
  axes_shape = array_ops.shape(axes)  # [2]
  # Stitch: positions in `axes` get 1, all others keep their original size.
  return gen_data_flow_ops.dynamic_stitch(  # [2, 1, 1, 7]
      [
          range(input_rank),  # [0, 1, 2, 3]
          axes
      ],  # [1, 2]
      [
          input_shape,  # [2, 3, 5, 7]
          array_ops.fill(axes_shape, 1)
      ])  # [1, 1]

def _unsorted_segment_N(data, segment_ids, num_segments):
  """ Helper function for unsorted_segment_mean/_sqrtN.

  Computes the number
  of segment entries with 0-entries set to 1 to allow division by N.
  """
  num_segments = ops.convert_to_tensor(num_segments)
  # bincount doesn't support negative indices so we use unsorted_segment_sum
  segment_ids_shape = array_ops.shape_internal(segment_ids)
  ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)
  n = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments)
  # add dimensions for all non-reduced axes
  # BUG FIX: the docs-page extraction dropped the `array_ops.concat(` call and
  # the reshape below, leaving an orphaned expression; restored from the
  # upstream TensorFlow source so `n` broadcasts against the segment sums.
  broadcastable_shape = array_ops.concat(
      [num_segments[array_ops.newaxis],
       array_ops.ones([array_ops.rank(data)
                       - array_ops.rank(segment_ids)],
                      dtype=num_segments.dtype)],
      axis=0)
  n = array_ops.reshape(n, broadcastable_shape)
  # Empty segments get a count of 1 so dividing by N never divides by zero.
  return gen_math_ops.maximum(n, 1)

@tf_export(
    "math.unsorted_segment_mean",
    v1=["math.unsorted_segment_mean", "unsorted_segment_mean"])
@deprecation.deprecated_endpoints("unsorted_segment_mean")
def unsorted_segment_mean(data, segment_ids, num_segments, name=None):
  r"""Computes the mean along segments of a tensor.

  Read [the section on segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  This operator is similar to the unsorted segment sum operator.
  Instead of computing the sum over segments, it computes the mean of all
  entries belonging to a segment such that:

  \\(output_i = 1/N_i \sum_{j...} data[j...]\\) where the sum is over tuples
  `j...` such that `segment_ids[j...] == i` with \\N_i\\ being the number of
  occurrences of id \\i\\.

  If there is no entry for a given segment ID `i`, it outputs 0.

  If the given segment ID `i` is negative, the value is dropped and will not
  be added to the sum of the segment.

  Args:
    data: A `Tensor` with floating point or complex dtype.
    segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
    num_segments: An integer scalar `Tensor`.  The number of distinct segment
      IDs.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has same shape as data, except for the first `segment_ids.rank`
    dimensions, which are replaced with a single dimension which has size
   `num_segments`.
  """
  with ops.name_scope(name, "UnsortedSegmentMean"):
    data = ops.convert_to_tensor(data)
    segment_ids = ops.convert_to_tensor(segment_ids)
    # Sum per segment, then normalize by the (clamped-to-1) segment sizes.
    summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
    return summed / _unsorted_segment_N(data, segment_ids, num_segments)

@tf_export(
    "math.unsorted_segment_sqrt_n",
    v1=["math.unsorted_segment_sqrt_n", "unsorted_segment_sqrt_n"])
@deprecation.deprecated_endpoints("unsorted_segment_sqrt_n")
def unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None):
  r"""Computes the sum along segments of a tensor divided by the sqrt(N).

  Read [the section on segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
  for an explanation of segments.

  This operator is similar to the unsorted segment sum operator.
  Additionally to computing the sum over segments, it divides the results by
  sqrt(N).

  \\(output_i = 1/sqrt(N_i) \sum_{j...} data[j...]\\) where the sum is over
  tuples `j...` such that `segment_ids[j...] == i` with \\N_i\\ being the
  number of occurrences of id \\i\\.

  If there is no entry for a given segment ID `i`, it outputs 0.

  Note that this op only supports floating point and complex dtypes,
  due to tf.sqrt only supporting these types.

  If the given segment ID `i` is negative, the value is dropped and will not
  be added to the sum of the segment.

  Args:
    data: A `Tensor` with floating point or complex dtype.
    segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
    num_segments: An integer scalar `Tensor`.  The number of distinct segment
      IDs.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.  Has same shape as data, except for the first `segment_ids.rank`
    dimensions, which are replaced with a single dimension which has size
   `num_segments`.
  """
  with ops.name_scope(name, "UnsortedSegmentSqrtN"):
    data = ops.convert_to_tensor(data)
    segment_ids = ops.convert_to_tensor(segment_ids)
    # Sum per segment, then scale each segment by 1/sqrt(segment size).
    summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
    counts = _unsorted_segment_N(data, segment_ids, num_segments)
    return summed / gen_math_ops.sqrt(counts)

@tf_export(v1=["sparse.segment_sum", "sparse_segment_sum"])
@deprecation.deprecated_endpoints("sparse_segment_sum")
def sparse_segment_sum(data,
indices,
segment_ids,
name=None,
num_segments=None):
r"""Computes the sum along sparse segments of a tensor.

for an explanation of segments.

Like tf.math.segment_sum, but segment_ids can have rank less than data's
first dimension, selecting a subset of dimension 0, specified by indices.
segment_ids is allowed to have missing ids, in which case the output will
be zeros at those indices. In those cases num_segments is used to determine
the size of the output.

For example:

python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

# Select two rows, one segment.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
# => [[0 0 0 0]]

# Select two rows, two segment.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
# => [[ 1  2  3  4]
#     [-1 -2 -3 -4]]

# With missing segment ids.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
num_segments=4)
# => [[ 1  2  3  4]
#     [ 0  0  0  0]
#     [-1 -2 -3 -4]
#     [ 0  0  0  0]]

# Select all rows, two segments.
tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
# => [[0 0 0 0]
#     [5 6 7 8]]

# Which is equivalent to:
tf.math.segment_sum(c, tf.constant([0, 0, 1]))


Args:
data: A Tensor with data that will be assembled in the output.
indices: A 1-D Tensor with indices into data. Has same rank as
segment_ids.
segment_ids: A 1-D Tensor with indices into the output Tensor. Values
should be sorted and can be repeated.
name: A name for the operation (optional).
num_segments: An optional int32 scalar. Indicates the size of the output
Tensor.

Returns:
A tensor of the shape as data, except for dimension 0 which
has size k, the number of segments specified via num_segments or
inferred for the last element in segments_ids.
"""
if num_segments is not None:
return gen_math_ops.sparse_segment_sum_with_num_segments(
data=data,
indices=indices,
segment_ids=segment_ids,
num_segments=num_segments,
name=name)
else:
return gen_math_ops.sparse_segment_sum(
data=data, indices=indices, segment_ids=segment_ids, name=name)

@tf_export("sparse.segment_sum", v1=[])
def sparse_segment_sum_v2(data,
indices,
segment_ids,
num_segments=None,
name=None):
r"""Computes the sum along sparse segments of a tensor.

for an explanation of segments.

Like tf.math.segment_sum, but segment_ids can have rank less than data's
first dimension, selecting a subset of dimension 0, specified by indices.
segment_ids is allowed to have missing ids, in which case the output will
be zeros at those indices. In those cases num_segments is used to determine
the size of the output.

For example:

python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])

# Select two rows, one segment.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
# => [[0 0 0 0]]

# Select two rows, two segment.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
# => [[ 1  2  3  4]
#     [-1 -2 -3 -4]]

# With missing segment ids.
tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
num_segments=4)
# => [[ 1  2  3  4]
#     [ 0  0  0  0]
#     [-1 -2 -3 -4]
#     [ 0  0  0  0]]

# Select all rows, two segments.
tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
# => [[0 0 0 0]
#     [5 6 7 8]]

# Which is equivalent to:
tf.math.segment_sum(c, tf.constant([0, 0, 1]))


Args:
data: A Tensor with data that will be assembled in the output.
indices: A 1-D Tensor with indices into data. Has same rank as
segment_ids.
segment_ids: A 1-D Tensor with indices into the output Tensor. Values
should be sorted and can be repeated.
num_segments: An optional int32 scalar. Indicates the size of the output
Tensor.
name: A name for the operation (optional).

Returns:
A tensor of the shape as data, except for dimension 0 which
has size k, the number of segments specified via num_segments or
inferred for the last element in segments_ids.
"""
return sparse_segment_sum(
data, indices, segment_ids, name=name, num_segments=num_segments)

@tf_export(v1=["sparse.segment_mean", "sparse_segment_mean"])
@deprecation.deprecated_endpoints("sparse_segment_mean")
def sparse_segment_mean(data,
indices,
segment_ids,
name=None,
num_segments=None):
r"""Computes the mean along sparse segments of a tensor.

for an explanation of segments.

Like tf.math.segment_mean, but segment_ids can have rank less than
data's first dimension, selecting a subset of dimension 0, specified by
indices.
segment_ids is allowed to have missing ids, in which case the output will
be zeros at those indices. In those cases num_segments is used to determine
the size of the output.

Args:
data: A Tensor with data that will be assembled in the output.
indices: A 1-D Tensor with indices into data. Has same rank as
segment_ids.
segment_ids: A 1-D Tensor with indices into the output Tensor. Values
should be sorted and can be repeated.
name: A name for the operation (optional).
num_segments: An optional int32 scalar. Indicates the size of the output
Tensor.

Returns:
A tensor of the shape as data, except for dimension 0 which
has size k, the number of segments specified via num_segments or
inferred for the last element in segments_ids.
"""
if num_segments is not None:
return gen_math_ops.sparse_segment_mean_with_num_segments(
data=data,
indices=indices,
segment_ids=segment_ids,
num_segments=num_segments,
name=name)
else:
return gen_math_ops.sparse_segment_mean(
data=data, indices=indices, segment_ids=segment_ids, name=name)

@tf_export("sparse.segment_mean", v1=[])
def sparse_segment_mean_v2(data,
indices,
segment_ids,
num_segments=None,
name=None):
r"""Computes the mean along sparse segments of a tensor.

for an explanation of segments.

Like tf.math.segment_mean, but segment_ids can have rank less than
data's first dimension, selecting a subset of dimension 0, specified by
indices.
segment_ids is allowed to have missing ids, in which case the output will
be zeros at those indices. In those cases num_segments is used to determine
the size of the output.

Args:
data: A Tensor with data that will be assembled in the output.
indices: A 1-D Tensor with indices into data. Has same rank as
segment_ids.
segment_ids: A 1-D Tensor with indices into the output Tensor. Values
should be sorted and can be repeated.
num_segments: An optional int32 scalar. Indicates the size of the output
Tensor.
name: A name for the operation (optional).

Returns:
A tensor of the shape as data, except for dimension 0 which
has size k, the number of segments specified via num_segments or
inferred for the last element in segments_ids.
"""
return sparse_segment_mean(
data, indices, segment_ids, name=name, num_segments=num_segments)

@tf_export(v1=["sparse.segment_sqrt_n", "sparse_segment_sqrt_n"])
@deprecation.deprecated_endpoints("sparse_segment_sqrt_n")
def sparse_segment_sqrt_n(data,
indices,
segment_ids,
name=None,
num_segments=None):
r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).

N is the size of the segment being reduced.

Args:
data: A Tensor with data that will be assembled in the output.
indices: A 1-D Tensor with indices into data. Has same rank as
segment_ids.
segment_ids: A 1-D Tensor with indices into the output Tensor. Values
should be sorted and can be repeated.
name: A name for the operation (optional).
num_segments: An optional int32 scalar. Indicates the size of the output
Tensor.

Returns:
A tensor of the shape as data, except for dimension 0 which
has size k, the number of segments specified via num_segments or
inferred for the last element in segments_ids.
"""
if num_segments is not None:
return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(
data=data,
indices=indices,
segment_ids=segment_ids,
num_segments=num_segments,
name=name)
else:
return gen_math_ops.sparse_segment_sqrt_n(
data=data, indices=indices, segment_ids=segment_ids, name=name)

@tf_export("sparse.segment_sqrt_n", v1=[])
def sparse_segment_sqrt_n_v2(data,
indices,
segment_ids,
num_segments=None,
name=None):
r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).

for an explanation of segments.

Like tf.sparse.segment_mean, but instead of dividing by the size of the
segment, N, divide by sqrt(N) instead.

Args:
data: A Tensor with data that will be assembled in the output.
indices: A 1-D Tensor with indices into data. Has same rank as
segment_ids.
segment_ids: A 1-D Tensor with indices into the output Tensor. Values
should be sorted and can be repeated.
num_segments: An optional int32 scalar. Indicates the size of the output
Tensor.
name: A name for the operation (optional).

Returns:
A tensor of the shape as data, except for dimension 0 which
has size k, the number of segments specified via num_segments or
inferred for the last element in segments_ids.
"""
return sparse_segment_sqrt_n(
data, indices, segment_ids, name=name, num_segments=num_segments)

@tf_export("tensordot", "linalg.tensordot")
def tensordot(a, b, axes, name=None):
  r"""Tensor contraction of a and b along specified axes and outer product.

  Tensordot (also known as tensor contraction) sums the product of elements
  from `a` and `b` over the indices specified by `a_axes` and `b_axes`.
  The lists `a_axes` and `b_axes` specify those pairs of axes along which to
  contract the tensors. The axis `a_axes[i]` of `a` must have the same dimension
  as axis `b_axes[i]` of `b` for all `i` in `range(0, len(a_axes))`. The lists
  `a_axes` and `b_axes` must have identical length and consist of unique
  integers that specify valid axes for each of the tensors. Additionally
  outer product is supported by passing `axes=0`.

  This operation corresponds to `numpy.tensordot(a, b, axes)`.

  Example 1: When `a` and `b` are matrices (order 2), the case `axes = 1`
  is equivalent to matrix multiplication.

  Example 2: When `a` and `b` are matrices (order 2), the case
  `axes = [[1], [0]]` is equivalent to matrix multiplication.

  Example 3: When `a` and `b` are matrices (order 2), the case `axes=0` gives
  the outer product, a tensor of order 4.

  Example 4: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two
  tensors of order 3. Then, `contract(a, b, [[0], [2]])` is the order 4 tensor
  \\(c_{jklm}\\) whose entry
  corresponding to the indices \\((j,k,l,m)\\) is given by:

  \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).

  In general, `order(c) = order(a) + order(b) - 2 * len(axes[0])`.

  Args:
    a: `Tensor` of type `float32` or `float64`.
    b: `Tensor` with the same type as `a`.
    axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
      If axes is a scalar, sum over the last N axes of a and the first N axes of
      b in order. If axes is a list or `Tensor` the first and second row contain
      the set of unique integers specifying axes along which the contraction is
      computed, for `a` and `b`, respectively. The number of axes for `a` and
      `b` must be equal. If `axes=0`, computes the outer product between `a`
      and `b`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with the same type as `a`.

  Raises:
    ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
    IndexError: If the values in axes exceed the rank of the corresponding
      tensor.
  """

  def _tensordot_reshape(a, axes, flipped=False):
    """Helper method to perform transpose and reshape for contraction op.

    This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
    using `array_ops.transpose` and `array_ops.reshape`. The method takes a
    tensor and performs the correct transpose and reshape operation for a given
    set of indices. It returns the reshaped tensor as well as a list of indices
    necessary to reshape the tensor again after matrix multiplication.

    Args:
      a: `Tensor`.
      axes: List or `int32` `Tensor` of unique indices specifying valid axes of
        `a`.
      flipped: An optional `bool`. Defaults to `False`. If `True`, the method
        assumes that `a` is the second argument in the contraction operation.

    Returns:
      A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
      the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
      either a list of integers or an `int32` `Tensor`, depending on whether
      the shape of a is fully specified, and `free_dims_static` is either a
      list of integers and None values, or None, representing the inferred
      static shape of the free dimensions.
    """
    # Fully static path: shapes are Python ints, so the permutation and the
    # reshape can be computed (and possibly elided) at graph-construction time.
    if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
      shape_a = a.get_shape().as_list()
      # Normalize negative axis indices.
      axes = [i if i >= 0 else i + len(shape_a) for i in axes]
      # "Free" axes are the ones not being contracted over.
      free = [i for i in xrange(len(shape_a)) if i not in axes]
      free_dims = [shape_a[i] for i in free]
      prod_free = int(np.prod([shape_a[i] for i in free]))
      prod_axes = int(np.prod([shape_a[i] for i in axes]))
      # For the right operand of matmul the contracted axes come first.
      perm = list(axes) + free if flipped else free + list(axes)
      new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
      # Skip the transpose/reshape ops when they would be identities.
      if (perm != np.arange(len(shape_a))).any():
        a_trans = array_ops.transpose(a, perm)
      else:
        a_trans = a
      if a_trans.get_shape().as_list() != new_shape:
        reshaped_a = array_ops.reshape(a_trans, new_shape)
      else:
        reshaped_a = a_trans
      return reshaped_a, free_dims, free_dims
    else:
      # Partially static path: rank is known (so axes can be normalized in
      # Python) but some dimensions are dynamic.
      if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
        shape_a = a.get_shape().as_list()
        axes = [i if i >= 0 else i + len(shape_a) for i in axes]
        free = [i for i in xrange(len(shape_a)) if i not in axes]
        axes_dims = [shape_a[i] for i in axes]
        free_dims = [shape_a[i] for i in free]
        free_dims_static = free_dims
        axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
        free = ops.convert_to_tensor(free, dtype=dtypes.int32, name="free")
        shape_a = array_ops.shape(a)
      else:
        # Fully dynamic path: even the rank is only known at run time.
        free_dims_static = None
        shape_a = array_ops.shape(a)
        rank_a = array_ops.rank(a)
        axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
        # Normalize negative axis indices with tensor ops.
        axes = array_ops.where(axes >= 0, axes, axes + rank_a)
        # NOTE: `range` here takes a Tensor argument, so it resolves to this
        # module's math_ops.range, not the Python builtin.
        free, _ = array_ops.setdiff1d(range(rank_a), axes)
      free_dims = array_ops.gather(shape_a, free)
      axes_dims = array_ops.gather(shape_a, axes)
      prod_free_dims = reduce_prod(free_dims)
      prod_axes_dims = reduce_prod(axes_dims)
      if flipped:
        perm = array_ops.concat([axes, free], 0)
        new_shape = array_ops.stack([prod_axes_dims, prod_free_dims])
      else:
        perm = array_ops.concat([free, axes], 0)
        new_shape = array_ops.stack([prod_free_dims, prod_axes_dims])
      reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
      return reshaped_a, free_dims, free_dims_static

  def _tensordot_axes(a, axes):
    """Generates two sets of contraction axes for the two tensor arguments."""
    a_shape = a.get_shape()
    # Scalar axes: contract the last `axes` dims of `a` with the first
    # `axes` dims of `b`.
    if isinstance(axes, compat.integral_types):
      if axes < 0:
        raise ValueError("'axes' must be at least 0.")
      if a_shape.ndims is not None:
        if axes > a_shape.ndims:
          raise ValueError("'axes' must not be larger than the number of "
                           "dimensions of tensor %s." % a)
        return (list(xrange(a_shape.ndims - axes,
                            a_shape.ndims)), list(xrange(axes)))
      else:
        # Rank unknown: build the index ranges as tensors.
        rank = array_ops.rank(a)
        return (range(rank - axes, rank,
                      dtype=dtypes.int32), range(axes, dtype=dtypes.int32))
    elif isinstance(axes, (list, tuple)):
      if len(axes) != 2:
        raise ValueError("'axes' must be an integer or have length 2.")
      a_axes = axes[0]
      b_axes = axes[1]
      # Allow a pair of bare ints as shorthand for single-axis lists.
      if isinstance(a_axes, compat.integral_types) and \
          isinstance(b_axes, compat.integral_types):
        a_axes = [a_axes]
        b_axes = [b_axes]
      if len(a_axes) != len(b_axes):
        raise ValueError(
            "Different number of contraction axes 'a' and 'b', %s != %s." %
            (len(a_axes), len(b_axes)))
      return a_axes, b_axes
    else:
      # Anything else (e.g. a [2, k] Tensor): split into the two rows.
      axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
      return axes[0], axes[1]

  with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
    a = ops.convert_to_tensor(a, name="a")
    b = ops.convert_to_tensor(b, name="b")
    a_axes, b_axes = _tensordot_axes(a, axes)
    # Reduce the contraction to a single matmul between two rank-2 tensors.
    a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
    b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(
        b, b_axes, True)
    ab_matmul = matmul(a_reshape, b_reshape)
    if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
      # Static shapes: restore the free dimensions, skipping the reshape when
      # the matmul result already has the target shape.
      if (ab_matmul.get_shape().is_fully_defined() and
          ab_matmul.get_shape().as_list() == a_free_dims + b_free_dims):
        return ab_matmul
      else:
        return array_ops.reshape(
            ab_matmul, a_free_dims + b_free_dims, name=name)
    else:
      # Dynamic shapes: reshape with a runtime shape, then attach whatever
      # static shape information could be inferred.
      a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
      b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
      product = array_ops.reshape(
          ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
      if a_free_dims_static is not None and b_free_dims_static is not None:
        product.set_shape(a_free_dims_static + b_free_dims_static)
      return product

@tf_export("math.polyval")
def polyval(coeffs, x, name=None):
  r"""Computes the elementwise value of a polynomial.

  If `x` is a tensor and `coeffs` is a list of `n + 1` tensors,
  this function returns the value of the n-th order polynomial

  `p(x) = coeffs[n-1] + coeffs[n-2] * x + ... + coeffs[0] * x**(n-1)`

  evaluated using Horner's method, i.e.

  `p(x) = coeffs[n-1] + x * (coeffs[n-2] + ... + x * (coeffs[1] +
  x * coeffs[0]))`

  Usage Example:

  >>> coefficients = [1.0, 2.5, -4.2]
  >>> x = 5.0
  >>> y = tf.math.polyval(coefficients, x)
  >>> y
  <tf.Tensor: shape=(), dtype=float32, numpy=33.3>

  Usage Example:

  >>> tf.math.polyval([2, 1, 0], 3) # evaluates 2 * (3**2) + 1 * (3**1) + 0 * (3**0)
  <tf.Tensor: shape=(), dtype=int32, numpy=21>

  `tf.math.polyval` can also be used in polynomial regression. Taking
  advantage of this function can facilitate writing a polynomial equation
  as compared to explicitly writing it out, especially for higher degree
  polynomials.

  >>> x = tf.constant(3)
  >>> theta1 = tf.Variable(2)
  >>> theta2 = tf.Variable(1)
  >>> theta3 = tf.Variable(0)
  >>> tf.math.polyval([theta1, theta2, theta3], x)
  <tf.Tensor: shape=(), dtype=int32, numpy=21>

  Args:
    coeffs: A list of `Tensor` representing the coefficients of the polynomial.
    x: A `Tensor` representing the variable of the polynomial.
    name: A name for the operation (optional).

  Returns:
    A `tensor` of the shape as the expression p(x) with usual broadcasting
    rules for element-wise addition and multiplication applied.

  Raises:
    ValueError: If `coeffs` is not a list.

  @compatibility(numpy)
  Equivalent to numpy.polyval.
  @end_compatibility
  """
  # NOTE: callers may rely on catching ValueError here, so the exception type
  # is kept even though TypeError would arguably be more conventional.
  if not isinstance(coeffs, list):
    raise ValueError("Argument coeffs must be list type "
                     "found {}.".format(type(coeffs)))

  with ops.name_scope(name, "polyval", nest.flatten(coeffs) + [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    # An empty coefficient list means the zero polynomial.
    if len(coeffs) < 1:
      return array_ops.zeros_like(x, name=name)
    coeffs = [
        ops.convert_to_tensor(coeff, name=("coeff_%d" % index))
        for index, coeff in enumerate(coeffs)
    ]
    # Horner's method: fold in one coefficient per multiply-add.
    p = coeffs[0]
    for c in coeffs[1:]:
      p = c + p * x
    return p

@tf_export("math.reciprocal_no_nan")
def reciprocal_no_nan(x, name=None):
  """Performs a safe reciprocal operation, element wise.

  If a particular element is zero, the reciprocal for that element is
  also set to zero.

  For example:
  ```python
  x = tf.constant([2.0, 0.5, 0, 1], dtype=tf.float32)
  tf.math.reciprocal_no_nan(x)  # [ 0.5, 2, 0.0, 1.0 ]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64` `complex64` or
      `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.

  Raises:
    TypeError: x must be of a valid dtype.

  """
  with ops.name_scope(name, "reciprocal_no_nan", [x]) as scope:
    tensor = ops.convert_to_tensor(x, name="x")
    # div_no_nan(1, x) yields 0 wherever x == 0 instead of inf/nan.
    unit = constant_op.constant(
        1, dtype=tensor.dtype.base_dtype, name="one")
    return gen_math_ops.div_no_nan(unit, tensor, name=scope)

@tf_export("math.xlog1py")
def xlog1py(x, y, name=None):
  r"""Compute x * log1p(y).

  Given `x` and `y`, compute `x * log1p(y)`. This function safely returns
  zero when `x = 0`, no matter what the value of `y` is.

  Example:

  >>> tf.math.xlog1py(0., 1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.>
  >>> tf.math.xlog1py(1., 1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.6931472>
  >>> tf.math.xlog1py(2., 2.)
  <tf.Tensor: shape=(), dtype=float32, numpy=2.1972246>
  >>> tf.math.xlog1py(0., -1.)
  <tf.Tensor: shape=(), dtype=float32, numpy=0.>

  Args:
    x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    y: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    name: A name for the operation (optional).

  Returns:
    `x * log1p(y)`.

  @compatibility(scipy)
  Equivalent to scipy.special.xlog1py
  @end_compatibility
  """
  # Thin wrapper around the generated kernel; the zero-safe behavior at
  # x == 0 is implemented by the op itself.
  with ops.name_scope(name, "xlog1py", [x]):
    result = gen_math_ops.xlog1py(x, y)
  return result

@tf_export("math.erfinv")
def erfinv(x, name=None):
  """Compute inverse error function.

  Given `x`, compute the inverse error function of `x`. This function
  is the inverse of `tf.math.erf`.

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).
  Returns:
    Inverse error function of `x`.
  """
  # Delegate directly to the generated kernel under a named scope.
  with ops.name_scope(name, "erfinv", [x]):
    inverse = gen_math_ops.erfinv(x)
  return inverse

@tf_export("math.ndtri")
def ndtri(x, name=None):
  """Compute quantile of Standard Normal.

  Args:
    x: `Tensor` with type `float` or `double`.
    name: A name for the operation (optional).
  Returns:
    The quantile (inverse CDF) of the standard normal evaluated at `x`.
  """
  # The Returns doc previously said "Inverse error function of x" — a
  # copy-paste error from erfinv; ndtri is the standard-normal quantile.
  with ops.name_scope(name, "ndtri", [x]):
    return gen_math_ops.ndtri(x)

@tf_export("math.ceil", v1=["math.ceil", "ceil"])
@deprecation.deprecated_endpoints("ceil")
def ceil(x, name=None):
  """Return the ceiling of the input, element-wise.

  For example:

  >>> tf.math.ceil([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
  <tf.Tensor: shape=(7,), dtype=float32,
  numpy=array([-1., -1., -0.,  1.,  2.,  2.,  2.], dtype=float32)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.

  @compatibility(numpy)
  Equivalent to np.ceil
  @end_compatibility
  """
  # Docstring previously listed a stray "int32" after the float types; the
  # underlying ceil kernel is floating-point only.
  return gen_math_ops.ceil(x, name)

@tf_export("math.sqrt", "sqrt")
def sqrt(x, name=None):  # pylint: disable=redefined-builtin
  r"""Computes element-wise square root of the input tensor.

  Note: This operation does not support integer types.

  >>> x = tf.constant([[4.0], [16.0]])
  >>> tf.sqrt(x)
  <tf.Tensor: shape=(2, 1), dtype=float32, numpy=
  array([[2.],
         [4.]], dtype=float32)>
  >>> y = tf.constant([[-4.0], [16.0]])
  >>> tf.sqrt(y)
  <tf.Tensor: shape=(2, 1), dtype=float32, numpy=
  array([[nan],
         [ 4.]], dtype=float32)>
  >>> z = tf.constant([[-1.0], [16.0]], dtype=tf.complex128)
  >>> tf.sqrt(z)
  <tf.Tensor: shape=(2, 1), dtype=complex128, numpy=
  array([[0.0+1.j],
         [4.0+0.j]])>

  Note: In order to support complex numbers, please provide an input tensor
  of `complex64` or `complex128`.

  Args:
    x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
      `complex64`, `complex128`
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor` of same size, type and sparsity as `x`.
  """
  return gen_math_ops.sqrt(x, name)

# pylint: disable=g-docstring-has-escape
@tf_export("math.exp", "exp")
def exp(x, name=None):
  r"""Computes exponential of x element-wise.  \\(y = e^x\\).

  This function computes the exponential of the input tensor element-wise.
  i.e. `math.exp(x)` or \\(e^x\\), where `x` is the input tensor.
  \\(e\\) denotes Euler's number and is approximately equal to 2.718281.
  Output is positive for any real input.

  >>> x = tf.constant(2.0)
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(), dtype=float32, numpy=7.389056>

  >>> x = tf.constant([2.0, 8.0])
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([   7.389056, 2980.958   ], dtype=float32)>

  For complex numbers, the exponential value is calculated as
  \\(e^{x+iy}={e^x}{e^{iy}}={e^x}{\\cos(y)+i\\sin(y)}\\)

  For `1+1j` the value would be computed as:
  \\(e^1{\\cos(1)+i\\sin(1)} = 2.7182817 \\times (0.5403023+0.84147096j)\\)

  >>> x = tf.constant(1 + 1j)
  >>> tf.math.exp(x)
  <tf.Tensor: shape=(), dtype=complex128,
  numpy=(1.4686939399158851+2.2873552871788423j)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.

  @compatibility(numpy)
  Equivalent to np.exp
  @end_compatibility
  """
  return gen_math_ops.exp(x, name)

# pylint: enable=g-docstring-has-escape

@tf_export("math.sobol_sample")
def sobol_sample(dim, num_results, skip=0, dtype=dtypes.float32, name=None):
  """Generates points from the Sobol sequence.

  Creates a Sobol sequence with `num_results` samples. Each sample has
  dimension `dim`. Skips the first `skip` samples.

  Args:
    dim: Positive scalar `Tensor` representing each sample's dimension.
    num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol
      points to return in the output.
    skip: (Optional) Positive scalar `Tensor` of dtype int32. The number of
      initial points of the Sobol sequence to skip. Default value is 0.
    dtype: (Optional) The `tf.Dtype` of the sample. One of: `tf.float32` or
      `tf.float64`. Defaults to `tf.float32`.
    name: (Optional) Python `str` name prefixed to ops created by this
      function.

  Returns:
    `Tensor` of samples from Sobol sequence with `shape` [num_results, dim].
  """
  # Thin wrapper: all work happens in the generated kernel.
  with ops.name_scope(name, "sobol", [dim, num_results, skip]):
    samples = gen_math_ops.sobol_sample(dim, num_results, skip, dtype=dtype)
  return samples

@tf_export("math.rsqrt", v1=["math.rsqrt", "rsqrt"])
@deprecation.deprecated_endpoints("rsqrt")
def rsqrt(x, name=None):
  """Computes reciprocal of square root of x element-wise.

  For example:

  >>> x = tf.constant([2., 0., -2.])
  >>> tf.math.rsqrt(x)
  <tf.Tensor: shape=(3,), dtype=float32,
  numpy=array([0.707, inf, nan], dtype=float32)>

  Args:
    x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
      `float32`, `float64`.
    name: A name for the operation (optional).

  Returns:
    A `tf.Tensor`. Has the same type as `x`.
  """
  # Docstring previously listed a stray "int32" after the float types; the
  # underlying rsqrt kernel is floating-point only.
  return gen_math_ops.rsqrt(x, name)