Source code for tensorflow.python.ops.gen_array_ops

"""Python wrappers around TensorFlow ops.

This file is MACHINE GENERATED! Do not edit.
Original C++ source file: array_ops.cc
"""

import collections

from tensorflow.python import pywrap_tfe as pywrap_tfe
from tensorflow.python.eager import context as _context
from tensorflow.python.eager import core as _core
from tensorflow.python.eager import execute as _execute
from tensorflow.python.framework import dtypes as _dtypes

from tensorflow.python.framework import op_def_registry as _op_def_registry
from tensorflow.python.framework import ops as _ops
from tensorflow.python.framework import op_def_library as _op_def_library
from tensorflow.python.util.deprecation import deprecated_endpoints
from tensorflow.python.util import dispatch as _dispatch
from tensorflow.python.util.tf_export import tf_export


def batch_matrix_band_part(input, num_lower, num_upper, name=None):
  r"""TODO: add doc.

  Args:
    input: A `Tensor`.
    num_lower: A `Tensor` of type `int64`.
    num_upper: A `Tensor` of type `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "BatchMatrixBandPart", name,
        tld.op_callbacks, input, num_lower, num_upper)
      return _result
    except _core._FallbackException:
      try:
        return batch_matrix_band_part_eager_fallback(
            input, num_lower, num_upper, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchMatrixBandPart", input=input, num_lower=num_lower,
                               num_upper=num_upper, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchMatrixBandPart", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

BatchMatrixBandPart = tf_export("raw_ops.BatchMatrixBandPart")(_ops.to_raw_op(batch_matrix_band_part))
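
# Illustrative usage sketch (not part of the generated file). It assumes the
# op behaves like `tf.linalg.band_part` applied to each innermost matrix,
# i.e. entries outside the requested band are zeroed, and assumes TensorFlow
# 2.x eager execution:
#
#   import tensorflow as tf
#
#   x = tf.ones([1, 4, 4])
#   # Keep one subdiagonal and two superdiagonals of each 4x4 matrix.
#   banded = tf.raw_ops.BatchMatrixBandPart(
#       input=x,
#       num_lower=tf.constant(1, tf.int64),
#       num_upper=tf.constant(2, tf.int64))
#   print(banded.shape)  # (1, 4, 4)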


def batch_matrix_band_part_eager_fallback(input, num_lower, num_upper, name, ctx):
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  num_lower = _ops.convert_to_tensor(num_lower, _dtypes.int64)
  num_upper = _ops.convert_to_tensor(num_upper, _dtypes.int64)
  _inputs_flat = [input, num_lower, num_upper]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"BatchMatrixBandPart", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "BatchMatrixBandPart", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def batch_matrix_diag(diagonal, name=None):
  r"""TODO: add doc.

  Args:
    diagonal: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `diagonal`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "BatchMatrixDiag", name,
        tld.op_callbacks, diagonal)
      return _result
    except _core._FallbackException:
      try:
        return batch_matrix_diag_eager_fallback(
            diagonal, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchMatrixDiag", diagonal=diagonal, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchMatrixDiag", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

BatchMatrixDiag = tf_export("raw_ops.BatchMatrixDiag")(_ops.to_raw_op(batch_matrix_diag))


def batch_matrix_diag_eager_fallback(diagonal, name, ctx):
  _attr_T, (diagonal,) = _execute.args_to_matching_eager([diagonal], ctx)
  _inputs_flat = [diagonal]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"BatchMatrixDiag", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "BatchMatrixDiag", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def batch_matrix_diag_part(input, name=None):
  r"""TODO: add doc.

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "BatchMatrixDiagPart", name,
        tld.op_callbacks, input)
      return _result
    except _core._FallbackException:
      try:
        return batch_matrix_diag_part_eager_fallback(
            input, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchMatrixDiagPart", input=input, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchMatrixDiagPart", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

BatchMatrixDiagPart = tf_export("raw_ops.BatchMatrixDiagPart")(_ops.to_raw_op(batch_matrix_diag_part))
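
# Illustrative usage sketch (not part of the generated file). The assumption
# here is that the two ops above mirror `tf.linalg.diag` / `tf.linalg.diag_part`
# for a batch of vectors, under TensorFlow 2.x eager execution:
#
#   import tensorflow as tf
#
#   diagonals = tf.constant([[1., 2., 3.]])                  # batch of 1 vector
#   dense = tf.raw_ops.BatchMatrixDiag(diagonal=diagonals)   # shape (1, 3, 3)
#   recovered = tf.raw_ops.BatchMatrixDiagPart(input=dense)  # shape (1, 3)
#   # `recovered` equals `diagonals`.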


def batch_matrix_diag_part_eager_fallback(input, name, ctx):
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"BatchMatrixDiagPart", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "BatchMatrixDiagPart", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def batch_matrix_set_diag(input, diagonal, name=None):
  r"""TODO: add doc.

  Args:
    input: A `Tensor`.
    diagonal: A `Tensor`. Must have the same type as `input`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "BatchMatrixSetDiag", name,
        tld.op_callbacks, input, diagonal)
      return _result
    except _core._FallbackException:
      try:
        return batch_matrix_set_diag_eager_fallback(
            input, diagonal, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchMatrixSetDiag", input=input, diagonal=diagonal, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchMatrixSetDiag", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

BatchMatrixSetDiag = tf_export("raw_ops.BatchMatrixSetDiag")(_ops.to_raw_op(batch_matrix_set_diag))


def batch_matrix_set_diag_eager_fallback(input, diagonal, name, ctx):
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, diagonal], ctx)
  (input, diagonal) = _inputs_T
  _inputs_flat = [input, diagonal]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"BatchMatrixSetDiag", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "BatchMatrixSetDiag", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def batch_to_space(input, crops, block_size, name=None):
  r"""BatchToSpace for 4-D tensors of type T.

  This is a legacy version of the more general BatchToSpaceND.

  Rearranges (permutes) data from batch into blocks of spatial data, followed by
  cropping. This is the reverse transformation of SpaceToBatch. More specifically,
  this op outputs a copy of the input tensor where values from the `batch`
  dimension are moved in spatial blocks to the `height` and `width` dimensions,
  followed by cropping along the `height` and `width` dimensions.

  Args:
    input: A `Tensor`. 4-D tensor with shape
      `[batch*block_size*block_size, height_pad/block_size, width_pad/block_size,
        depth]`. Note that the batch size of the input tensor must be divisible by
      `block_size * block_size`.
    crops: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      2-D tensor of non-negative integers with shape `[2, 2]`. It specifies
      how many elements to crop from the intermediate result across the spatial
      dimensions as follows:

          crops = [[crop_top, crop_bottom], [crop_left, crop_right]]
    block_size: An `int` that is `>= 2`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "BatchToSpace", name,
        tld.op_callbacks, input, crops, "block_size", block_size)
      return _result
    except _core._FallbackException:
      try:
        return batch_to_space_eager_fallback(
            input, crops, block_size=block_size, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  block_size = _execute.make_int(block_size, "block_size")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchToSpace", input=input, crops=crops, block_size=block_size,
                        name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "block_size",
              _op._get_attr_int("block_size"), "Tidx",
              _op._get_attr_type("Tidx"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchToSpace", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

BatchToSpace = tf_export("raw_ops.BatchToSpace")(_ops.to_raw_op(batch_to_space))
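
# Illustrative usage sketch (not part of the generated file), matching the
# docstring above and assuming TensorFlow 2.x eager execution:
#
#   import tensorflow as tf
#
#   # Four 1x1x1 "images" are folded into a single 2x2 spatial grid.
#   x = tf.reshape(tf.constant([1, 2, 3, 4]), [4, 1, 1, 1])
#   y = tf.raw_ops.BatchToSpace(
#       input=x, crops=tf.zeros([2, 2], tf.int32), block_size=2)
#   print(y.shape)  # (1, 2, 2, 1)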


def batch_to_space_eager_fallback(input, crops, block_size, name, ctx):
  block_size = _execute.make_int(block_size, "block_size")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  _attr_Tidx, (crops,) = _execute.args_to_matching_eager([crops], ctx, _dtypes.int32)
  _inputs_flat = [input, crops]
  _attrs = ("T", _attr_T, "block_size", block_size, "Tidx", _attr_Tidx)
  _result = _execute.execute(b"BatchToSpace", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "BatchToSpace", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export(v1=['batch_to_space_nd', 'manip.batch_to_space_nd'])
@deprecated_endpoints('batch_to_space_nd', 'manip.batch_to_space_nd')
def batch_to_space_nd(input, block_shape, crops, name=None):
  r"""BatchToSpace for N-D tensors of type T.

  This operation reshapes the "batch" dimension 0 into `M + 1` dimensions of shape
  `block_shape + [batch]`, interleaves these blocks back into the grid defined by
  the spatial dimensions `[1, ..., M]`, to obtain a result with the same rank as
  the input.  The spatial dimensions of this intermediate result are then
  optionally cropped according to `crops` to produce the output.  This is the
  reverse of SpaceToBatch.  See below for a precise description.

  Args:
    input: A `Tensor`.
      N-D with shape `input_shape = [batch] + spatial_shape + remaining_shape`,
      where spatial_shape has M dimensions.
    block_shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      1-D with shape `[M]`, all values must be >= 1.
    crops: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      2-D with shape `[M, 2]`, all values must be >= 0.
        `crops[i] = [crop_start, crop_end]` specifies the amount to crop from input
        dimension `i + 1`, which corresponds to spatial dimension `i`.  It is
        required that
        `crop_start[i] + crop_end[i] <= block_shape[i] * input_shape[i + 1]`.

      This operation is equivalent to the following steps:

      1. Reshape `input` to `reshaped` of shape:
           [block_shape[0], ..., block_shape[M-1],
            batch / prod(block_shape),
            input_shape[1], ..., input_shape[N-1]]

      2. Permute dimensions of `reshaped` to produce `permuted` of shape
           [batch / prod(block_shape),

            input_shape[1], block_shape[0],
            ...,
            input_shape[M], block_shape[M-1],

            input_shape[M+1], ..., input_shape[N-1]]

      3. Reshape `permuted` to produce `reshaped_permuted` of shape
           [batch / prod(block_shape),

            input_shape[1] * block_shape[0],
            ...,
            input_shape[M] * block_shape[M-1],

            input_shape[M+1],
            ...,
            input_shape[N-1]]

      4. Crop the start and end of dimensions `[1, ..., M]` of
         `reshaped_permuted` according to `crops` to produce the output of shape:
           [batch / prod(block_shape),

            input_shape[1] * block_shape[0] - crops[0,0] - crops[0,1],
            ...,
            input_shape[M] * block_shape[M-1] - crops[M-1,0] - crops[M-1,1],

            input_shape[M+1], ..., input_shape[N-1]]

      Some examples:

      (1) For the following input of shape `[4, 1, 1, 1]`, `block_shape = [2, 2]`, and
          `crops = [[0, 0], [0, 0]]`:

      ```
      [[[[1]]], [[[2]]], [[[3]]], [[[4]]]]
      ```

      The output tensor has shape `[1, 2, 2, 1]` and value:

      ```
      x = [[[[1], [2]], [[3], [4]]]]
      ```

      (2) For the following input of shape `[4, 1, 1, 3]`, `block_shape = [2, 2]`, and
          `crops = [[0, 0], [0, 0]]`:

      ```
      [[[[1, 2, 3]]], [[[4, 5, 6]]], [[[7, 8, 9]]], [[[10, 11, 12]]]]
      ```

      The output tensor has shape `[1, 2, 2, 3]` and value:

      ```
      x = [[[[1, 2, 3], [4, 5, 6]],
            [[7, 8, 9], [10, 11, 12]]]]
      ```

      (3) For the following input of shape `[4, 2, 2, 1]`, `block_shape = [2, 2]`, and
          `crops = [[0, 0], [0, 0]]`:

      ```
      x = [[[[1], [3]], [[9], [11]]],
           [[[2], [4]], [[10], [12]]],
           [[[5], [7]], [[13], [15]]],
           [[[6], [8]], [[14], [16]]]]
      ```

      The output tensor has shape `[1, 4, 4, 1]` and value:

      ```
      x = [[[[1],   [2],  [3],  [4]],
           [[5],   [6],  [7],  [8]],
           [[9],  [10], [11],  [12]],
           [[13], [14], [15],  [16]]]]
      ```

      (4) For the following input of shape `[8, 1, 3, 1]`, `block_shape = [2, 2]`, and
          `crops = [[0, 0], [2, 0]]`:

      ```
      x = [[[[0], [1], [3]]], [[[0], [9], [11]]],
           [[[0], [2], [4]]], [[[0], [10], [12]]],
           [[[0], [5], [7]]], [[[0], [13], [15]]],
           [[[0], [6], [8]]], [[[0], [14], [16]]]]
      ```

      The output tensor has shape `[2, 2, 4, 1]` and value:

      ```
      x = [[[[1],   [2],  [3],  [4]],
            [[5],   [6],  [7],  [8]]],
           [[[9],  [10], [11],  [12]],
            [[13], [14], [15],  [16]]]]
      ```
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "BatchToSpaceND", name,
        tld.op_callbacks, input, block_shape, crops)
      return _result
    except _core._FallbackException:
      try:
        return batch_to_space_nd_eager_fallback(
            input, block_shape, crops, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              batch_to_space_nd, input=input, block_shape=block_shape,
                                 crops=crops, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchToSpaceND", input=input, block_shape=block_shape, crops=crops,
                          name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          batch_to_space_nd, input=input, block_shape=block_shape,
                             crops=crops, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tblock_shape",
              _op._get_attr_type("Tblock_shape"), "Tcrops",
              _op._get_attr_type("Tcrops"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchToSpaceND", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

BatchToSpaceND = tf_export("raw_ops.BatchToSpaceND")(_ops.to_raw_op(batch_to_space_nd))
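
# Illustrative usage sketch (not part of the generated file). As the docstring
# says, BatchToSpaceND reverses SpaceToBatchND for matching block_shape and
# zero paddings/crops; assumes TensorFlow 2.x eager execution:
#
#   import tensorflow as tf
#
#   x = tf.reshape(tf.range(16, dtype=tf.float32), [1, 4, 4, 1])
#   packed = tf.raw_ops.SpaceToBatchND(
#       input=x, block_shape=[2, 2], paddings=[[0, 0], [0, 0]])   # (4, 2, 2, 1)
#   restored = tf.raw_ops.BatchToSpaceND(
#       input=packed, block_shape=[2, 2], crops=[[0, 0], [0, 0]])  # (1, 4, 4, 1)
#   # `restored` equals `x`.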


def batch_to_space_nd_eager_fallback(input, block_shape, crops, name, ctx):
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  _attr_Tblock_shape, (block_shape,) = _execute.args_to_matching_eager([block_shape], ctx, _dtypes.int32)
  _attr_Tcrops, (crops,) = _execute.args_to_matching_eager([crops], ctx, _dtypes.int32)
  _inputs_flat = [input, block_shape, crops]
  _attrs = ("T", _attr_T, "Tblock_shape", _attr_Tblock_shape, "Tcrops",
  _attr_Tcrops)
  _result = _execute.execute(b"BatchToSpaceND", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "BatchToSpaceND", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('bitcast')
def bitcast(input, type, name=None):
  r"""Bitcasts a tensor from one type to another without copying data.

  Given a tensor `input`, this operation returns a tensor that has the same buffer
  data as `input` with datatype `type`.

  If the input datatype `T` is larger than the output datatype `type` then the
  shape changes from [...] to [..., sizeof(`T`)/sizeof(`type`)].

  If `T` is smaller than `type`, the operator requires that the rightmost
  dimension be equal to sizeof(`type`)/sizeof(`T`). The shape then goes from
  [..., sizeof(`type`)/sizeof(`T`)] to [...].

  tf.bitcast() and tf.cast() work differently when real dtype is casted as a complex dtype
  (e.g. tf.complex64 or tf.complex128) as tf.cast() make imaginary part 0 while tf.bitcast()
  gives module error.
  For example,

  Example 1:

  >>> a = [1., 2., 3.]
  >>> equality_bitcast = tf.bitcast(a, tf.complex128)
  Traceback (most recent call last):
  ...
  InvalidArgumentError: Cannot bitcast from 1 to 18 [Op:Bitcast]
  >>> equality_cast = tf.cast(a, tf.complex128)
  >>> print(equality_cast)
  tf.Tensor([1.+0.j 2.+0.j 3.+0.j], shape=(3,), dtype=complex128)

  Example 2:

  >>> tf.bitcast(tf.constant(0xffffffff, dtype=tf.uint32), tf.uint8)
  <tf.Tensor: shape=(4,), dtype=uint8, numpy=array([255, 255, 255, 255], dtype=uint8)>

  Example 3:

  >>> x = [1., 2., 3.]
  >>> y = [0., 2., 3.]
  >>> equality= tf.equal(x,y)
  >>> equality_cast = tf.cast(equality,tf.float32)
  >>> equality_bitcast = tf.bitcast(equality_cast,tf.uint8)
  >>> print(equality)
  tf.Tensor([False True True], shape=(3,), dtype=bool)
  >>> print(equality_cast)
  tf.Tensor([0. 1. 1.], shape=(3,), dtype=float32)
  >>> print(equality_bitcast)
  tf.Tensor(
  [[ 0 0 0 0]
   [ 0 0 128 63]
   [ 0 0 128 63]], shape=(3, 4), dtype=uint8)

  *NOTE*: Bitcast is implemented as a low-level cast, so machines with different
  endian orderings will give different results.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int64`, `int32`, `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `complex64`, `complex128`, `qint8`, `quint8`, `qint16`, `quint16`, `qint32`.
    type: A `tf.DType` from: `tf.bfloat16, tf.half, tf.float32, tf.float64, tf.int64, tf.int32, tf.uint8, tf.uint16, tf.uint32, tf.uint64, tf.int8, tf.int16, tf.complex64, tf.complex128, tf.qint8, tf.quint8, tf.qint16, tf.quint16, tf.qint32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `type`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "Bitcast", name,
        tld.op_callbacks, input, "type", type)
      return _result
    except _core._FallbackException:
      try:
        return bitcast_eager_fallback(
            input, type=type, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              bitcast, input=input, type=type, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  type = _execute.make_type(type, "type")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Bitcast", input=input, type=type, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          bitcast, input=input, type=type, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "type",
              _op._get_attr_type("type"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Bitcast", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

Bitcast = tf_export("raw_ops.Bitcast")(_ops.to_raw_op(bitcast))


def bitcast_eager_fallback(input, type, name, ctx):
  type = _execute.make_type(type, "type")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "type", type)
  _result = _execute.execute(b"Bitcast", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Bitcast", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def broadcast_args(s0, s1, name=None):
  r"""Return the shape of s0 op s1 with broadcast.

  Given `s0` and `s1`, tensors that represent shapes, compute `r0`, the
  broadcasted shape. `s0`, `s1` and `r0` are all integer vectors.

  Args:
    s0: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    s1: A `Tensor`. Must have the same type as `s0`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `s0`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "BroadcastArgs", name,
        tld.op_callbacks, s0, s1)
      return _result
    except _core._FallbackException:
      try:
        return broadcast_args_eager_fallback(
            s0, s1, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BroadcastArgs", s0=s0, s1=s1, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BroadcastArgs", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

BroadcastArgs = tf_export("raw_ops.BroadcastArgs")(_ops.to_raw_op(broadcast_args))


def broadcast_args_eager_fallback(s0, s1, name, ctx):
  _attr_T, _inputs_T = _execute.args_to_matching_eager([s0, s1], ctx, _dtypes.int32)
  (s0, s1) = _inputs_T
  _inputs_flat = [s0, s1]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"BroadcastArgs", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "BroadcastArgs", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

_BroadcastGradientArgsOutput = collections.namedtuple(
    "BroadcastGradientArgs", ["r0", "r1"])


def broadcast_gradient_args(s0, s1, name=None):
  r"""Return the reduction indices for computing gradients of s0 op s1 with broadcast.

  This is typically used by gradient computations for a broadcasting operation.

  Args:
    s0: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    s1: A `Tensor`. Must have the same type as `s0`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (r0, r1).

    r0: A `Tensor`. Has the same type as `s0`.
    r1: A `Tensor`. Has the same type as `s0`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "BroadcastGradientArgs", name,
        tld.op_callbacks, s0, s1)
      _result = _BroadcastGradientArgsOutput._make(_result)
      return _result
    except _core._FallbackException:
      try:
        return broadcast_gradient_args_eager_fallback(
            s0, s1, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BroadcastGradientArgs", s0=s0, s1=s1, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BroadcastGradientArgs", _inputs_flat, _attrs, _result)
  _result = _BroadcastGradientArgsOutput._make(_result)
  return _result

BroadcastGradientArgs = tf_export("raw_ops.BroadcastGradientArgs")(_ops.to_raw_op(broadcast_gradient_args))


def broadcast_gradient_args_eager_fallback(s0, s1, name, ctx):
  _attr_T, _inputs_T = _execute.args_to_matching_eager([s0, s1], ctx, _dtypes.int32)
  (s0, s1) = _inputs_T
  _inputs_flat = [s0, s1]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"BroadcastGradientArgs", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "BroadcastGradientArgs", _inputs_flat, _attrs, _result)
  _result = _BroadcastGradientArgsOutput._make(_result)
  return _result
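
# Illustrative usage sketch (not part of the generated file): the reduction
# indices produced for a typical bias-add style broadcast, assuming TensorFlow
# 2.x eager execution:
#
#   import tensorflow as tf
#
#   r0, r1 = tf.raw_ops.BroadcastGradientArgs(
#       s0=tf.constant([2, 3, 5]), s1=tf.constant([5]))
#   print(r0.numpy())  # []      -> no reduction needed for the [2, 3, 5] input
#   print(r1.numpy())  # [0, 1]  -> sum gradients over the broadcast dimensions
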
@_dispatch.add_dispatch_list
@tf_export('broadcast_to')
def broadcast_to(input, shape, name=None):
  r"""Broadcast an array for a compatible shape.

  Broadcasting is the process of making arrays to have compatible shapes
  for arithmetic operations. Two shapes are compatible if for each
  dimension pair they are either equal or one of them is one. When trying
  to broadcast a Tensor to a shape, it starts with the trailing dimensions,
  and works its way forward.

  For example,

  >>> x = tf.constant([1, 2, 3])
  >>> y = tf.broadcast_to(x, [3, 3])
  >>> print(y)
  tf.Tensor(
      [[1 2 3]
       [1 2 3]
       [1 2 3]], shape=(3, 3), dtype=int32)

  In the above example, the input Tensor with the shape of `[1, 3]`
  is broadcasted to output Tensor with shape of `[3, 3]`.

  Args:
    input: A `Tensor`. A Tensor to broadcast.
    shape: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      An 1-D `int` Tensor. The shape of the desired output.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "BroadcastTo", name,
        tld.op_callbacks, input, shape)
      return _result
    except _core._FallbackException:
      try:
        return broadcast_to_eager_fallback(
            input, shape, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              broadcast_to, input=input, shape=shape, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BroadcastTo", input=input, shape=shape, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          broadcast_to, input=input, shape=shape, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "Tidx",
              _op._get_attr_type("Tidx"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BroadcastTo", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

BroadcastTo = tf_export("raw_ops.BroadcastTo")(_ops.to_raw_op(broadcast_to))


def broadcast_to_eager_fallback(input, shape, name, ctx):
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  _attr_Tidx, (shape,) = _execute.args_to_matching_eager([shape], ctx, _dtypes.int32)
  _inputs_flat = [input, shape]
  _attrs = ("T", _attr_T, "Tidx", _attr_Tidx)
  _result = _execute.execute(b"BroadcastTo", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "BroadcastTo", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('debugging.check_numerics', v1=['debugging.check_numerics', 'check_numerics'])
@deprecated_endpoints('check_numerics')
def check_numerics(tensor, message, name=None):
  r"""Checks a tensor for NaN and Inf values.

  When run, reports an `InvalidArgument` error if `tensor` has any values
  that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.

  Args:
    tensor: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    message: A `string`. Prefix of the error message.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "CheckNumerics", name,
        tld.op_callbacks, tensor, "message", message)
      return _result
    except _core._FallbackException:
      try:
        return check_numerics_eager_fallback(
            tensor, message=message, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              check_numerics, tensor=tensor, message=message, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  message = _execute.make_str(message, "message")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CheckNumerics", tensor=tensor, message=message, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          check_numerics, tensor=tensor, message=message, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "message",
              _op.get_attr("message"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CheckNumerics", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

CheckNumerics = tf_export("raw_ops.CheckNumerics")(_ops.to_raw_op(check_numerics))


def check_numerics_eager_fallback(tensor, message, name, ctx):
  message = _execute.make_str(message, "message")
  _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], ctx)
  _inputs_flat = [tensor]
  _attrs = ("T", _attr_T, "message", message)
  _result = _execute.execute(b"CheckNumerics", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CheckNumerics", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def check_numerics_v2(tensor, message, name=None):
  r"""Checks a tensor for NaN, -Inf and +Inf values.

  When run, reports an `InvalidArgument` error if `tensor` has any values
  that are not a number (NaN) or infinity (Inf). Otherwise, passes `tensor` as-is.
  Unlike CheckNumerics (V1), CheckNumericsV2 distinguishes -Inf and +Inf in the
  errors it throws.

  Args:
    tensor: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    message: A `string`. Prefix of the error message.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `tensor`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "CheckNumericsV2", name,
        tld.op_callbacks, tensor, "message", message)
      return _result
    except _core._FallbackException:
      try:
        return check_numerics_v2_eager_fallback(
            tensor, message=message, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  message = _execute.make_str(message, "message")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CheckNumericsV2", tensor=tensor, message=message, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "message",
              _op.get_attr("message"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CheckNumericsV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

CheckNumericsV2 = tf_export("raw_ops.CheckNumericsV2")(_ops.to_raw_op(check_numerics_v2))


def check_numerics_v2_eager_fallback(tensor, message, name, ctx):
  message = _execute.make_str(message, "message")
  _attr_T, (tensor,) = _execute.args_to_matching_eager([tensor], ctx)
  _inputs_flat = [tensor]
  _attrs = ("T", _attr_T, "message", message)
  _result = _execute.execute(b"CheckNumericsV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "CheckNumericsV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def concat(concat_dim, values, name=None):
  r"""Concatenates tensors along one dimension.

  Args:
    concat_dim: A `Tensor` of type `int32`.
      0-D. The dimension along which to concatenate. Must be in the
      range [0, rank(values)).
    values: A list of at least 2 `Tensor` objects with the same type.
      The `N` Tensors to concatenate. Their ranks and types must match,
      and their sizes must match in all dimensions except `concat_dim`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `values`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "Concat", name,
        tld.op_callbacks, concat_dim, values)
      return _result
    except _core._FallbackException:
      try:
        return concat_eager_fallback(
            concat_dim, values, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'concat' Op, not %r." % values)
  _attr_N = len(values)
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Concat", concat_dim=concat_dim, values=values, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Concat", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

Concat = tf_export("raw_ops.Concat")(_ops.to_raw_op(concat))


def concat_eager_fallback(concat_dim, values, name, ctx):
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'concat' Op, not %r." % values)
  _attr_N = len(values)
  _attr_T, values = _execute.args_to_matching_eager(list(values), ctx)
  concat_dim = _ops.convert_to_tensor(concat_dim, _dtypes.int32)
  _inputs_flat = [concat_dim] + list(values)
  _attrs = ("N", _attr_N, "T", _attr_T)
  _result = _execute.execute(b"Concat", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Concat", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def concat_offset(concat_dim, shape, name=None):
  r"""Computes offsets of concat inputs within its output.

  For example:

  ```
  # 'x' is [2, 2, 7]
  # 'y' is [2, 3, 7]
  # 'z' is [2, 5, 7]
  concat_offset(2, [x, y, z]) => [0, 0, 0], [0, 2, 0], [0, 5, 0]
  ```

  This is typically used by gradient computations for a concat operation.

  Args:
    concat_dim: A `Tensor` of type `int32`.
      The dimension along which to concatenate.
    shape: A list of at least 2 `Tensor` objects with type `int32`.
      The `N` int32 vectors representing shape of tensors being concatenated.
    name: A name for the operation (optional).

  Returns:
    A list with the same length as `shape` of `Tensor` objects with type `int32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "ConcatOffset", name,
        tld.op_callbacks, concat_dim, shape)
      return _result
    except _core._FallbackException:
      try:
        return concat_offset_eager_fallback(
            concat_dim, shape, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  if not isinstance(shape, (list, tuple)):
    raise TypeError(
        "Expected list for 'shape' argument to "
        "'concat_offset' Op, not %r." % shape)
  _attr_N = len(shape)
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ConcatOffset", concat_dim=concat_dim, shape=shape, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("N", _op._get_attr_int("N"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ConcatOffset", _inputs_flat, _attrs, _result)
  return _result

ConcatOffset = tf_export("raw_ops.ConcatOffset")(_ops.to_raw_op(concat_offset))


def concat_offset_eager_fallback(concat_dim, shape, name, ctx):
  if not isinstance(shape, (list, tuple)):
    raise TypeError(
        "Expected list for 'shape' argument to "
        "'concat_offset' Op, not %r." % shape)
  _attr_N = len(shape)
  concat_dim = _ops.convert_to_tensor(concat_dim, _dtypes.int32)
  shape = _ops.convert_n_to_tensor(shape, _dtypes.int32)
  _inputs_flat = [concat_dim] + list(shape)
  _attrs = ("N", _attr_N)
  _result = _execute.execute(b"ConcatOffset", _attr_N, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ConcatOffset", _inputs_flat, _attrs, _result)
  return _result


def concat_v2(values, axis, name=None):
  r"""Concatenates tensors along one dimension.

  Args:
    values: A list of at least 2 `Tensor` objects with the same type.
      List of `N` Tensors to concatenate. Their ranks and types must match,
      and their sizes must match in all dimensions except `concat_dim`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      0-D. The dimension along which to concatenate. Must be in the
      range [-rank(values), rank(values)).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `values`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "ConcatV2", name,
        tld.op_callbacks, values, axis)
      return _result
    except _core._FallbackException:
      try:
        return concat_v2_eager_fallback(
            values, axis, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'concat_v2' Op, not %r." % values)
  _attr_N = len(values)
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ConcatV2", values=values, axis=axis, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"),
              "Tidx", _op._get_attr_type("Tidx"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ConcatV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

ConcatV2 = tf_export("raw_ops.ConcatV2")(_ops.to_raw_op(concat_v2))


def concat_v2_eager_fallback(values, axis, name, ctx):
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'concat_v2' Op, not %r." % values)
  _attr_N = len(values)
  _attr_T, values = _execute.args_to_matching_eager(list(values), ctx)
  _attr_Tidx, (axis,) = _execute.args_to_matching_eager([axis], ctx, _dtypes.int32)
  _inputs_flat = list(values) + [axis]
  _attrs = ("N", _attr_N, "T", _attr_T, "Tidx", _attr_Tidx)
  _result = _execute.execute(b"ConcatV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ConcatV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def conjugate_transpose(x, perm, name=None):
  r"""Shuffle dimensions of x according to a permutation and conjugate the result.

  The output `y` has the same rank as `x`. The shapes of `x` and `y` satisfy:
    `y.shape[i] == x.shape[perm[i]] for i in [0, 1, ..., rank(x) - 1]`
    `y[i,j,k,...,s,t,u] == conj(x[perm[i], perm[j], perm[k],...,perm[s], perm[t], perm[u]])`

  Args:
    x: A `Tensor`.
    perm: A `Tensor`. Must be one of the following types: `int32`, `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `x`.
""" _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "ConjugateTranspose", name, tld.op_callbacks, x, perm) return _result except _core._FallbackException: try: return conjugate_transpose_eager_fallback( x, perm, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "ConjugateTranspose", x=x, perm=perm, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T"), "Tperm", _op._get_attr_type("Tperm")) _inputs_flat = _op.inputs _execute.record_gradient( "ConjugateTranspose", _inputs_flat, _attrs, _result) _result, = _result return _result ConjugateTranspose = tf_export("raw_ops.ConjugateTranspose")(_ops.to_raw_op(conjugate_transpose)) def conjugate_transpose_eager_fallback(x, perm, name, ctx): _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx) _attr_Tperm, (perm,) = _execute.args_to_matching_eager([perm], ctx, _dtypes.int32) _inputs_flat = [x, perm] _attrs = ("T", _attr_T, "Tperm", _attr_Tperm) _result = _execute.execute(b"ConjugateTranspose", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "ConjugateTranspose", _inputs_flat, _attrs, _result) _result, = _result return _result def const(value, dtype, name=None): r"""Returns a constant tensor. Args: value: A `tf.TensorProto`. Attr `value` is the tensor to return. dtype: A `tf.DType`. name: A name for the operation (optional). Returns: A `Tensor` of type `dtype`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "Const", name, tld.op_callbacks, "value", value, "dtype", dtype) return _result except _core._FallbackException: try: return const_eager_fallback( value=value, dtype=dtype, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. value = _execute.make_tensor(value, "value") dtype = _execute.make_type(dtype, "dtype") _, _, _op, _outputs = _op_def_library._apply_op_helper( "Const", value=value, dtype=dtype, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("value", _op.get_attr("value"), "dtype", _op._get_attr_type("dtype")) _inputs_flat = _op.inputs _execute.record_gradient( "Const", _inputs_flat, _attrs, _result) _result, = _result return _result Const = tf_export("raw_ops.Const")(_ops.to_raw_op(const)) def const_eager_fallback(value, dtype, name, ctx): value = _execute.make_tensor(value, "value") dtype = _execute.make_type(dtype, "dtype") _inputs_flat = [] _attrs = ("value", value, "dtype", dtype) _result = _execute.execute(b"Const", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "Const", _inputs_flat, _attrs, _result) _result, = _result return _result def debug_gradient_identity(input, name=None): r"""Identity op for gradient debugging. This op is hidden from public in Python. It is used by TensorFlow Debugger to register gradient tensors for gradient debugging. 
This op operates on non-reference-type tensors. Args: input: A `Tensor`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "DebugGradientIdentity", name, tld.op_callbacks, input) return _result except _core._FallbackException: try: return debug_gradient_identity_eager_fallback( input, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "DebugGradientIdentity", input=input, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "DebugGradientIdentity", _inputs_flat, _attrs, _result) _result, = _result return _result DebugGradientIdentity = tf_export("raw_ops.DebugGradientIdentity")(_ops.to_raw_op(debug_gradient_identity)) def debug_gradient_identity_eager_fallback(input, name, ctx): _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx) _inputs_flat = [input] _attrs = ("T", _attr_T) _result = _execute.execute(b"DebugGradientIdentity", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "DebugGradientIdentity", _inputs_flat, _attrs, _result) _result, = _result return _result def debug_gradient_ref_identity(input, name=None): r"""Identity op for gradient debugging. This op is hidden from public in Python. It is used by TensorFlow Debugger to register gradient tensors for gradient debugging. This op operates on reference-type tensors. Args: input: A mutable `Tensor`. name: A name for the operation (optional). Returns: A mutable `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: raise RuntimeError("debug_gradient_ref_identity op does not support eager execution. Arg 'output' is a ref.") # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "DebugGradientRefIdentity", input=input, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "DebugGradientRefIdentity", _inputs_flat, _attrs, _result) _result, = _result return _result DebugGradientRefIdentity = tf_export("raw_ops.DebugGradientRefIdentity")(_ops.to_raw_op(debug_gradient_ref_identity)) def debug_gradient_ref_identity_eager_fallback(input, name, ctx): raise RuntimeError("debug_gradient_ref_identity op does not support eager execution. Arg 'output' is a ref.") def deep_copy(x, name=None): r"""Makes a copy of `x`. Args: x: A `Tensor`. The source tensor of type `T`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "DeepCopy", name, tld.op_callbacks, x) return _result except _core._FallbackException: try: return deep_copy_eager_fallback( x, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. 
except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "DeepCopy", x=x, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "DeepCopy", _inputs_flat, _attrs, _result) _result, = _result return _result DeepCopy = tf_export("raw_ops.DeepCopy")(_ops.to_raw_op(deep_copy)) def deep_copy_eager_fallback(x, name, ctx): _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"DeepCopy", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "DeepCopy", _inputs_flat, _attrs, _result) _result, = _result return _result def depth_to_space(input, block_size, data_format="NHWC", name=None): r"""DepthToSpace for tensors of type T. Rearranges data from depth into blocks of spatial data. This is the reverse transformation of SpaceToDepth. More specifically, this op outputs a copy of the input tensor where values from the `depth` dimension are moved in spatial blocks to the `height` and `width` dimensions. The attr `block_size` indicates the input block size and how the data is moved. * Chunks of data of size `block_size * block_size` from depth are rearranged into non-overlapping blocks of size `block_size x block_size` * The width the output tensor is `input_depth * block_size`, whereas the height is `input_height * block_size`. * The Y, X coordinates within each block of the output image are determined by the high order component of the input channel index. * The depth of the input tensor must be divisible by `block_size * block_size`. The `data_format` attr specifies the layout of the input and output tensors with the following options: "NHWC": `[ batch, height, width, channels ]` "NCHW": `[ batch, channels, height, width ]` "NCHW_VECT_C": `qint8 [ batch, channels / 4, height, width, 4 ]` It is useful to consider the operation as transforming a 6-D Tensor. e.g. for data_format = NHWC, Each element in the input tensor can be specified via 6 coordinates, ordered by decreasing memory layout significance as: n,iY,iX,bY,bX,oC (where n=batch index, iX, iY means X or Y coordinates within the input image, bX, bY means coordinates within the output block, oC means output channels). The output would be the input transposed to the following layout: n,iY,bY,iX,bX,oC This operation is useful for resizing the activations between convolutions (but keeping all data), e.g. instead of pooling. It is also useful for training purely convolutional models. For example, given an input of shape `[1, 1, 1, 4]`, data_format = "NHWC" and block_size = 2: ``` x = [[[[1, 2, 3, 4]]]] ``` This operation will output a tensor of shape `[1, 2, 2, 1]`: ``` [[[[1], [2]], [[3], [4]]]] ``` Here, the input has a batch of 1 and each batch element has shape `[1, 1, 4]`, the corresponding output will have 2x2 elements and will have a depth of 1 channel (1 = `4 / (block_size * block_size)`). The output element shape is `[2, 2, 1]`. For an input tensor with larger depth, here of shape `[1, 1, 1, 12]`, e.g. 
  ```
  x = [[[[1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]]]]
  ```

  This operation, for block size of 2, will return the following tensor of shape
  `[1, 2, 2, 3]`

  ```
     [[[[1, 2, 3], [4, 5, 6]],
       [[7, 8, 9], [10, 11, 12]]]]
  ```

  Similarly, for the following input of shape `[1 2 2 4]`, and a block size of 2:

  ```
  x =  [[[[1, 2, 3, 4],
         [5, 6, 7, 8]],
        [[9, 10, 11, 12],
         [13, 14, 15, 16]]]]
  ```

  the operator will return the following tensor of shape `[1 4 4 1]`:

  ```
  x = [[[ [1],   [2],  [5],  [6]],
        [ [3],   [4],  [7],  [8]],
        [ [9],  [10], [13],  [14]],
        [ [11], [12], [15],  [16]]]]
  ```

  Args:
    input: A `Tensor`.
    block_size: An `int` that is `>= 2`.
      The size of the spatial block, same as in Space2Depth.
    data_format: An optional `string` from: `"NHWC", "NCHW", "NCHW_VECT_C"`. Defaults to `"NHWC"`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "DepthToSpace", name,
        tld.op_callbacks, input, "block_size", block_size, "data_format",
        data_format)
      return _result
    except _core._FallbackException:
      try:
        return depth_to_space_eager_fallback(
            input, block_size=block_size, data_format=data_format, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  block_size = _execute.make_int(block_size, "block_size")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "DepthToSpace", input=input, block_size=block_size,
                        data_format=data_format, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "block_size",
              _op._get_attr_int("block_size"), "data_format",
              _op.get_attr("data_format"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "DepthToSpace", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

DepthToSpace = tf_export("raw_ops.DepthToSpace")(_ops.to_raw_op(depth_to_space))


def depth_to_space_eager_fallback(input, block_size, data_format, name, ctx):
  block_size = _execute.make_int(block_size, "block_size")
  if data_format is None:
    data_format = "NHWC"
  data_format = _execute.make_str(data_format, "data_format")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  _inputs_flat = [input]
  _attrs = ("T", _attr_T, "block_size", block_size, "data_format",
  data_format)
  _result = _execute.execute(b"DepthToSpace", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "DepthToSpace", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def dequantize(input, min_range, max_range, mode="MIN_COMBINED", narrow_range=False, axis=-1, dtype=_dtypes.float32, name=None):
  r"""Dequantize the 'input' tensor into a float or bfloat16 Tensor.

  [min_range, max_range] are scalar floats that specify the range for
  the output. The 'mode' attribute controls exactly which calculations are
  used to convert the float values to their quantized equivalents.

  In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:

  ```
  if T == qint8: in[i] += (range(T) + 1)/ 2.0
  out[i] = min_range + (in[i]* (max_range - min_range) / range(T))
  ```
  here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`

  *MIN_COMBINED Mode Example*

  If the input comes from a QuantizedRelu6, the output type is
  quint8 (range of 0-255) but the possible range of QuantizedRelu6 is
  0-6.  The min_range and max_range values are therefore 0.0 and 6.0.
  Dequantize on quint8 will take each value, cast to float, and multiply
  by 6 / 255.
  Note that if quantizedtype is qint8, the operation will additionally add
  each value by 128 prior to casting.

  If the mode is 'MIN_FIRST', then this approach is used:

  ```c++
  num_discrete_values = 1 << (# of bits in T)
  range_adjust = num_discrete_values / (num_discrete_values - 1)
  range = (range_max - range_min) * range_adjust
  range_scale = range / num_discrete_values
  const double offset_input = static_cast<double>(input) - lowest_quantized;
  result = range_min + ((input - numeric_limits<T>::min()) * range_scale)
  ```

  If the mode is `SCALED`, dequantization is performed by multiplying each
  input value by a scaling_factor. (Thus an input of 0 always maps to 0.0).

  The scaling_factor is determined from `min_range`, `max_range`, and
  `narrow_range` in a way that is compatible with `QuantizeAndDequantize{V2|V3}`
  and `QuantizeV2`, using the following algorithm:

  ```c++
    const int min_expected_T = std::numeric_limits<T>::min() +
      (narrow_range ? 1 : 0);
    const int max_expected_T = std::numeric_limits<T>::max();
    const float max_expected_T = std::numeric_limits<float>::max();

    const float scale_factor =
      (std::numeric_limits<T>::min() == 0) ? (max_range / max_expected_T)
                                           : std::max(min_range / min_expected_T,
                                                      max_range / max_expected_T);
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `qint8`, `quint8`, `qint32`, `qint16`, `quint16`.
    min_range: A `Tensor` of type `float32`.
      The minimum scalar value possibly produced for the input.
    max_range: A `Tensor` of type `float32`.
      The maximum scalar value possibly produced for the input.
    mode: An optional `string` from: `"MIN_COMBINED", "MIN_FIRST", "SCALED"`. Defaults to `"MIN_COMBINED"`.
    narrow_range: An optional `bool`. Defaults to `False`.
    axis: An optional `int`. Defaults to `-1`.
    dtype: An optional `tf.DType` from: `tf.bfloat16, tf.float32`. Defaults to `tf.float32`.
      Type of the output tensor. Currently Dequantize supports float and bfloat16.
      If 'dtype' is 'bfloat16', it only supports 'MIN_COMBINED' mode.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "Dequantize", name,
        tld.op_callbacks, input, min_range, max_range, "mode", mode,
        "narrow_range", narrow_range, "axis", axis, "dtype", dtype)
      return _result
    except _core._FallbackException:
      try:
        return dequantize_eager_fallback(
            input, min_range, max_range, mode=mode,
            narrow_range=narrow_range, axis=axis, dtype=dtype, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  if mode is None:
    mode = "MIN_COMBINED"
  mode = _execute.make_str(mode, "mode")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  if axis is None:
    axis = -1
  axis = _execute.make_int(axis, "axis")
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Dequantize", input=input, min_range=min_range, max_range=max_range,
                      mode=mode, narrow_range=narrow_range, axis=axis,
                      dtype=dtype, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "mode", _op.get_attr("mode"),
              "narrow_range", _op._get_attr_bool("narrow_range"), "axis",
              _op._get_attr_int("axis"), "dtype", _op._get_attr_type("dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Dequantize", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

Dequantize = tf_export("raw_ops.Dequantize")(_ops.to_raw_op(dequantize))


def dequantize_eager_fallback(input, min_range, max_range, mode, narrow_range, axis, dtype, name, ctx):
  if mode is None:
    mode = "MIN_COMBINED"
  mode = _execute.make_str(mode, "mode")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  if axis is None:
    axis = -1
  axis = _execute.make_int(axis, "axis")
  if dtype is None:
    dtype = _dtypes.float32
  dtype = _execute.make_type(dtype, "dtype")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  min_range = _ops.convert_to_tensor(min_range, _dtypes.float32)
  max_range = _ops.convert_to_tensor(max_range, _dtypes.float32)
  _inputs_flat = [input, min_range, max_range]
  _attrs = ("T", _attr_T, "mode", mode, "narrow_range", narrow_range, "axis",
  axis, "dtype", dtype)
  _result = _execute.execute(b"Dequantize", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Dequantize", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('linalg.tensor_diag', v1=['linalg.tensor_diag', 'diag'])
@deprecated_endpoints('diag')
def diag(diagonal, name=None):
  r"""Returns a diagonal tensor with a given diagonal values.

  Given a `diagonal`, this operation returns a tensor with the `diagonal` and
  everything else padded with zeros. The diagonal is computed as follows:

  Assume `diagonal` has dimensions [D1,..., Dk], then the output is a tensor of
  rank 2k with dimensions [D1,..., Dk, D1,..., Dk] where:

  `output[i1,..., ik, i1,..., ik] = diagonal[i1, ..., ik]` and 0 everywhere else.

  For example:

  ```
  # 'diagonal' is [1, 2, 3, 4]
  tf.diag(diagonal) ==> [[1, 0, 0, 0]
                         [0, 2, 0, 0]
                         [0, 0, 3, 0]
                         [0, 0, 0, 4]]
  ```

  Args:
    diagonal: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
      Rank k tensor where k is at most 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `diagonal`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "Diag", name,
        tld.op_callbacks, diagonal)
      return _result
    except _core._FallbackException:
      try:
        return diag_eager_fallback(
            diagonal, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              diag, diagonal=diagonal, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Diag", diagonal=diagonal, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          diag, diagonal=diagonal, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Diag", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

Diag = tf_export("raw_ops.Diag")(_ops.to_raw_op(diag))


def diag_eager_fallback(diagonal, name, ctx):
  _attr_T, (diagonal,) = _execute.args_to_matching_eager([diagonal], ctx)
  _inputs_flat = [diagonal]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"Diag", 1, inputs=_inputs_flat, attrs=_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Diag", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


@_dispatch.add_dispatch_list
@tf_export('linalg.tensor_diag_part', v1=['linalg.tensor_diag_part', 'diag_part'])
@deprecated_endpoints('diag_part')
def diag_part(input, name=None):
  r"""Returns the diagonal part of the tensor.

  This operation returns a tensor with the `diagonal` part
  of the `input`. The `diagonal` part is computed as follows:

  Assume `input` has dimensions `[D1,..., Dk, D1,..., Dk]`, then the output is a
  tensor of rank `k` with dimensions `[D1,..., Dk]` where:

  `diagonal[i1,..., ik] = input[i1, ..., ik, i1,..., ik]`.

  For example:

  ```
  # 'input' is [[1, 0, 0, 0]
                [0, 2, 0, 0]
                [0, 0, 3, 0]
                [0, 0, 0, 4]]

  tf.diag_part(input) ==> [1, 2, 3, 4]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
      Rank k tensor where k is even and not zero.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "DiagPart", name,
        tld.op_callbacks, input)
      return _result
    except _core._FallbackException:
      try:
        return diag_part_eager_fallback(
            input, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              diag_part, input=input, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
try: _, _, _op, _outputs = _op_def_library._apply_op_helper( "DiagPart", input=input, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( diag_part, input=input, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "DiagPart", _inputs_flat, _attrs, _result) _result, = _result return _result DiagPart = tf_export("raw_ops.DiagPart")(_ops.to_raw_op(diag_part)) def diag_part_eager_fallback(input, name, ctx): _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx) _inputs_flat = [input] _attrs = ("T", _attr_T) _result = _execute.execute(b"DiagPart", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "DiagPart", _inputs_flat, _attrs, _result) _result, = _result return _result def edit_distance(hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, normalize=True, name=None): r"""Computes the (possibly normalized) Levenshtein Edit Distance. The inputs are variable-length sequences provided by SparseTensors (hypothesis_indices, hypothesis_values, hypothesis_shape) and (truth_indices, truth_values, truth_shape). The inputs are: Args: hypothesis_indices: A `Tensor` of type `int64`. The indices of the hypothesis list SparseTensor. This is an N x R int64 matrix. hypothesis_values: A `Tensor`. The values of the hypothesis list SparseTensor. This is an N-length vector. hypothesis_shape: A `Tensor` of type `int64`. The shape of the hypothesis list SparseTensor. This is an R-length vector. truth_indices: A `Tensor` of type `int64`. The indices of the truth list SparseTensor. This is an M x R int64 matrix. truth_values: A `Tensor`. Must have the same type as `hypothesis_values`. The values of the truth list SparseTensor. This is an M-length vector. truth_shape: A `Tensor` of type `int64`. truth indices, vector. normalize: An optional `bool`. Defaults to `True`. boolean (if true, edit distances are normalized by length of truth). The output is: name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "EditDistance", name, tld.op_callbacks, hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, "normalize", normalize) return _result except _core._FallbackException: try: return edit_distance_eager_fallback( hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, normalize=normalize, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. 
  if normalize is None:
    normalize = True
  normalize = _execute.make_bool(normalize, "normalize")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "EditDistance", hypothesis_indices=hypothesis_indices,
                        hypothesis_values=hypothesis_values,
                        hypothesis_shape=hypothesis_shape,
                        truth_indices=truth_indices,
                        truth_values=truth_values, truth_shape=truth_shape,
                        normalize=normalize, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("normalize", _op._get_attr_bool("normalize"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "EditDistance", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

EditDistance = tf_export("raw_ops.EditDistance")(_ops.to_raw_op(edit_distance))


def edit_distance_eager_fallback(hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape, normalize, name, ctx):
  if normalize is None:
    normalize = True
  normalize = _execute.make_bool(normalize, "normalize")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([hypothesis_values, truth_values], ctx)
  (hypothesis_values, truth_values) = _inputs_T
  hypothesis_indices = _ops.convert_to_tensor(hypothesis_indices, _dtypes.int64)
  hypothesis_shape = _ops.convert_to_tensor(hypothesis_shape, _dtypes.int64)
  truth_indices = _ops.convert_to_tensor(truth_indices, _dtypes.int64)
  truth_shape = _ops.convert_to_tensor(truth_shape, _dtypes.int64)
  _inputs_flat = [hypothesis_indices, hypothesis_values, hypothesis_shape, truth_indices, truth_values, truth_shape]
  _attrs = ("normalize", normalize, "T", _attr_T)
  _result = _execute.execute(b"EditDistance", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "EditDistance", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def empty(shape, dtype, init=False, name=None):
  r"""Creates a tensor with the given shape.

  This operation creates a tensor of `shape` and `dtype`.

  Args:
    shape: A `Tensor` of type `int32`.
      1-D. Represents the shape of the output tensor.
    dtype: A `tf.DType`.
    init: An optional `bool`. Defaults to `False`.
      If True, initialize the returned tensor with the default value of dtype.
      Otherwise, the implementation is free not to initialize the tensor's content.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `dtype`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "Empty", name,
        tld.op_callbacks, shape, "dtype", dtype, "init", init)
      return _result
    except _core._FallbackException:
      try:
        return empty_eager_fallback(
            shape, dtype=dtype, init=init, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
dtype = _execute.make_type(dtype, "dtype") if init is None: init = False init = _execute.make_bool(init, "init") _, _, _op, _outputs = _op_def_library._apply_op_helper( "Empty", shape=shape, dtype=dtype, init=init, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("dtype", _op._get_attr_type("dtype"), "init", _op._get_attr_bool("init")) _inputs_flat = _op.inputs _execute.record_gradient( "Empty", _inputs_flat, _attrs, _result) _result, = _result return _result Empty = tf_export("raw_ops.Empty")(_ops.to_raw_op(empty)) def empty_eager_fallback(shape, dtype, init, name, ctx): dtype = _execute.make_type(dtype, "dtype") if init is None: init = False init = _execute.make_bool(init, "init") shape = _ops.convert_to_tensor(shape, _dtypes.int32) _inputs_flat = [shape] _attrs = ("dtype", dtype, "init", init) _result = _execute.execute(b"Empty", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "Empty", _inputs_flat, _attrs, _result) _result, = _result return _result def ensure_shape(input, shape, name=None): r"""Ensures that the tensor's shape matches the expected shape. Raises an error if the input tensor's shape does not match the specified shape. Returns the input tensor otherwise. Args: input: A `Tensor`. A tensor, whose shape is to be validated. shape: A `tf.TensorShape` or list of `ints`. The expected (possibly partially specified) shape of the input tensor. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "EnsureShape", name, tld.op_callbacks, input, "shape", shape) return _result except _core._FallbackException: try: return ensure_shape_eager_fallback( input, shape=shape, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. shape = _execute.make_shape(shape, "shape") _, _, _op, _outputs = _op_def_library._apply_op_helper( "EnsureShape", input=input, shape=shape, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("shape", _op.get_attr("shape"), "T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "EnsureShape", _inputs_flat, _attrs, _result) _result, = _result return _result EnsureShape = tf_export("raw_ops.EnsureShape")(_ops.to_raw_op(ensure_shape)) def ensure_shape_eager_fallback(input, shape, name, ctx): shape = _execute.make_shape(shape, "shape") _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx) _inputs_flat = [input] _attrs = ("shape", shape, "T", _attr_T) _result = _execute.execute(b"EnsureShape", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "EnsureShape", _inputs_flat, _attrs, _result) _result, = _result return _result def expand_dims(input, axis, name=None): r"""Inserts a dimension of 1 into a tensor's shape. Given a tensor `input`, this operation inserts a dimension of 1 at the dimension index `axis` of `input`'s shape. The dimension index `axis` starts at zero; if you specify a negative number for `axis` it is counted backward from the end. This operation is useful if you want to add a batch dimension to a single element. 
For example, if you have a single image of shape `[height, width, channels]`, you can make it a batch of 1 image with `expand_dims(image, 0)`, which will make the shape `[1, height, width, channels]`. Other examples: ``` # 't' is a tensor of shape [2] shape(expand_dims(t, 0)) ==> [1, 2] shape(expand_dims(t, 1)) ==> [2, 1] shape(expand_dims(t, -1)) ==> [2, 1] # 't2' is a tensor of shape [2, 3, 5] shape(expand_dims(t2, 0)) ==> [1, 2, 3, 5] shape(expand_dims(t2, 2)) ==> [2, 3, 1, 5] shape(expand_dims(t2, 3)) ==> [2, 3, 5, 1] ``` This operation requires that: `-1-input.dims() <= dim <= input.dims()` This operation is related to `squeeze()`, which removes dimensions of size 1. Args: input: A `Tensor`. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. 0-D (scalar). Specifies the dimension index at which to expand the shape of `input`. Must be in the range `[-rank(input) - 1, rank(input)]`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "ExpandDims", name, tld.op_callbacks, input, axis) return _result except _core._FallbackException: try: return expand_dims_eager_fallback( input, axis, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "ExpandDims", input=input, dim=axis, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T"), "Tdim", _op._get_attr_type("Tdim")) _inputs_flat = _op.inputs _execute.record_gradient( "ExpandDims", _inputs_flat, _attrs, _result) _result, = _result return _result ExpandDims = tf_export("raw_ops.ExpandDims")(_ops.to_raw_op(expand_dims)) def expand_dims_eager_fallback(input, axis, name, ctx): _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx) _attr_Tdim, (axis,) = _execute.args_to_matching_eager([axis], ctx, _dtypes.int32) _inputs_flat = [input, axis] _attrs = ("T", _attr_T, "Tdim", _attr_Tdim) _result = _execute.execute(b"ExpandDims", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "ExpandDims", _inputs_flat, _attrs, _result) _result, = _result return _result def extract_image_patches(images, ksizes, strides, rates, padding, name=None): r"""Extract `patches` from `images` and put them in the "depth" output dimension. Args: images: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `int16`, `int32`, `int64`, `uint8`, `uint16`, `uint32`, `uint64`, `complex64`, `complex128`, `bool`. 4-D Tensor with shape `[batch, in_rows, in_cols, depth]`. ksizes: A list of `ints` that has length `>= 4`. The size of the sliding window for each dimension of `images`. strides: A list of `ints` that has length `>= 4`. How far the centers of two consecutive patches are in the images. Must be: `[1, stride_rows, stride_cols, 1]`. rates: A list of `ints` that has length `>= 4`. Must be: `[1, rate_rows, rate_cols, 1]`. This is the input stride, specifying how far two consecutive patch samples are in the input. 
Equivalent to extracting patches with `patch_sizes_eff = patch_sizes + (patch_sizes - 1) * (rates - 1)`, followed by subsampling them spatially by a factor of `rates`. This is equivalent to `rate` in dilated (a.k.a. Atrous) convolutions. padding: A `string` from: `"SAME", "VALID"`. The type of padding algorithm to use. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `images`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "ExtractImagePatches", name, tld.op_callbacks, images, "ksizes", ksizes, "strides", strides, "rates", rates, "padding", padding) return _result except _core._FallbackException: try: return extract_image_patches_eager_fallback( images, ksizes=ksizes, strides=strides, rates=rates, padding=padding, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. if not isinstance(ksizes, (list, tuple)): raise TypeError( "Expected list for 'ksizes' argument to " "'extract_image_patches' Op, not %r." % ksizes) ksizes = [_execute.make_int(_i, "ksizes") for _i in ksizes] if not isinstance(strides, (list, tuple)): raise TypeError( "Expected list for 'strides' argument to " "'extract_image_patches' Op, not %r." % strides) strides = [_execute.make_int(_i, "strides") for _i in strides] if not isinstance(rates, (list, tuple)): raise TypeError( "Expected list for 'rates' argument to " "'extract_image_patches' Op, not %r." % rates) rates = [_execute.make_int(_i, "rates") for _i in rates] padding = _execute.make_str(padding, "padding") _, _, _op, _outputs = _op_def_library._apply_op_helper( "ExtractImagePatches", images=images, ksizes=ksizes, strides=strides, rates=rates, padding=padding, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("ksizes", _op.get_attr("ksizes"), "strides", _op.get_attr("strides"), "rates", _op.get_attr("rates"), "T", _op._get_attr_type("T"), "padding", _op.get_attr("padding")) _inputs_flat = _op.inputs _execute.record_gradient( "ExtractImagePatches", _inputs_flat, _attrs, _result) _result, = _result return _result ExtractImagePatches = tf_export("raw_ops.ExtractImagePatches")(_ops.to_raw_op(extract_image_patches)) def extract_image_patches_eager_fallback(images, ksizes, strides, rates, padding, name, ctx): if not isinstance(ksizes, (list, tuple)): raise TypeError( "Expected list for 'ksizes' argument to " "'extract_image_patches' Op, not %r." % ksizes) ksizes = [_execute.make_int(_i, "ksizes") for _i in ksizes] if not isinstance(strides, (list, tuple)): raise TypeError( "Expected list for 'strides' argument to " "'extract_image_patches' Op, not %r." % strides) strides = [_execute.make_int(_i, "strides") for _i in strides] if not isinstance(rates, (list, tuple)): raise TypeError( "Expected list for 'rates' argument to " "'extract_image_patches' Op, not %r." 
        % rates)
  rates = [_execute.make_int(_i, "rates") for _i in rates]
  padding = _execute.make_str(padding, "padding")
  _attr_T, (images,) = _execute.args_to_matching_eager([images], ctx)
  _inputs_flat = [images]
  _attrs = ("ksizes", ksizes, "strides", strides, "rates", rates, "T", _attr_T,
  "padding", padding)
  _result = _execute.execute(b"ExtractImagePatches", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "ExtractImagePatches", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
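# Illustrative usage (a sketch only, not part of the generated definitions;
# it assumes `import tensorflow as tf` and eager execution): the raw op
# registered above can be driven directly with the same attribute names.
#
#   images = tf.reshape(tf.range(1, 17, dtype=tf.float32), [1, 4, 4, 1])
#   patches = tf.raw_ops.ExtractImagePatches(
#       images=images, ksizes=[1, 2, 2, 1], strides=[1, 2, 2, 1],
#       rates=[1, 1, 1, 1], padding="VALID")
#   # patches has shape [1, 2, 2, 4]: each 2x2 window is flattened into depth.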
@_dispatch.add_dispatch_list
@tf_export('extract_volume_patches')
def extract_volume_patches(input, ksizes, strides, padding, name=None):
  r"""Extract `patches` from `input` and put them in the "depth" output dimension.

  3D extension of `extract_image_patches`.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      5-D Tensor with shape `[batch, in_planes, in_rows, in_cols, depth]`.
    ksizes: A list of `ints` that has length `>= 5`.
      The size of the sliding window for each dimension of `input`.
    strides: A list of `ints` that has length `>= 5`.
      1-D of length 5. How far the centers of two consecutive patches are in
      `input`. Must be: `[1, stride_planes, stride_rows, stride_cols, 1]`.
    padding: A `string` from: `"SAME", "VALID"`.
      The type of padding algorithm to use.

      We specify the size-related attributes as:

      ```python
      ksizes = [1, ksize_planes, ksize_rows, ksize_cols, 1]
      strides = [1, stride_planes, strides_rows, strides_cols, 1]
      ```
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "ExtractVolumePatches", name,
        tld.op_callbacks, input, "ksizes", ksizes, "strides", strides,
        "padding", padding)
      return _result
    except _core._FallbackException:
      try:
        return extract_volume_patches_eager_fallback(
            input, ksizes=ksizes, strides=strides, padding=padding, name=name,
            ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              extract_volume_patches, input=input, ksizes=ksizes,
                                      strides=strides, padding=padding,
                                      name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  if not isinstance(ksizes, (list, tuple)):
    raise TypeError(
        "Expected list for 'ksizes' argument to "
        "'extract_volume_patches' Op, not %r." % ksizes)
  ksizes = [_execute.make_int(_i, "ksizes") for _i in ksizes]
  if not isinstance(strides, (list, tuple)):
    raise TypeError(
        "Expected list for 'strides' argument to "
        "'extract_volume_patches' Op, not %r." % strides)
  strides = [_execute.make_int(_i, "strides") for _i in strides]
  padding = _execute.make_str(padding, "padding")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ExtractVolumePatches", input=input, ksizes=ksizes, strides=strides,
                                padding=padding, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          extract_volume_patches, input=input, ksizes=ksizes,
                                  strides=strides, padding=padding, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("ksizes", _op.get_attr("ksizes"), "strides",
              _op.get_attr("strides"), "T", _op._get_attr_type("T"),
              "padding", _op.get_attr("padding"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ExtractVolumePatches", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
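# Illustrative usage (a sketch only, assuming `import tensorflow as tf` and
# eager execution): the 3-D analogue of ExtractImagePatches takes length-5
# ksizes/strides.
#
#   vol = tf.reshape(tf.range(64, dtype=tf.float32), [1, 4, 4, 4, 1])
#   patches = tf.raw_ops.ExtractVolumePatches(
#       input=vol, ksizes=[1, 2, 2, 2, 1], strides=[1, 2, 2, 2, 1],
#       padding="VALID")
#   # patches has shape [1, 2, 2, 2, 8]: each 2x2x2 block flattened into depth.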
ExtractVolumePatches = tf_export("raw_ops.ExtractVolumePatches")(_ops.to_raw_op(extract_volume_patches)) def extract_volume_patches_eager_fallback(input, ksizes, strides, padding, name, ctx): if not isinstance(ksizes, (list, tuple)): raise TypeError( "Expected list for 'ksizes' argument to " "'extract_volume_patches' Op, not %r." % ksizes) ksizes = [_execute.make_int(_i, "ksizes") for _i in ksizes] if not isinstance(strides, (list, tuple)): raise TypeError( "Expected list for 'strides' argument to " "'extract_volume_patches' Op, not %r." % strides) strides = [_execute.make_int(_i, "strides") for _i in strides] padding = _execute.make_str(padding, "padding") _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx) _inputs_flat = [input] _attrs = ("ksizes", ksizes, "strides", strides, "T", _attr_T, "padding", padding) _result = _execute.execute(b"ExtractVolumePatches", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "ExtractVolumePatches", _inputs_flat, _attrs, _result) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('quantization.fake_quant_with_min_max_args', v1=['quantization.fake_quant_with_min_max_args', 'fake_quant_with_min_max_args']) @deprecated_endpoints('fake_quant_with_min_max_args') def fake_quant_with_min_max_args(inputs, min=-6, max=6, num_bits=8, narrow_range=False, name=None): r"""Fake-quantize the 'inputs' tensor, type float to 'outputs' tensor of same type. Attributes `[min; max]` define the clamping range for the `inputs` data. `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and then de-quantized and output as floats in `[min; max]` interval. `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. Before quantization, `min` and `max` values are adjusted with the following logic. It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, the behavior can be unexpected: If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. Quantization is called fake since the output is still in floating point. Args: inputs: A `Tensor` of type `float32`. min: An optional `float`. Defaults to `-6`. max: An optional `float`. Defaults to `6`. num_bits: An optional `int`. Defaults to `8`. narrow_range: An optional `bool`. Defaults to `False`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "FakeQuantWithMinMaxArgs", name, tld.op_callbacks, inputs, "min", min, "max", max, "num_bits", num_bits, "narrow_range", narrow_range) return _result except _core._FallbackException: try: return fake_quant_with_min_max_args_eager_fallback( inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. 
except (TypeError, ValueError): result = _dispatch.dispatch( fake_quant_with_min_max_args, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. if min is None: min = -6 min = _execute.make_float(min, "min") if max is None: max = 6 max = _execute.make_float(max, "max") if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if narrow_range is None: narrow_range = False narrow_range = _execute.make_bool(narrow_range, "narrow_range") try: _, _, _op, _outputs = _op_def_library._apply_op_helper( "FakeQuantWithMinMaxArgs", inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( fake_quant_with_min_max_args, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("min", _op.get_attr("min"), "max", _op.get_attr("max"), "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range")) _inputs_flat = _op.inputs _execute.record_gradient( "FakeQuantWithMinMaxArgs", _inputs_flat, _attrs, _result) _result, = _result return _result FakeQuantWithMinMaxArgs = tf_export("raw_ops.FakeQuantWithMinMaxArgs")(_ops.to_raw_op(fake_quant_with_min_max_args)) def fake_quant_with_min_max_args_eager_fallback(inputs, min, max, num_bits, narrow_range, name, ctx): if min is None: min = -6 min = _execute.make_float(min, "min") if max is None: max = 6 max = _execute.make_float(max, "max") if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if narrow_range is None: narrow_range = False narrow_range = _execute.make_bool(narrow_range, "narrow_range") inputs = _ops.convert_to_tensor(inputs, _dtypes.float32) _inputs_flat = [inputs] _attrs = ("min", min, "max", max, "num_bits", num_bits, "narrow_range", narrow_range) _result = _execute.execute(b"FakeQuantWithMinMaxArgs", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "FakeQuantWithMinMaxArgs", _inputs_flat, _attrs, _result) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('quantization.fake_quant_with_min_max_args_gradient', v1=['quantization.fake_quant_with_min_max_args_gradient', 'fake_quant_with_min_max_args_gradient']) @deprecated_endpoints('fake_quant_with_min_max_args_gradient') def fake_quant_with_min_max_args_gradient(gradients, inputs, min=-6, max=6, num_bits=8, narrow_range=False, name=None): r"""Compute gradients for a FakeQuantWithMinMaxArgs operation. Args: gradients: A `Tensor` of type `float32`. Backpropagated gradients above the FakeQuantWithMinMaxArgs operation. inputs: A `Tensor` of type `float32`. Values passed as inputs to the FakeQuantWithMinMaxArgs operation. min: An optional `float`. Defaults to `-6`. max: An optional `float`. Defaults to `6`. num_bits: An optional `int`. Defaults to `8`. narrow_range: An optional `bool`. Defaults to `False`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. 
""" _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "FakeQuantWithMinMaxArgsGradient", name, tld.op_callbacks, gradients, inputs, "min", min, "max", max, "num_bits", num_bits, "narrow_range", narrow_range) return _result except _core._FallbackException: try: return fake_quant_with_min_max_args_gradient_eager_fallback( gradients, inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( fake_quant_with_min_max_args_gradient, gradients=gradients, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. if min is None: min = -6 min = _execute.make_float(min, "min") if max is None: max = 6 max = _execute.make_float(max, "max") if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if narrow_range is None: narrow_range = False narrow_range = _execute.make_bool(narrow_range, "narrow_range") try: _, _, _op, _outputs = _op_def_library._apply_op_helper( "FakeQuantWithMinMaxArgsGradient", gradients=gradients, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( fake_quant_with_min_max_args_gradient, gradients=gradients, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("min", _op.get_attr("min"), "max", _op.get_attr("max"), "num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range")) _inputs_flat = _op.inputs _execute.record_gradient( "FakeQuantWithMinMaxArgsGradient", _inputs_flat, _attrs, _result) _result, = _result return _result FakeQuantWithMinMaxArgsGradient = tf_export("raw_ops.FakeQuantWithMinMaxArgsGradient")(_ops.to_raw_op(fake_quant_with_min_max_args_gradient)) def fake_quant_with_min_max_args_gradient_eager_fallback(gradients, inputs, min, max, num_bits, narrow_range, name, ctx): if min is None: min = -6 min = _execute.make_float(min, "min") if max is None: max = 6 max = _execute.make_float(max, "max") if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if narrow_range is None: narrow_range = False narrow_range = _execute.make_bool(narrow_range, "narrow_range") gradients = _ops.convert_to_tensor(gradients, _dtypes.float32) inputs = _ops.convert_to_tensor(inputs, _dtypes.float32) _inputs_flat = [gradients, inputs] _attrs = ("min", min, "max", max, "num_bits", num_bits, "narrow_range", narrow_range) _result = _execute.execute(b"FakeQuantWithMinMaxArgsGradient", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "FakeQuantWithMinMaxArgsGradient", _inputs_flat, _attrs, _result) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('quantization.fake_quant_with_min_max_vars', v1=['quantization.fake_quant_with_min_max_vars', 'fake_quant_with_min_max_vars']) @deprecated_endpoints('fake_quant_with_min_max_vars') def 
fake_quant_with_min_max_vars(inputs, min, max, num_bits=8, narrow_range=False, name=None): r"""Fake-quantize the 'inputs' tensor of type float via global float scalars `min` and `max` to 'outputs' tensor of same shape as `inputs`. `[min; max]` define the clamping range for the `inputs` data. `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and then de-quantized and output as floats in `[min; max]` interval. `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. Before quantization, `min` and `max` values are adjusted with the following logic. It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, the behavior can be unexpected: If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. This operation has a gradient and thus allows for training `min` and `max` values. Args: inputs: A `Tensor` of type `float32`. min: A `Tensor` of type `float32`. max: A `Tensor` of type `float32`. num_bits: An optional `int`. Defaults to `8`. narrow_range: An optional `bool`. Defaults to `False`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "FakeQuantWithMinMaxVars", name, tld.op_callbacks, inputs, min, max, "num_bits", num_bits, "narrow_range", narrow_range) return _result except _core._FallbackException: try: return fake_quant_with_min_max_vars_eager_fallback( inputs, min, max, num_bits=num_bits, narrow_range=narrow_range, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( fake_quant_with_min_max_vars, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. 
if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if narrow_range is None: narrow_range = False narrow_range = _execute.make_bool(narrow_range, "narrow_range") try: _, _, _op, _outputs = _op_def_library._apply_op_helper( "FakeQuantWithMinMaxVars", inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( fake_quant_with_min_max_vars, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range")) _inputs_flat = _op.inputs _execute.record_gradient( "FakeQuantWithMinMaxVars", _inputs_flat, _attrs, _result) _result, = _result return _result FakeQuantWithMinMaxVars = tf_export("raw_ops.FakeQuantWithMinMaxVars")(_ops.to_raw_op(fake_quant_with_min_max_vars)) def fake_quant_with_min_max_vars_eager_fallback(inputs, min, max, num_bits, narrow_range, name, ctx): if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if narrow_range is None: narrow_range = False narrow_range = _execute.make_bool(narrow_range, "narrow_range") inputs = _ops.convert_to_tensor(inputs, _dtypes.float32) min = _ops.convert_to_tensor(min, _dtypes.float32) max = _ops.convert_to_tensor(max, _dtypes.float32) _inputs_flat = [inputs, min, max] _attrs = ("num_bits", num_bits, "narrow_range", narrow_range) _result = _execute.execute(b"FakeQuantWithMinMaxVars", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "FakeQuantWithMinMaxVars", _inputs_flat, _attrs, _result) _result, = _result return _result _FakeQuantWithMinMaxVarsGradientOutput = collections.namedtuple( "FakeQuantWithMinMaxVarsGradient", ["backprops_wrt_input", "backprop_wrt_min", "backprop_wrt_max"]) @_dispatch.add_dispatch_list @tf_export('quantization.fake_quant_with_min_max_vars_gradient', v1=['quantization.fake_quant_with_min_max_vars_gradient', 'fake_quant_with_min_max_vars_gradient']) @deprecated_endpoints('fake_quant_with_min_max_vars_gradient') def fake_quant_with_min_max_vars_gradient(gradients, inputs, min, max, num_bits=8, narrow_range=False, name=None): r"""Compute gradients for a FakeQuantWithMinMaxVars operation. Args: gradients: A `Tensor` of type `float32`. Backpropagated gradients above the FakeQuantWithMinMaxVars operation. inputs: A `Tensor` of type `float32`. Values passed as inputs to the FakeQuantWithMinMaxVars operation. min, max: Quantization interval, scalar floats. min: A `Tensor` of type `float32`. max: A `Tensor` of type `float32`. num_bits: An optional `int`. Defaults to `8`. The bitwidth of the quantization; between 2 and 8, inclusive. narrow_range: An optional `bool`. Defaults to `False`. Whether to quantize into 2^num_bits - 1 distinct values. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max). backprops_wrt_input: A `Tensor` of type `float32`. backprop_wrt_min: A `Tensor` of type `float32`. backprop_wrt_max: A `Tensor` of type `float32`. 
""" _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "FakeQuantWithMinMaxVarsGradient", name, tld.op_callbacks, gradients, inputs, min, max, "num_bits", num_bits, "narrow_range", narrow_range) _result = _FakeQuantWithMinMaxVarsGradientOutput._make(_result) return _result except _core._FallbackException: try: return fake_quant_with_min_max_vars_gradient_eager_fallback( gradients, inputs, min, max, num_bits=num_bits, narrow_range=narrow_range, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( fake_quant_with_min_max_vars_gradient, gradients=gradients, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if narrow_range is None: narrow_range = False narrow_range = _execute.make_bool(narrow_range, "narrow_range") try: _, _, _op, _outputs = _op_def_library._apply_op_helper( "FakeQuantWithMinMaxVarsGradient", gradients=gradients, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( fake_quant_with_min_max_vars_gradient, gradients=gradients, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range")) _inputs_flat = _op.inputs _execute.record_gradient( "FakeQuantWithMinMaxVarsGradient", _inputs_flat, _attrs, _result) _result = _FakeQuantWithMinMaxVarsGradientOutput._make(_result) return _result FakeQuantWithMinMaxVarsGradient = tf_export("raw_ops.FakeQuantWithMinMaxVarsGradient")(_ops.to_raw_op(fake_quant_with_min_max_vars_gradient)) def fake_quant_with_min_max_vars_gradient_eager_fallback(gradients, inputs, min, max, num_bits, narrow_range, name, ctx): if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if narrow_range is None: narrow_range = False narrow_range = _execute.make_bool(narrow_range, "narrow_range") gradients = _ops.convert_to_tensor(gradients, _dtypes.float32) inputs = _ops.convert_to_tensor(inputs, _dtypes.float32) min = _ops.convert_to_tensor(min, _dtypes.float32) max = _ops.convert_to_tensor(max, _dtypes.float32) _inputs_flat = [gradients, inputs, min, max] _attrs = ("num_bits", num_bits, "narrow_range", narrow_range) _result = _execute.execute(b"FakeQuantWithMinMaxVarsGradient", 3, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "FakeQuantWithMinMaxVarsGradient", _inputs_flat, _attrs, _result) _result = _FakeQuantWithMinMaxVarsGradientOutput._make(_result) return _result @_dispatch.add_dispatch_list @tf_export('quantization.fake_quant_with_min_max_vars_per_channel', v1=['quantization.fake_quant_with_min_max_vars_per_channel', 'fake_quant_with_min_max_vars_per_channel']) @deprecated_endpoints('fake_quant_with_min_max_vars_per_channel') def 
fake_quant_with_min_max_vars_per_channel(inputs, min, max, num_bits=8, narrow_range=False, name=None): r"""Fake-quantize the 'inputs' tensor of type float and one of the shapes: `[d]`, `[b, d]` `[b, h, w, d]` via per-channel floats `min` and `max` of shape `[d]` to 'outputs' tensor of same shape as `inputs`. `[min; max]` define the clamping range for the `inputs` data. `inputs` values are quantized into the quantization range (`[0; 2^num_bits - 1]` when `narrow_range` is false and `[1; 2^num_bits - 1]` when it is true) and then de-quantized and output as floats in `[min; max]` interval. `num_bits` is the bitwidth of the quantization; between 2 and 16, inclusive. Before quantization, `min` and `max` values are adjusted with the following logic. It is suggested to have `min <= 0 <= max`. If `0` is not in the range of values, the behavior can be unexpected: If `0 < min < max`: `min_adj = 0` and `max_adj = max - min`. If `min < max < 0`: `min_adj = min - max` and `max_adj = 0`. If `min <= 0 <= max`: `scale = (max - min) / (2^num_bits - 1) `, `min_adj = scale * round(min / scale)` and `max_adj = max + min_adj - min`. This operation has a gradient and thus allows for training `min` and `max` values. Args: inputs: A `Tensor` of type `float32`. min: A `Tensor` of type `float32`. max: A `Tensor` of type `float32`. num_bits: An optional `int`. Defaults to `8`. narrow_range: An optional `bool`. Defaults to `False`. name: A name for the operation (optional). Returns: A `Tensor` of type `float32`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "FakeQuantWithMinMaxVarsPerChannel", name, tld.op_callbacks, inputs, min, max, "num_bits", num_bits, "narrow_range", narrow_range) return _result except _core._FallbackException: try: return fake_quant_with_min_max_vars_per_channel_eager_fallback( inputs, min, max, num_bits=num_bits, narrow_range=narrow_range, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( fake_quant_with_min_max_vars_per_channel, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. 
if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if narrow_range is None: narrow_range = False narrow_range = _execute.make_bool(narrow_range, "narrow_range") try: _, _, _op, _outputs = _op_def_library._apply_op_helper( "FakeQuantWithMinMaxVarsPerChannel", inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( fake_quant_with_min_max_vars_per_channel, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range")) _inputs_flat = _op.inputs _execute.record_gradient( "FakeQuantWithMinMaxVarsPerChannel", _inputs_flat, _attrs, _result) _result, = _result return _result FakeQuantWithMinMaxVarsPerChannel = tf_export("raw_ops.FakeQuantWithMinMaxVarsPerChannel")(_ops.to_raw_op(fake_quant_with_min_max_vars_per_channel)) def fake_quant_with_min_max_vars_per_channel_eager_fallback(inputs, min, max, num_bits, narrow_range, name, ctx): if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if narrow_range is None: narrow_range = False narrow_range = _execute.make_bool(narrow_range, "narrow_range") inputs = _ops.convert_to_tensor(inputs, _dtypes.float32) min = _ops.convert_to_tensor(min, _dtypes.float32) max = _ops.convert_to_tensor(max, _dtypes.float32) _inputs_flat = [inputs, min, max] _attrs = ("num_bits", num_bits, "narrow_range", narrow_range) _result = _execute.execute(b"FakeQuantWithMinMaxVarsPerChannel", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "FakeQuantWithMinMaxVarsPerChannel", _inputs_flat, _attrs, _result) _result, = _result return _result _FakeQuantWithMinMaxVarsPerChannelGradientOutput = collections.namedtuple( "FakeQuantWithMinMaxVarsPerChannelGradient", ["backprops_wrt_input", "backprop_wrt_min", "backprop_wrt_max"]) @_dispatch.add_dispatch_list @tf_export('quantization.fake_quant_with_min_max_vars_per_channel_gradient', v1=['quantization.fake_quant_with_min_max_vars_per_channel_gradient', 'fake_quant_with_min_max_vars_per_channel_gradient']) @deprecated_endpoints('fake_quant_with_min_max_vars_per_channel_gradient') def fake_quant_with_min_max_vars_per_channel_gradient(gradients, inputs, min, max, num_bits=8, narrow_range=False, name=None): r"""Compute gradients for a FakeQuantWithMinMaxVarsPerChannel operation. Args: gradients: A `Tensor` of type `float32`. Backpropagated gradients above the FakeQuantWithMinMaxVars operation, shape one of: `[d]`, `[b, d]`, `[b, h, w, d]`. inputs: A `Tensor` of type `float32`. Values passed as inputs to the FakeQuantWithMinMaxVars operation, shape same as `gradients`. min, max: Quantization interval, floats of shape `[d]`. min: A `Tensor` of type `float32`. max: A `Tensor` of type `float32`. num_bits: An optional `int`. Defaults to `8`. The bitwidth of the quantization; between 2 and 16, inclusive. narrow_range: An optional `bool`. Defaults to `False`. Whether to quantize into 2^num_bits - 1 distinct values. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (backprops_wrt_input, backprop_wrt_min, backprop_wrt_max). backprops_wrt_input: A `Tensor` of type `float32`. backprop_wrt_min: A `Tensor` of type `float32`. 
backprop_wrt_max: A `Tensor` of type `float32`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "FakeQuantWithMinMaxVarsPerChannelGradient", name, tld.op_callbacks, gradients, inputs, min, max, "num_bits", num_bits, "narrow_range", narrow_range) _result = _FakeQuantWithMinMaxVarsPerChannelGradientOutput._make(_result) return _result except _core._FallbackException: try: return fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback( gradients, inputs, min, max, num_bits=num_bits, narrow_range=narrow_range, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( fake_quant_with_min_max_vars_per_channel_gradient, gradients=gradients, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if narrow_range is None: narrow_range = False narrow_range = _execute.make_bool(narrow_range, "narrow_range") try: _, _, _op, _outputs = _op_def_library._apply_op_helper( "FakeQuantWithMinMaxVarsPerChannelGradient", gradients=gradients, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( fake_quant_with_min_max_vars_per_channel_gradient, gradients=gradients, inputs=inputs, min=min, max=max, num_bits=num_bits, narrow_range=narrow_range, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("num_bits", _op._get_attr_int("num_bits"), "narrow_range", _op._get_attr_bool("narrow_range")) _inputs_flat = _op.inputs _execute.record_gradient( "FakeQuantWithMinMaxVarsPerChannelGradient", _inputs_flat, _attrs, _result) _result = _FakeQuantWithMinMaxVarsPerChannelGradientOutput._make(_result) return _result FakeQuantWithMinMaxVarsPerChannelGradient = tf_export("raw_ops.FakeQuantWithMinMaxVarsPerChannelGradient")(_ops.to_raw_op(fake_quant_with_min_max_vars_per_channel_gradient)) def fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback(gradients, inputs, min, max, num_bits, narrow_range, name, ctx): if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if narrow_range is None: narrow_range = False narrow_range = _execute.make_bool(narrow_range, "narrow_range") gradients = _ops.convert_to_tensor(gradients, _dtypes.float32) inputs = _ops.convert_to_tensor(inputs, _dtypes.float32) min = _ops.convert_to_tensor(min, _dtypes.float32) max = _ops.convert_to_tensor(max, _dtypes.float32) _inputs_flat = [gradients, inputs, min, max] _attrs = ("num_bits", num_bits, "narrow_range", narrow_range) _result = _execute.execute(b"FakeQuantWithMinMaxVarsPerChannelGradient", 3, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "FakeQuantWithMinMaxVarsPerChannelGradient", _inputs_flat, _attrs, _result) _result = _FakeQuantWithMinMaxVarsPerChannelGradientOutput._make(_result) return _result def fill(dims, value, name=None): r"""Creates a tensor filled with a scalar value. 
This operation creates a tensor of shape `dims` and fills it with `value`. For example: ``` # Output tensor has shape [2, 3]. fill([2, 3], 9) ==> [[9, 9, 9] [9, 9, 9]] ``` `tf.fill` differs from `tf.constant` in a few ways: * `tf.fill` only supports scalar contents, whereas `tf.constant` supports Tensor values. * `tf.fill` creates an Op in the computation graph that constructs the actual Tensor value at runtime. This is in contrast to `tf.constant` which embeds the entire Tensor into the graph with a `Const` node. * Because `tf.fill` evaluates at graph runtime, it supports dynamic shapes based on other runtime Tensors, unlike `tf.constant`. Args: dims: A `Tensor`. Must be one of the following types: `int32`, `int64`. 1-D. Represents the shape of the output tensor. value: A `Tensor`. 0-D (scalar). Value to fill the returned tensor. @compatibility(numpy) Equivalent to np.full @end_compatibility name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `value`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "Fill", name, tld.op_callbacks, dims, value) return _result except _core._FallbackException: try: return fill_eager_fallback( dims, value, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "Fill", dims=dims, value=value, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T"), "index_type", _op._get_attr_type("index_type")) _inputs_flat = _op.inputs _execute.record_gradient( "Fill", _inputs_flat, _attrs, _result) _result, = _result return _result Fill = tf_export("raw_ops.Fill")(_ops.to_raw_op(fill)) def fill_eager_fallback(dims, value, name, ctx): _attr_T, (value,) = _execute.args_to_matching_eager([value], ctx) _attr_index_type, (dims,) = _execute.args_to_matching_eager([dims], ctx, _dtypes.int32) _inputs_flat = [dims, value] _attrs = ("T", _attr_T, "index_type", _attr_index_type) _result = _execute.execute(b"Fill", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "Fill", _inputs_flat, _attrs, _result) _result, = _result return _result def fingerprint(data, method, name=None): r"""Generates fingerprint values. Generates fingerprint values of `data`. Fingerprint op considers the first dimension of `data` as the batch dimension, and `output[i]` contains the fingerprint value generated from contents in `data[i, ...]` for all `i`. Fingerprint op writes fingerprint values as byte arrays. For example, the default method `farmhash64` generates a 64-bit fingerprint value at a time. This 8-byte value is written out as an `uint8` array of size 8, in little-endian order. For example, suppose that `data` has data type `DT_INT32` and shape (2, 3, 4), and that the fingerprint method is `farmhash64`. In this case, the output shape is (2, 8), where 2 is the batch dimension size of `data`, and 8 is the size of each fingerprint value in bytes. `output[0, :]` is generated from 12 integers in `data[0, :, :]` and similarly `output[1, :]` is generated from other 12 integers in `data[1, :, :]`. 
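  For illustration (a minimal sketch, not part of the generated op definition;
  it assumes eager execution and the `tf.raw_ops.Fingerprint` endpoint
  registered below):

  ```python
  data = tf.reshape(tf.range(24, dtype=tf.int32), [2, 3, 4])
  fp = tf.raw_ops.Fingerprint(data=data, method="farmhash64")
  # fp is a uint8 tensor of shape (2, 8): one 8-byte fingerprint per batch row.
  ```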
Note that this op fingerprints the raw underlying buffer, and it does not fingerprint Tensor's metadata such as data type and/or shape. For example, the fingerprint values are invariant under reshapes and bitcasts as long as the batch dimension remain the same: ``` Fingerprint(data) == Fingerprint(Reshape(data, ...)) Fingerprint(data) == Fingerprint(Bitcast(data, ...)) ``` For string data, one should expect `Fingerprint(data) != Fingerprint(ReduceJoin(data))` in general. Args: data: A `Tensor`. Must have rank 1 or higher. method: A `Tensor` of type `string`. Fingerprint method used by this op. Currently available method is `farmhash::fingerprint64`. name: A name for the operation (optional). Returns: A `Tensor` of type `uint8`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "Fingerprint", name, tld.op_callbacks, data, method) return _result except _core._FallbackException: try: return fingerprint_eager_fallback( data, method, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "Fingerprint", data=data, method=method, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "Fingerprint", _inputs_flat, _attrs, _result) _result, = _result return _result Fingerprint = tf_export("raw_ops.Fingerprint")(_ops.to_raw_op(fingerprint)) def fingerprint_eager_fallback(data, method, name, ctx): _attr_T, (data,) = _execute.args_to_matching_eager([data], ctx) method = _ops.convert_to_tensor(method, _dtypes.string) _inputs_flat = [data, method] _attrs = ("T", _attr_T) _result = _execute.execute(b"Fingerprint", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "Fingerprint", _inputs_flat, _attrs, _result) _result, = _result return _result def gather(params, indices, validate_indices=True, name=None): r"""Gather slices from `params` according to `indices`. `indices` must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an output tensor with shape `indices.shape + params.shape[1:]` where: ```python # Scalar indices output[:, ..., :] = params[indices, :, ... :] # Vector indices output[i, :, ..., :] = params[indices[i], :, ... :] # Higher rank indices output[i, ..., j, :, ... :] = params[indices[i, ..., j], :, ..., :] ``` If `indices` is a permutation and `len(indices) == params.shape[0]` then this operation will permute `params` accordingly. `validate_indices`: DEPRECATED. If this operation is assigned to CPU, values in `indices` are always validated to be within range. If assigned to GPU, out-of-bound indices result in safe but unspecified behavior, which may include raising an error. <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt> </div> Args: params: A `Tensor`. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. validate_indices: An optional `bool`. Defaults to `True`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `params`. 
""" _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "Gather", name, tld.op_callbacks, params, indices, "validate_indices", validate_indices) return _result except _core._FallbackException: try: return gather_eager_fallback( params, indices, validate_indices=validate_indices, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. if validate_indices is None: validate_indices = True validate_indices = _execute.make_bool(validate_indices, "validate_indices") _, _, _op, _outputs = _op_def_library._apply_op_helper( "Gather", params=params, indices=indices, validate_indices=validate_indices, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("validate_indices", _op._get_attr_bool("validate_indices"), "Tparams", _op._get_attr_type("Tparams"), "Tindices", _op._get_attr_type("Tindices")) _inputs_flat = _op.inputs _execute.record_gradient( "Gather", _inputs_flat, _attrs, _result) _result, = _result return _result Gather = tf_export("raw_ops.Gather")(_ops.to_raw_op(gather)) def gather_eager_fallback(params, indices, validate_indices, name, ctx): if validate_indices is None: validate_indices = True validate_indices = _execute.make_bool(validate_indices, "validate_indices") _attr_Tparams, (params,) = _execute.args_to_matching_eager([params], ctx) _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx) _inputs_flat = [params, indices] _attrs = ("validate_indices", validate_indices, "Tparams", _attr_Tparams, "Tindices", _attr_Tindices) _result = _execute.execute(b"Gather", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "Gather", _inputs_flat, _attrs, _result) _result, = _result return _result def gather_nd(params, indices, name=None): r"""Gather slices from `params` into a Tensor with shape specified by `indices`. `indices` is a K-dimensional integer tensor, best thought of as a (K-1)-dimensional tensor of indices into `params`, where each element defines a slice of `params`: output[\\(i_0, ..., i_{K-2}\\)] = params[indices[\\(i_0, ..., i_{K-2}\\)]] Whereas in `tf.gather` `indices` defines slices into the `axis` dimension of `params`, in `tf.gather_nd`, `indices` defines slices into the first `N` dimensions of `params`, where `N = indices.shape[-1]`. The last dimension of `indices` can be at most the rank of `params`: indices.shape[-1] <= params.rank The last dimension of `indices` corresponds to elements (if `indices.shape[-1] == params.rank`) or slices (if `indices.shape[-1] < params.rank`) along dimension `indices.shape[-1]` of `params`. The output tensor has shape indices.shape[:-1] + params.shape[indices.shape[-1]:] Note that on CPU, if an out of bound index is found, an error is returned. On GPU, if an out of bound index is found, a 0 is stored in the corresponding output value. Some examples below. 
Simple indexing into a matrix: ```python indices = [[0, 0], [1, 1]] params = [['a', 'b'], ['c', 'd']] output = ['a', 'd'] ``` Slice indexing into a matrix: ```python indices = [[1], [0]] params = [['a', 'b'], ['c', 'd']] output = [['c', 'd'], ['a', 'b']] ``` Indexing into a 3-tensor: ```python indices = [[1]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [[['a1', 'b1'], ['c1', 'd1']]] indices = [[0, 1], [1, 0]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [['c0', 'd0'], ['a1', 'b1']] indices = [[0, 0, 1], [1, 0, 1]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = ['b0', 'b1'] ``` Batched indexing into a matrix: ```python indices = [[[0, 0]], [[0, 1]]] params = [['a', 'b'], ['c', 'd']] output = [['a'], ['b']] ``` Batched slice indexing into a matrix: ```python indices = [[[1]], [[0]]] params = [['a', 'b'], ['c', 'd']] output = [[['c', 'd']], [['a', 'b']]] ``` Batched indexing into a 3-tensor: ```python indices = [[[1]], [[0]]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [[[['a1', 'b1'], ['c1', 'd1']]], [[['a0', 'b0'], ['c0', 'd0']]]] indices = [[[0, 1], [1, 0]], [[0, 0], [1, 1]]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [[['c0', 'd0'], ['a1', 'b1']], [['a0', 'b0'], ['c1', 'd1']]] indices = [[[0, 0, 1], [1, 0, 1]], [[0, 1, 1], [1, 1, 0]]] params = [[['a0', 'b0'], ['c0', 'd0']], [['a1', 'b1'], ['c1', 'd1']]] output = [['b0', 'b1'], ['d0', 'c1']] ``` See also `tf.gather` and `tf.batch_gather`. Args: params: A `Tensor`. The tensor from which to gather values. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. Index tensor. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `params`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "GatherNd", name, tld.op_callbacks, params, indices) return _result except _core._FallbackException: try: return gather_nd_eager_fallback( params, indices, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "GatherNd", params=params, indices=indices, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("Tparams", _op._get_attr_type("Tparams"), "Tindices", _op._get_attr_type("Tindices")) _inputs_flat = _op.inputs _execute.record_gradient( "GatherNd", _inputs_flat, _attrs, _result) _result, = _result return _result GatherNd = tf_export("raw_ops.GatherNd")(_ops.to_raw_op(gather_nd)) def gather_nd_eager_fallback(params, indices, name, ctx): _attr_Tparams, (params,) = _execute.args_to_matching_eager([params], ctx) _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx) _inputs_flat = [params, indices] _attrs = ("Tparams", _attr_Tparams, "Tindices", _attr_Tindices) _result = _execute.execute(b"GatherNd", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "GatherNd", _inputs_flat, _attrs, _result) _result, = _result return _result def gather_v2(params, indices, axis, batch_dims=0, name=None): r"""Gather slices from `params` axis `axis` according to `indices`. 
`indices` must be an integer tensor of any dimension (usually 0-D or 1-D). Produces an output tensor with shape `params.shape[:axis] + indices.shape + params.shape[axis + 1:]` where: ```python # Scalar indices (output is rank(params) - 1). output[a_0, ..., a_n, b_0, ..., b_n] = params[a_0, ..., a_n, indices, b_0, ..., b_n] # Vector indices (output is rank(params)). output[a_0, ..., a_n, i, b_0, ..., b_n] = params[a_0, ..., a_n, indices[i], b_0, ..., b_n] # Higher rank indices (output is rank(params) + rank(indices) - 1). output[a_0, ..., a_n, i, ..., j, b_0, ... b_n] = params[a_0, ..., a_n, indices[i, ..., j], b_0, ..., b_n] ``` <div style="width:70%; margin:auto; margin-bottom:10px; margin-top:20px;"> <img style="width:100%" src="https://www.tensorflow.org/images/Gather.png" alt> </div> Note that on CPU, if an out of bound index is found, an error is returned. On GPU, if an out of bound index is found, a 0 is stored in the corresponding output value. See also `tf.batch_gather` and `tf.gather_nd`. Args: params: A `Tensor`. The tensor from which to gather values. Must be at least rank `axis + 1`. indices: A `Tensor`. Must be one of the following types: `int32`, `int64`. Index tensor. Must be in range `[0, params.shape[axis])`. axis: A `Tensor`. Must be one of the following types: `int32`, `int64`. The axis in `params` to gather `indices` from. Defaults to the first dimension. Supports negative indexes. batch_dims: An optional `int`. Defaults to `0`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `params`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "GatherV2", name, tld.op_callbacks, params, indices, axis, "batch_dims", batch_dims) return _result except _core._FallbackException: try: return gather_v2_eager_fallback( params, indices, axis, batch_dims=batch_dims, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. 
if batch_dims is None: batch_dims = 0 batch_dims = _execute.make_int(batch_dims, "batch_dims") _, _, _op, _outputs = _op_def_library._apply_op_helper( "GatherV2", params=params, indices=indices, axis=axis, batch_dims=batch_dims, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("batch_dims", _op._get_attr_int("batch_dims"), "Tparams", _op._get_attr_type("Tparams"), "Tindices", _op._get_attr_type("Tindices"), "Taxis", _op._get_attr_type("Taxis")) _inputs_flat = _op.inputs _execute.record_gradient( "GatherV2", _inputs_flat, _attrs, _result) _result, = _result return _result GatherV2 = tf_export("raw_ops.GatherV2")(_ops.to_raw_op(gather_v2)) def gather_v2_eager_fallback(params, indices, axis, batch_dims, name, ctx): if batch_dims is None: batch_dims = 0 batch_dims = _execute.make_int(batch_dims, "batch_dims") _attr_Tparams, (params,) = _execute.args_to_matching_eager([params], ctx) _attr_Tindices, (indices,) = _execute.args_to_matching_eager([indices], ctx) _attr_Taxis, (axis,) = _execute.args_to_matching_eager([axis], ctx) _inputs_flat = [params, indices, axis] _attrs = ("batch_dims", batch_dims, "Tparams", _attr_Tparams, "Tindices", _attr_Tindices, "Taxis", _attr_Taxis) _result = _execute.execute(b"GatherV2", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "GatherV2", _inputs_flat, _attrs, _result) _result, = _result return _result
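
# Illustrative usage sketch (not part of the machine-generated wrappers): it
# shows how the gather family documented above behaves through the public
# `tf.raw_ops` endpoints exported in this module. Assumes eager execution and
# an importable `tensorflow`; the helper name `_example_gather_ops` is purely
# illustrative.
def _example_gather_ops():
  import tensorflow as tf
  params = tf.constant([['a', 'b'], ['c', 'd']])
  # GatherV2 along axis 0 selects rows 1 and 0 of `params`.
  rows = tf.raw_ops.GatherV2(params=params, indices=[1, 0], axis=0,
                             batch_dims=0)
  # GatherNd with full-rank indices picks the elements params[0, 0] and
  # params[1, 1], matching the "simple indexing" example in the docstring.
  elems = tf.raw_ops.GatherNd(params=params, indices=[[0, 0], [1, 1]])
  return rows, elems
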
@_dispatch.add_dispatch_list
@tf_export('guarantee_const')
def guarantee_const(input, name=None):
  r"""Gives a guarantee to the TF runtime that the input tensor is a constant.

  The runtime is then free to make optimizations based on this.

  Only accepts value typed tensors as inputs and rejects resource variable
  handles as input.

  Returns the input tensor without modification.

  Args:
    input: A `Tensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "GuaranteeConst", name,
        tld.op_callbacks, input)
      return _result
    except _core._FallbackException:
      try:
        return guarantee_const_eager_fallback(
            input, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              guarantee_const, input=input, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "GuaranteeConst", input=input, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          guarantee_const, input=input, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "GuaranteeConst", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
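
# Illustrative usage sketch (not part of the machine-generated wrappers):
# GuaranteeConst forwards its input unchanged while promising the runtime that
# the value is constant, so the output equals the input. Assumes eager
# execution; the helper name is purely illustrative.
def _example_guarantee_const():
  import tensorflow as tf
  x = tf.constant([1.0, 2.0, 3.0])
  # The result has the same dtype, shape and contents as `x`.
  return tf.raw_ops.GuaranteeConst(input=x)
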
GuaranteeConst = tf_export("raw_ops.GuaranteeConst")(_ops.to_raw_op(guarantee_const)) def guarantee_const_eager_fallback(input, name, ctx): _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx) _inputs_flat = [input] _attrs = ("T", _attr_T) _result = _execute.execute(b"GuaranteeConst", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "GuaranteeConst", _inputs_flat, _attrs, _result) _result, = _result return _result def identity(input, name=None): r"""Return a tensor with the same shape and contents as the input tensor or value. Args: input: A `Tensor`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "Identity", name, tld.op_callbacks, input) return _result except _core._FallbackException: try: return identity_eager_fallback( input, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "Identity", input=input, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "Identity", _inputs_flat, _attrs, _result) _result, = _result return _result Identity = tf_export("raw_ops.Identity")(_ops.to_raw_op(identity)) def identity_eager_fallback(input, name, ctx): _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx) _inputs_flat = [input] _attrs = ("T", _attr_T) _result = _execute.execute(b"Identity", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "Identity", _inputs_flat, _attrs, _result) _result, = _result return _result
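
# Illustrative usage sketch (not part of the machine-generated wrappers):
# Identity returns a tensor with the same shape and contents as its input,
# which is handy for inserting an explicit node into a computation without
# changing any values. Assumes eager execution; the helper name is purely
# illustrative.
def _example_identity():
  import tensorflow as tf
  x = tf.constant([[1, 2], [3, 4]])
  # `y` compares equal to `x` element-wise.
  y = tf.raw_ops.Identity(input=x)
  return y
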
@_dispatch.add_dispatch_list
@tf_export('identity_n')
def identity_n(input, name=None):
  r"""Returns a list of tensors with the same shapes and contents as the input tensors.

  This op can be used to override the gradient for complicated functions. For
  example, suppose y = f(x) and we wish to apply a custom function g for backprop
  such that dx = g(dy). In Python,

  ```python
  with tf.get_default_graph().gradient_override_map(
      {'IdentityN': 'OverrideGradientWithG'}):
    y, _ = identity_n([f(x), x])

  @tf.RegisterGradient('OverrideGradientWithG')
  def ApplyG(op, dy, _):
    return [None, g(dy)]  # Do not backprop to f(x).
  ```

  Args:
    input: A list of `Tensor` objects.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "IdentityN", name,
        tld.op_callbacks, input)
      return _result
    except _core._FallbackException:
      try:
        return identity_n_eager_fallback(
            input, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
      except (TypeError, ValueError):
        result = _dispatch.dispatch(
              identity_n, input=input, name=name)
        if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
          return result
        raise
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "IdentityN", input=input, name=name)
  except (TypeError, ValueError):
    result = _dispatch.dispatch(
          identity_n, input=input, name=name)
    if result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op.get_attr("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "IdentityN", _inputs_flat, _attrs, _result)
  return _result
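
# Illustrative usage sketch (not part of the machine-generated wrappers):
# IdentityN returns a list of tensors matching its inputs one for one; the
# docstring above shows the TF1-style gradient_override_map use case. Assumes
# eager execution; the helper name is purely illustrative.
def _example_identity_n():
  import tensorflow as tf
  a = tf.constant([1, 2, 3])
  b = tf.constant(4.0)
  # Mixed dtypes are fine; each output mirrors the corresponding input.
  out_a, out_b = tf.raw_ops.IdentityN(input=[a, b])
  return out_a, out_b
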
IdentityN = tf_export("raw_ops.IdentityN")(_ops.to_raw_op(identity_n)) def identity_n_eager_fallback(input, name, ctx): _attr_T, input = _execute.convert_to_mixed_eager_tensors(input, ctx) _inputs_flat = list(input) _attrs = ("T", _attr_T) _result = _execute.execute(b"IdentityN", len(input), inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "IdentityN", _inputs_flat, _attrs, _result) return _result def immutable_const(dtype, shape, memory_region_name, name=None): r"""Returns immutable tensor from memory region. The current implementation memmaps the tensor from a file. Args: dtype: A `tf.DType`. Type of the returned tensor. shape: A `tf.TensorShape` or list of `ints`. Shape of the returned tensor. memory_region_name: A `string`. Name of readonly memory region used by the tensor, see NewReadOnlyMemoryRegionFromFile in tensorflow::Env. name: A name for the operation (optional). Returns: A `Tensor` of type `dtype`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "ImmutableConst", name, tld.op_callbacks, "dtype", dtype, "shape", shape, "memory_region_name", memory_region_name) return _result except _core._FallbackException: try: return immutable_const_eager_fallback( dtype=dtype, shape=shape, memory_region_name=memory_region_name, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. dtype = _execute.make_type(dtype, "dtype") shape = _execute.make_shape(shape, "shape") memory_region_name = _execute.make_str(memory_region_name, "memory_region_name") _, _, _op, _outputs = _op_def_library._apply_op_helper( "ImmutableConst", dtype=dtype, shape=shape, memory_region_name=memory_region_name, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape"), "memory_region_name", _op.get_attr("memory_region_name")) _inputs_flat = _op.inputs _execute.record_gradient( "ImmutableConst", _inputs_flat, _attrs, _result) _result, = _result return _result ImmutableConst = tf_export("raw_ops.ImmutableConst")(_ops.to_raw_op(immutable_const)) def immutable_const_eager_fallback(dtype, shape, memory_region_name, name, ctx): dtype = _execute.make_type(dtype, "dtype") shape = _execute.make_shape(shape, "shape") memory_region_name = _execute.make_str(memory_region_name, "memory_region_name") _inputs_flat = [] _attrs = ("dtype", dtype, "shape", shape, "memory_region_name", memory_region_name) _result = _execute.execute(b"ImmutableConst", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "ImmutableConst", _inputs_flat, _attrs, _result) _result, = _result return _result def inplace_add(x, i, v, name=None): r""" Adds v into specified rows of x. Computes y = x; y[i, :] += v; return y. Args: x: A `Tensor`. A `Tensor` of type T. i: A `Tensor` of type `int32`. A vector. Indices into the left-most dimension of `x`. v: A `Tensor`. Must have the same type as `x`. A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. 
""" _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "InplaceAdd", name, tld.op_callbacks, x, i, v) return _result except _core._FallbackException: try: return inplace_add_eager_fallback( x, i, v, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "InplaceAdd", x=x, i=i, v=v, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "InplaceAdd", _inputs_flat, _attrs, _result) _result, = _result return _result InplaceAdd = tf_export("raw_ops.InplaceAdd")(_ops.to_raw_op(inplace_add)) def inplace_add_eager_fallback(x, i, v, name, ctx): _attr_T, _inputs_T = _execute.args_to_matching_eager([x, v], ctx) (x, v) = _inputs_T i = _ops.convert_to_tensor(i, _dtypes.int32) _inputs_flat = [x, i, v] _attrs = ("T", _attr_T) _result = _execute.execute(b"InplaceAdd", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "InplaceAdd", _inputs_flat, _attrs, _result) _result, = _result return _result def inplace_sub(x, i, v, name=None): r""" Subtracts `v` into specified rows of `x`. Computes y = x; y[i, :] -= v; return y. Args: x: A `Tensor`. A `Tensor` of type T. i: A `Tensor` of type `int32`. A vector. Indices into the left-most dimension of `x`. v: A `Tensor`. Must have the same type as `x`. A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "InplaceSub", name, tld.op_callbacks, x, i, v) return _result except _core._FallbackException: try: return inplace_sub_eager_fallback( x, i, v, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "InplaceSub", x=x, i=i, v=v, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "InplaceSub", _inputs_flat, _attrs, _result) _result, = _result return _result InplaceSub = tf_export("raw_ops.InplaceSub")(_ops.to_raw_op(inplace_sub)) def inplace_sub_eager_fallback(x, i, v, name, ctx): _attr_T, _inputs_T = _execute.args_to_matching_eager([x, v], ctx) (x, v) = _inputs_T i = _ops.convert_to_tensor(i, _dtypes.int32) _inputs_flat = [x, i, v] _attrs = ("T", _attr_T) _result = _execute.execute(b"InplaceSub", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "InplaceSub", _inputs_flat, _attrs, _result) _result, = _result return _result def inplace_update(x, i, v, name=None): r""" Updates specified rows with values in `v`. Computes `x[i, :] = v; return x`. Args: x: A `Tensor`. A tensor of type `T`. i: A `Tensor` of type `int32`. A vector. 
Indices into the left-most dimension of `x`. v: A `Tensor`. Must have the same type as `x`. A `Tensor` of type T. Same dimension sizes as x except the first dimension, which must be the same as i's size. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "InplaceUpdate", name, tld.op_callbacks, x, i, v) return _result except _core._FallbackException: try: return inplace_update_eager_fallback( x, i, v, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "InplaceUpdate", x=x, i=i, v=v, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "InplaceUpdate", _inputs_flat, _attrs, _result) _result, = _result return _result InplaceUpdate = tf_export("raw_ops.InplaceUpdate")(_ops.to_raw_op(inplace_update)) def inplace_update_eager_fallback(x, i, v, name, ctx): _attr_T, _inputs_T = _execute.args_to_matching_eager([x, v], ctx) (x, v) = _inputs_T i = _ops.convert_to_tensor(i, _dtypes.int32) _inputs_flat = [x, i, v] _attrs = ("T", _attr_T) _result = _execute.execute(b"InplaceUpdate", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "InplaceUpdate", _inputs_flat, _attrs, _result) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('math.invert_permutation', v1=['math.invert_permutation', 'invert_permutation']) @deprecated_endpoints('invert_permutation') def invert_permutation(x, name=None): r"""Computes the inverse permutation of a tensor. This operation computes the inverse of an index permutation. It takes a 1-D integer tensor `x`, which represents the indices of a zero-based array, and swaps each value with its index position. In other words, for an output tensor `y` and an input tensor `x`, this operation computes the following: `y[x[i]] = i for i in [0, 1, ..., len(x) - 1]` The values must include 0. There can be no duplicate values or negative values. For example: ``` # tensor `x` is [3, 4, 0, 2, 1] invert_permutation(x) ==> [2, 4, 3, 0, 1] ``` Args: x: A `Tensor`. Must be one of the following types: `int32`, `int64`. 1-D. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "InvertPermutation", name, tld.op_callbacks, x) return _result except _core._FallbackException: try: return invert_permutation_eager_fallback( x, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except (TypeError, ValueError): result = _dispatch.dispatch( invert_permutation, x=x, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. 
try: _, _, _op, _outputs = _op_def_library._apply_op_helper( "InvertPermutation", x=x, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( invert_permutation, x=x, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "InvertPermutation", _inputs_flat, _attrs, _result) _result, = _result return _result InvertPermutation = tf_export("raw_ops.InvertPermutation")(_ops.to_raw_op(invert_permutation)) def invert_permutation_eager_fallback(x, name, ctx): _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx, _dtypes.int32) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"InvertPermutation", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "InvertPermutation", _inputs_flat, _attrs, _result) _result, = _result return _result _ListDiffOutput = collections.namedtuple( "ListDiff", ["out", "idx"]) def list_diff(x, y, out_idx=_dtypes.int32, name=None): r"""Computes the difference between two lists of numbers or strings. Given a list `x` and a list `y`, this operation returns a list `out` that represents all values that are in `x` but not in `y`. The returned list `out` is sorted in the same order that the numbers appear in `x` (duplicates are preserved). This operation also returns a list `idx` that represents the position of each `out` element in `x`. In other words: `out[i] = x[idx[i]] for i in [0, 1, ..., len(out) - 1]` For example, given this input: ``` x = [1, 2, 3, 4, 5, 6] y = [1, 3, 5] ``` This operation would return: ``` out ==> [2, 4, 6] idx ==> [1, 3, 5] ``` Args: x: A `Tensor`. 1-D. Values to keep. y: A `Tensor`. Must have the same type as `x`. 1-D. Values to remove. out_idx: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`. name: A name for the operation (optional). Returns: A tuple of `Tensor` objects (out, idx). out: A `Tensor`. Has the same type as `x`. idx: A `Tensor` of type `out_idx`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "ListDiff", name, tld.op_callbacks, x, y, "out_idx", out_idx) _result = _ListDiffOutput._make(_result) return _result except _core._FallbackException: try: return list_diff_eager_fallback( x, y, out_idx=out_idx, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. 
if out_idx is None: out_idx = _dtypes.int32 out_idx = _execute.make_type(out_idx, "out_idx") _, _, _op, _outputs = _op_def_library._apply_op_helper( "ListDiff", x=x, y=y, out_idx=out_idx, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T"), "out_idx", _op._get_attr_type("out_idx")) _inputs_flat = _op.inputs _execute.record_gradient( "ListDiff", _inputs_flat, _attrs, _result) _result = _ListDiffOutput._make(_result) return _result ListDiff = tf_export("raw_ops.ListDiff")(_ops.to_raw_op(list_diff)) def list_diff_eager_fallback(x, y, out_idx, name, ctx): if out_idx is None: out_idx = _dtypes.int32 out_idx = _execute.make_type(out_idx, "out_idx") _attr_T, _inputs_T = _execute.args_to_matching_eager([x, y], ctx) (x, y) = _inputs_T _inputs_flat = [x, y] _attrs = ("T", _attr_T, "out_idx", out_idx) _result = _execute.execute(b"ListDiff", 2, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "ListDiff", _inputs_flat, _attrs, _result) _result = _ListDiffOutput._make(_result) return _result def lower_bound(sorted_inputs, values, out_type=_dtypes.int32, name=None): r"""Applies lower_bound(sorted_search_values, values) along each row. Each set of rows with the same index in (sorted_inputs, values) is treated independently. The resulting row is the equivalent of calling `np.searchsorted(sorted_inputs, values, side='left')`. The result is not a global index to the entire `Tensor`, but rather just the index in the last dimension. A 2-D example: sorted_sequence = [[0, 3, 9, 9, 10], [1, 2, 3, 4, 5]] values = [[2, 4, 9], [0, 2, 6]] result = LowerBound(sorted_sequence, values) result == [[1, 2, 2], [0, 1, 5]] Args: sorted_inputs: A `Tensor`. 2-D Tensor where each row is ordered. values: A `Tensor`. Must have the same type as `sorted_inputs`. 2-D Tensor with the same numbers of rows as `sorted_search_values`. Contains the values that will be searched for in `sorted_search_values`. out_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`. name: A name for the operation (optional). Returns: A `Tensor` of type `out_type`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "LowerBound", name, tld.op_callbacks, sorted_inputs, values, "out_type", out_type) return _result except _core._FallbackException: try: return lower_bound_eager_fallback( sorted_inputs, values, out_type=out_type, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. 
if out_type is None: out_type = _dtypes.int32 out_type = _execute.make_type(out_type, "out_type") _, _, _op, _outputs = _op_def_library._apply_op_helper( "LowerBound", sorted_inputs=sorted_inputs, values=values, out_type=out_type, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T"), "out_type", _op._get_attr_type("out_type")) _inputs_flat = _op.inputs _execute.record_gradient( "LowerBound", _inputs_flat, _attrs, _result) _result, = _result return _result LowerBound = tf_export("raw_ops.LowerBound")(_ops.to_raw_op(lower_bound)) def lower_bound_eager_fallback(sorted_inputs, values, out_type, name, ctx): if out_type is None: out_type = _dtypes.int32 out_type = _execute.make_type(out_type, "out_type") _attr_T, _inputs_T = _execute.args_to_matching_eager([sorted_inputs, values], ctx) (sorted_inputs, values) = _inputs_T _inputs_flat = [sorted_inputs, values] _attrs = ("T", _attr_T, "out_type", out_type) _result = _execute.execute(b"LowerBound", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "LowerBound", _inputs_flat, _attrs, _result) _result, = _result return _result @_dispatch.add_dispatch_list @tf_export('linalg.band_part', v1=['linalg.band_part', 'matrix_band_part']) @deprecated_endpoints('matrix_band_part') def matrix_band_part(input, num_lower, num_upper, name=None): r"""Copy a tensor setting everything outside a central band in each innermost matrix to zero. The `band` part is computed as follows: Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a tensor with the same shape where `band[i, j, k, ..., m, n] = in_band(m, n) * input[i, j, k, ..., m, n]`. The indicator function `in_band(m, n) = (num_lower < 0 || (m-n) <= num_lower)) && (num_upper < 0 || (n-m) <= num_upper)`. For example: ``` # if 'input' is [[ 0, 1, 2, 3] [-1, 0, 1, 2] [-2, -1, 0, 1] [-3, -2, -1, 0]], tf.matrix_band_part(input, 1, -1) ==> [[ 0, 1, 2, 3] [-1, 0, 1, 2] [ 0, -1, 0, 1] [ 0, 0, -1, 0]], tf.matrix_band_part(input, 2, 1) ==> [[ 0, 1, 0, 0] [-1, 0, 1, 0] [-2, -1, 0, 1] [ 0, -2, -1, 0]] ``` Useful special cases: ``` tf.matrix_band_part(input, 0, -1) ==> Upper triangular part. tf.matrix_band_part(input, -1, 0) ==> Lower triangular part. tf.matrix_band_part(input, 0, 0) ==> Diagonal. ``` Args: input: A `Tensor`. Rank `k` tensor. num_lower: A `Tensor`. Must be one of the following types: `int32`, `int64`. 0-D tensor. Number of subdiagonals to keep. If negative, keep entire lower triangle. num_upper: A `Tensor`. Must have the same type as `num_lower`. 0-D tensor. Number of superdiagonals to keep. If negative, keep entire upper triangle. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "MatrixBandPart", name, tld.op_callbacks, input, num_lower, num_upper) return _result except _core._FallbackException: try: return matrix_band_part_eager_fallback( input, num_lower, num_upper, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. 
except (TypeError, ValueError): result = _dispatch.dispatch( matrix_band_part, input=input, num_lower=num_lower, num_upper=num_upper, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. try: _, _, _op, _outputs = _op_def_library._apply_op_helper( "MatrixBandPart", input=input, num_lower=num_lower, num_upper=num_upper, name=name) except (TypeError, ValueError): result = _dispatch.dispatch( matrix_band_part, input=input, num_lower=num_lower, num_upper=num_upper, name=name) if result is not _dispatch.OpDispatcher.NOT_SUPPORTED: return result raise _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T"), "Tindex", _op._get_attr_type("Tindex")) _inputs_flat = _op.inputs _execute.record_gradient( "MatrixBandPart", _inputs_flat, _attrs, _result) _result, = _result return _result MatrixBandPart = tf_export("raw_ops.MatrixBandPart")(_ops.to_raw_op(matrix_band_part)) def matrix_band_part_eager_fallback(input, num_lower, num_upper, name, ctx): _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx) _attr_Tindex, _inputs_Tindex = _execute.args_to_matching_eager([num_lower, num_upper], ctx, _dtypes.int64) (num_lower, num_upper) = _inputs_Tindex _inputs_flat = [input, num_lower, num_upper] _attrs = ("T", _attr_T, "Tindex", _attr_Tindex) _result = _execute.execute(b"MatrixBandPart", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "MatrixBandPart", _inputs_flat, _attrs, _result) _result, = _result return _result def matrix_diag(diagonal, name=None): r"""Returns a batched diagonal tensor with a given batched diagonal values. Given a `diagonal`, this operation returns a tensor with the `diagonal` and everything else padded with zeros. The diagonal is computed as follows: Assume `diagonal` has `k` dimensions `[I, J, K, ..., N]`, then the output is a tensor of rank `k+1` with dimensions [I, J, K, ..., N, N]` where: `output[i, j, k, ..., m, n] = 1{m=n} * diagonal[i, j, k, ..., n]`. For example: ``` # 'diagonal' is [[1, 2, 3, 4], [5, 6, 7, 8]] and diagonal.shape = (2, 4) tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, 8]]] which has shape (2, 4, 4) ``` Args: diagonal: A `Tensor`. Rank `k`, where `k >= 1`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `diagonal`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "MatrixDiag", name, tld.op_callbacks, diagonal) return _result except _core._FallbackException: try: return matrix_diag_eager_fallback( diagonal, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. 
_, _, _op, _outputs = _op_def_library._apply_op_helper( "MatrixDiag", diagonal=diagonal, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "MatrixDiag", _inputs_flat, _attrs, _result) _result, = _result return _result MatrixDiag = tf_export("raw_ops.MatrixDiag")(_ops.to_raw_op(matrix_diag)) def matrix_diag_eager_fallback(diagonal, name, ctx): _attr_T, (diagonal,) = _execute.args_to_matching_eager([diagonal], ctx) _inputs_flat = [diagonal] _attrs = ("T", _attr_T) _result = _execute.execute(b"MatrixDiag", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "MatrixDiag", _inputs_flat, _attrs, _result) _result, = _result return _result def matrix_diag_part(input, name=None): r"""Returns the batched diagonal part of a batched tensor. This operation returns a tensor with the `diagonal` part of the batched `input`. The `diagonal` part is computed as follows: Assume `input` has `k` dimensions `[I, J, K, ..., M, N]`, then the output is a tensor of rank `k - 1` with dimensions `[I, J, K, ..., min(M, N)]` where: `diagonal[i, j, k, ..., n] = input[i, j, k, ..., n, n]`. The input must be at least a matrix. For example: ``` # 'input' is [[[1, 0, 0, 0] [0, 2, 0, 0] [0, 0, 3, 0] [0, 0, 0, 4]], [[5, 0, 0, 0] [0, 6, 0, 0] [0, 0, 7, 0] [0, 0, 0, 8]]] and input.shape = (2, 4, 4) tf.matrix_diag_part(input) ==> [[1, 2, 3, 4], [5, 6, 7, 8]] which has shape (2, 4) ``` Args: input: A `Tensor`. Rank `k` tensor where `k >= 2`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "MatrixDiagPart", name, tld.op_callbacks, input) return _result except _core._FallbackException: try: return matrix_diag_part_eager_fallback( input, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "MatrixDiagPart", input=input, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "MatrixDiagPart", _inputs_flat, _attrs, _result) _result, = _result return _result MatrixDiagPart = tf_export("raw_ops.MatrixDiagPart")(_ops.to_raw_op(matrix_diag_part)) def matrix_diag_part_eager_fallback(input, name, ctx): _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx) _inputs_flat = [input] _attrs = ("T", _attr_T) _result = _execute.execute(b"MatrixDiagPart", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "MatrixDiagPart", _inputs_flat, _attrs, _result) _result, = _result return _result def matrix_diag_part_v2(input, k, padding_value, name=None): r"""Returns the batched diagonal part of a batched tensor. Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched `input`. Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. 
Let `max_diag_len` be the maximum length among all diagonals to be extracted, `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` Let `num_diags` be the number of diagonals to extract, `num_diags = k[1] - k[0] + 1`. If `num_diags == 1`, the output tensor is of rank `r - 1` with shape `[I, J, ..., L, max_diag_len]` and values: ``` diagonal[i, j, ..., l, n] = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, padding_value ; otherwise. ``` where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. Otherwise, the output tensor has rank `r` with dimensions `[I, J, ..., L, num_diags, max_diag_len]` with values: ``` diagonal[i, j, ..., l, m, n] = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, padding_value ; otherwise. ``` where `d = k[1] - m`, `y = max(-d, 0)`, and `x = max(d, 0)`. The input must be at least a matrix. For example: ``` input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) [5, 6, 7, 8], [9, 8, 7, 6]], [[5, 4, 3, 2], [1, 2, 3, 4], [5, 6, 7, 8]]]) # A main diagonal from each batch. tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) [5, 2, 7]] # A superdiagonal from each batch. tf.matrix_diag_part(input, k = 1) ==> [[2, 7, 6], # Output shape: (2, 3) [4, 3, 8]] # A tridiagonal band from each batch. tf.matrix_diag_part(input, k = (-1, 1)) ==> [[[2, 7, 6], # Output shape: (2, 3, 3) [1, 6, 7], [5, 8, 0]], [[4, 3, 8], [5, 2, 7], [1, 6, 0]]] # Padding value = 9 tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) ==> [[[4, 9, 9], # Output shape: (2, 3, 3) [3, 8, 9], [2, 7, 6]], [[2, 9, 9], [3, 4, 9], [4, 3, 8]]] ``` Args: input: A `Tensor`. Rank `r` tensor where `r >= 2`. k: A `Tensor` of type `int32`. Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main diagonal, and negative value means subdiagonals. `k` can be a single integer (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band. `k[0]` must not be larger than `k[1]`. padding_value: A `Tensor`. Must have the same type as `input`. The value to fill the area outside the specified diagonal band with. Default is 0. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "MatrixDiagPartV2", name, tld.op_callbacks, input, k, padding_value) return _result except _core._FallbackException: try: return matrix_diag_part_v2_eager_fallback( input, k, padding_value, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. 
_, _, _op, _outputs = _op_def_library._apply_op_helper( "MatrixDiagPartV2", input=input, k=k, padding_value=padding_value, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "MatrixDiagPartV2", _inputs_flat, _attrs, _result) _result, = _result return _result MatrixDiagPartV2 = tf_export("raw_ops.MatrixDiagPartV2")(_ops.to_raw_op(matrix_diag_part_v2)) def matrix_diag_part_v2_eager_fallback(input, k, padding_value, name, ctx): _attr_T, _inputs_T = _execute.args_to_matching_eager([input, padding_value], ctx) (input, padding_value) = _inputs_T k = _ops.convert_to_tensor(k, _dtypes.int32) _inputs_flat = [input, k, padding_value] _attrs = ("T", _attr_T) _result = _execute.execute(b"MatrixDiagPartV2", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "MatrixDiagPartV2", _inputs_flat, _attrs, _result) _result, = _result return _result def matrix_diag_part_v3(input, k, padding_value, align="RIGHT_LEFT", name=None): r"""Returns the batched diagonal part of a batched tensor. Returns a tensor with the `k[0]`-th to `k[1]`-th diagonals of the batched `input`. Assume `input` has `r` dimensions `[I, J, ..., L, M, N]`. Let `max_diag_len` be the maximum length among all diagonals to be extracted, `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` Let `num_diags` be the number of diagonals to extract, `num_diags = k[1] - k[0] + 1`. If `num_diags == 1`, the output tensor is of rank `r - 1` with shape `[I, J, ..., L, max_diag_len]` and values: ``` diagonal[i, j, ..., l, n] = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, padding_value ; otherwise. ``` where `y = max(-k[1], 0)`, `x = max(k[1], 0)`. Otherwise, the output tensor has rank `r` with dimensions `[I, J, ..., L, num_diags, max_diag_len]` with values: ``` diagonal[i, j, ..., l, m, n] = input[i, j, ..., l, n+y, n+x] ; if 0 <= n+y < M and 0 <= n+x < N, padding_value ; otherwise. ``` where `d = k[1] - m`, `y = max(-d, 0) - offset`, and `x = max(d, 0) - offset`. `offset` is zero except when the alignment of the diagonal is to the right. ``` offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} and `d >= 0`) or (`align` in {LEFT_RIGHT, RIGHT_RIGHT} and `d <= 0`) 0 ; otherwise ``` where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. The input must be at least a matrix. For example: ``` input = np.array([[[1, 2, 3, 4], # Input shape: (2, 3, 4) [5, 6, 7, 8], [9, 8, 7, 6]], [[5, 4, 3, 2], [1, 2, 3, 4], [5, 6, 7, 8]]]) # A main diagonal from each batch. tf.matrix_diag_part(input) ==> [[1, 6, 7], # Output shape: (2, 3) [5, 2, 7]] # A superdiagonal from each batch. tf.matrix_diag_part(input, k = 1) ==> [[2, 7, 6], # Output shape: (2, 3) [4, 3, 8]] # A band from each batch. tf.matrix_diag_part(input, k = (-1, 2)) ==> [[[0, 3, 8], # Output shape: (2, 4, 3) [2, 7, 6], [1, 6, 7], [5, 8, 0]], [[0, 3, 4], [4, 3, 8], [5, 2, 7], [1, 6, 0]]] # LEFT_RIGHT alignment. tf.matrix_diag_part(input, k = (-1, 2), align="LEFT_RIGHT") ==> [[[3, 8, 0], # Output shape: (2, 4, 3) [2, 7, 6], [1, 6, 7], [0, 5, 8]], [[3, 4, 0], [4, 3, 8], [5, 2, 7], [0, 1, 6]]] # max_diag_len can be shorter than the main diagonal. 
tf.matrix_diag_part(input, k = (-2, -1)) ==> [[[5, 8], [9, 0]], [[1, 6], [5, 0]]] # padding_value = 9 tf.matrix_diag_part(input, k = (1, 3), padding_value = 9) ==> [[[9, 9, 4], # Output shape: (2, 3, 3) [9, 3, 8], [2, 7, 6]], [[9, 9, 2], [9, 3, 4], [4, 3, 8]]] ``` Args: input: A `Tensor`. Rank `r` tensor where `r >= 2`. k: A `Tensor` of type `int32`. Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main diagonal, and negative value means subdiagonals. `k` can be a single integer (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band. `k[0]` must not be larger than `k[1]`. padding_value: A `Tensor`. Must have the same type as `input`. The value to fill the area outside the specified diagonal band with. Default is 0. align: An optional `string` from: `"LEFT_RIGHT", "RIGHT_LEFT", "LEFT_LEFT", "RIGHT_RIGHT"`. Defaults to `"RIGHT_LEFT"`. Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is a string specifying how superdiagonals and subdiagonals should be aligned, respectively. There are four possible alignments: "RIGHT_LEFT" (default), "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals to the right (left-pads the row) and subdiagonals to the left (right-pads the row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "MatrixDiagPartV3", name, tld.op_callbacks, input, k, padding_value, "align", align) return _result except _core._FallbackException: try: return matrix_diag_part_v3_eager_fallback( input, k, padding_value, align=align, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. if align is None: align = "RIGHT_LEFT" align = _execute.make_str(align, "align") _, _, _op, _outputs = _op_def_library._apply_op_helper( "MatrixDiagPartV3", input=input, k=k, padding_value=padding_value, align=align, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T"), "align", _op.get_attr("align")) _inputs_flat = _op.inputs _execute.record_gradient( "MatrixDiagPartV3", _inputs_flat, _attrs, _result) _result, = _result return _result MatrixDiagPartV3 = tf_export("raw_ops.MatrixDiagPartV3")(_ops.to_raw_op(matrix_diag_part_v3)) def matrix_diag_part_v3_eager_fallback(input, k, padding_value, align, name, ctx): if align is None: align = "RIGHT_LEFT" align = _execute.make_str(align, "align") _attr_T, _inputs_T = _execute.args_to_matching_eager([input, padding_value], ctx) (input, padding_value) = _inputs_T k = _ops.convert_to_tensor(k, _dtypes.int32) _inputs_flat = [input, k, padding_value] _attrs = ("T", _attr_T, "align", align) _result = _execute.execute(b"MatrixDiagPartV3", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "MatrixDiagPartV3", _inputs_flat, _attrs, _result) _result, = _result return _result def matrix_diag_v2(diagonal, k, num_rows, num_cols, padding_value, name=None): r"""Returns a batched diagonal tensor with given batched diagonal values. 
Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th diagonals of a matrix, with everything else padded with `padding`. `num_rows` and `num_cols` specify the dimension of the innermost matrix of the output. If both are not specified, the op assumes the innermost matrix is square and infers its size from `k` and the innermost dimension of `diagonal`. If only one of them is specified, the op assumes the unspecified value is the smallest possible based on other criteria. Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank `r` with shape `[I, J, ..., L, num_rows, num_cols]`. The second innermost dimension of `diagonal` has double meaning. When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size [I, J, ..., M], and the output tensor is: ``` output[i, j, ..., l, m, n] = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper padding_value ; otherwise ``` Otherwise, `M` is treated as the number of diagonals for the matrix in the same batch (`M = k[1]-k[0]+1`), and the output tensor is: ``` output[i, j, ..., l, m, n] = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] padding_value ; otherwise ``` where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. For example: ``` # The main diagonal. diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) [5, 6, 7, 8]]) tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]], [[5, 0, 0, 0], [0, 6, 0, 0], [0, 0, 7, 0], [0, 0, 0, 8]]] # A superdiagonal (per batch). diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) [4, 5, 6]]) tf.matrix_diag(diagonal, k = 1) ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) [0, 0, 2, 0], [0, 0, 0, 3], [0, 0, 0, 0]], [[0, 4, 0, 0], [0, 0, 5, 0], [0, 0, 0, 6], [0, 0, 0, 0]]] # A band of diagonals. diagonals = np.array([[[1, 2, 3], # Input shape: (2, 2, 3) [4, 5, 0]], [[6, 7, 9], [9, 1, 0]]]) tf.matrix_diag(diagonals, k = (-1, 0)) ==> [[[1, 0, 0], # Output shape: (2, 3, 3) [4, 2, 0], [0, 5, 3]], [[6, 0, 0], [9, 7, 0], [0, 1, 9]]] # Rectangular matrix. diagonal = np.array([1, 2]) # Input shape: (2) tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) ==> [[0, 0, 0, 0], # Output shape: (3, 4) [1, 0, 0, 0], [0, 2, 0, 0]] # Rectangular matrix with inferred num_cols and padding_value = 9. tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) ==> [[9, 9], # Output shape: (3, 2) [1, 9], [9, 2]] ``` Args: diagonal: A `Tensor`. Rank `r`, where `r >= 1` k: A `Tensor` of type `int32`. Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main diagonal, and negative value means subdiagonals. `k` can be a single integer (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band. `k[0]` must not be larger than `k[1]`. num_rows: A `Tensor` of type `int32`. The number of rows of the output matrix. If it is not provided, the op assumes the output matrix is a square matrix and infers the matrix size from k and the innermost dimension of `diagonal`. num_cols: A `Tensor` of type `int32`. The number of columns of the output matrix. If it is not provided, the op assumes the output matrix is a square matrix and infers the matrix size from k and the innermost dimension of `diagonal`. padding_value: A `Tensor`. Must have the same type as `diagonal`. 
The number to fill the area outside the specified diagonal band with. Default is 0. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `diagonal`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "MatrixDiagV2", name, tld.op_callbacks, diagonal, k, num_rows, num_cols, padding_value) return _result except _core._FallbackException: try: return matrix_diag_v2_eager_fallback( diagonal, k, num_rows, num_cols, padding_value, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "MatrixDiagV2", diagonal=diagonal, k=k, num_rows=num_rows, num_cols=num_cols, padding_value=padding_value, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "MatrixDiagV2", _inputs_flat, _attrs, _result) _result, = _result return _result MatrixDiagV2 = tf_export("raw_ops.MatrixDiagV2")(_ops.to_raw_op(matrix_diag_v2)) def matrix_diag_v2_eager_fallback(diagonal, k, num_rows, num_cols, padding_value, name, ctx): _attr_T, _inputs_T = _execute.args_to_matching_eager([diagonal, padding_value], ctx) (diagonal, padding_value) = _inputs_T k = _ops.convert_to_tensor(k, _dtypes.int32) num_rows = _ops.convert_to_tensor(num_rows, _dtypes.int32) num_cols = _ops.convert_to_tensor(num_cols, _dtypes.int32) _inputs_flat = [diagonal, k, num_rows, num_cols, padding_value] _attrs = ("T", _attr_T) _result = _execute.execute(b"MatrixDiagV2", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "MatrixDiagV2", _inputs_flat, _attrs, _result) _result, = _result return _result def matrix_diag_v3(diagonal, k, num_rows, num_cols, padding_value, align="RIGHT_LEFT", name=None): r"""Returns a batched diagonal tensor with given batched diagonal values. Returns a tensor with the contents in `diagonal` as `k[0]`-th to `k[1]`-th diagonals of a matrix, with everything else padded with `padding`. `num_rows` and `num_cols` specify the dimension of the innermost matrix of the output. If both are not specified, the op assumes the innermost matrix is square and infers its size from `k` and the innermost dimension of `diagonal`. If only one of them is specified, the op assumes the unspecified value is the smallest possible based on other criteria. Let `diagonal` have `r` dimensions `[I, J, ..., L, M, N]`. The output tensor has rank `r+1` with shape `[I, J, ..., L, M, num_rows, num_cols]` when only one diagonal is given (`k` is an integer or `k[0] == k[1]`). Otherwise, it has rank `r` with shape `[I, J, ..., L, num_rows, num_cols]`. The second innermost dimension of `diagonal` has double meaning. 
When `k` is scalar or `k[0] == k[1]`, `M` is part of the batch size [I, J, ..., M], and the output tensor is: ``` output[i, j, ..., l, m, n] = diagonal[i, j, ..., l, n-max(d_upper, 0)] ; if n - m == d_upper padding_value ; otherwise ``` Otherwise, `M` is treated as the number of diagonals for the matrix in the same batch (`M = k[1]-k[0]+1`), and the output tensor is: ``` output[i, j, ..., l, m, n] = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] padding_value ; otherwise ``` where `d = n - m`, `diag_index = [k] - d`, and `index_in_diag = n - max(d, 0) + offset`. `offset` is zero except when the alignment of the diagonal is to the right. ``` offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} and `d >= 0`) or (`align` in {LEFT_RIGHT, RIGHT_RIGHT} and `d <= 0`) 0 ; otherwise ``` where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. For example: ``` # The main diagonal. diagonal = np.array([[1, 2, 3, 4], # Input shape: (2, 4) [5, 6, 7, 8]]) tf.matrix_diag(diagonal) ==> [[[1, 0, 0, 0], # Output shape: (2, 4, 4) [0, 2, 0, 0], [0, 0, 3, 0], [0, 0, 0, 4]], [[5, 0, 0, 0], [0, 6, 0, 0], [0, 0, 7, 0], [0, 0, 0, 8]]] # A superdiagonal (per batch). diagonal = np.array([[1, 2, 3], # Input shape: (2, 3) [4, 5, 6]]) tf.matrix_diag(diagonal, k = 1) ==> [[[0, 1, 0, 0], # Output shape: (2, 4, 4) [0, 0, 2, 0], [0, 0, 0, 3], [0, 0, 0, 0]], [[0, 4, 0, 0], [0, 0, 5, 0], [0, 0, 0, 6], [0, 0, 0, 0]]] # A tridiagonal band (per batch). diagonals = np.array([[[0, 8, 9], # Input shape: (2, 2, 3) [1, 2, 3], [4, 5, 0]], [[0, 2, 3], [6, 7, 9], [9, 1, 0]]]) tf.matrix_diag(diagonals, k = (-1, 1)) ==> [[[1, 8, 0], # Output shape: (2, 3, 3) [4, 2, 9], [0, 5, 3]], [[6, 2, 0], [9, 7, 3], [0, 1, 9]]] # LEFT_RIGHT alignment. diagonals = np.array([[[8, 9, 0], # Input shape: (2, 2, 3) [1, 2, 3], [0, 4, 5]], [[2, 3, 0], [6, 7, 9], [0, 9, 1]]]) tf.matrix_diag(diagonals, k = (-1, 1), align="LEFT_RIGHT") ==> [[[1, 8, 0], # Output shape: (2, 3, 3) [4, 2, 9], [0, 5, 3]], [[6, 2, 0], [9, 7, 3], [0, 1, 9]]] # Rectangular matrix. diagonal = np.array([1, 2]) # Input shape: (2) tf.matrix_diag(diagonal, k = -1, num_rows = 3, num_cols = 4) ==> [[0, 0, 0, 0], # Output shape: (3, 4) [1, 0, 0, 0], [0, 2, 0, 0]] # Rectangular matrix with inferred num_cols and padding_value = 9. tf.matrix_diag(diagonal, k = -1, num_rows = 3, padding_value = 9) ==> [[9, 9], # Output shape: (3, 2) [1, 9], [9, 2]] ``` Args: diagonal: A `Tensor`. Rank `r`, where `r >= 1` k: A `Tensor` of type `int32`. Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main diagonal, and negative value means subdiagonals. `k` can be a single integer (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band. `k[0]` must not be larger than `k[1]`. num_rows: A `Tensor` of type `int32`. The number of rows of the output matrix. If it is not provided, the op assumes the output matrix is a square matrix and infers the matrix size from k and the innermost dimension of `diagonal`. num_cols: A `Tensor` of type `int32`. The number of columns of the output matrix. If it is not provided, the op assumes the output matrix is a square matrix and infers the matrix size from k and the innermost dimension of `diagonal`. padding_value: A `Tensor`. Must have the same type as `diagonal`. The number to fill the area outside the specified diagonal band with. Default is 0. align: An optional `string` from: `"LEFT_RIGHT", "RIGHT_LEFT", "LEFT_LEFT", "RIGHT_RIGHT"`. Defaults to `"RIGHT_LEFT"`. 
Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is a string specifying how superdiagonals and subdiagonals should be aligned, respectively. There are four possible alignments: "RIGHT_LEFT" (default), "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals to the right (left-pads the row) and subdiagonals to the left (right-pads the row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `diagonal`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "MatrixDiagV3", name, tld.op_callbacks, diagonal, k, num_rows, num_cols, padding_value, "align", align) return _result except _core._FallbackException: try: return matrix_diag_v3_eager_fallback( diagonal, k, num_rows, num_cols, padding_value, align=align, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. if align is None: align = "RIGHT_LEFT" align = _execute.make_str(align, "align") _, _, _op, _outputs = _op_def_library._apply_op_helper( "MatrixDiagV3", diagonal=diagonal, k=k, num_rows=num_rows, num_cols=num_cols, padding_value=padding_value, align=align, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T"), "align", _op.get_attr("align")) _inputs_flat = _op.inputs _execute.record_gradient( "MatrixDiagV3", _inputs_flat, _attrs, _result) _result, = _result return _result MatrixDiagV3 = tf_export("raw_ops.MatrixDiagV3")(_ops.to_raw_op(matrix_diag_v3)) def matrix_diag_v3_eager_fallback(diagonal, k, num_rows, num_cols, padding_value, align, name, ctx): if align is None: align = "RIGHT_LEFT" align = _execute.make_str(align, "align") _attr_T, _inputs_T = _execute.args_to_matching_eager([diagonal, padding_value], ctx) (diagonal, padding_value) = _inputs_T k = _ops.convert_to_tensor(k, _dtypes.int32) num_rows = _ops.convert_to_tensor(num_rows, _dtypes.int32) num_cols = _ops.convert_to_tensor(num_cols, _dtypes.int32) _inputs_flat = [diagonal, k, num_rows, num_cols, padding_value] _attrs = ("T", _attr_T, "align", align) _result = _execute.execute(b"MatrixDiagV3", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "MatrixDiagV3", _inputs_flat, _attrs, _result) _result, = _result return _result def matrix_set_diag(input, diagonal, name=None): r"""Returns a batched matrix tensor with new batched diagonal values. Given `input` and `diagonal`, this operation returns a tensor with the same shape and values as `input`, except for the main diagonal of the innermost matrices. These will be overwritten by the values in `diagonal`. The output is computed as follows: Assume `input` has `k+1` dimensions `[I, J, K, ..., M, N]` and `diagonal` has `k` dimensions `[I, J, K, ..., min(M, N)]`. Then the output is a tensor of rank `k+1` with dimensions `[I, J, K, ..., M, N]` where: * `output[i, j, k, ..., m, n] = diagonal[i, j, k, ..., n]` for `m == n`. * `output[i, j, k, ..., m, n] = input[i, j, k, ..., m, n]` for `m != n`. Args: input: A `Tensor`. Rank `k+1`, where `k >= 1`. diagonal: A `Tensor`. Must have the same type as `input`. Rank `k`, where `k >= 1`. 
name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "MatrixSetDiag", name, tld.op_callbacks, input, diagonal) return _result except _core._FallbackException: try: return matrix_set_diag_eager_fallback( input, diagonal, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "MatrixSetDiag", input=input, diagonal=diagonal, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "MatrixSetDiag", _inputs_flat, _attrs, _result) _result, = _result return _result MatrixSetDiag = tf_export("raw_ops.MatrixSetDiag")(_ops.to_raw_op(matrix_set_diag)) def matrix_set_diag_eager_fallback(input, diagonal, name, ctx): _attr_T, _inputs_T = _execute.args_to_matching_eager([input, diagonal], ctx) (input, diagonal) = _inputs_T _inputs_flat = [input, diagonal] _attrs = ("T", _attr_T) _result = _execute.execute(b"MatrixSetDiag", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "MatrixSetDiag", _inputs_flat, _attrs, _result) _result, = _result return _result def matrix_set_diag_v2(input, diagonal, k, name=None): r"""Returns a batched matrix tensor with new batched diagonal values. Given `input` and `diagonal`, this operation returns a tensor with the same shape and values as `input`, except for the specified diagonals of the innermost matrices. These will be overwritten by the values in `diagonal`. `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`. If `k` is scalar or `k[0] == k[1]`: ``` output[i, j, ..., l, m, n] = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] input[i, j, ..., l, m, n] ; otherwise ``` Otherwise, ``` output[i, j, ..., l, m, n] = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] input[i, j, ..., l, m, n] ; otherwise ``` where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0)`. For example: ``` # The main diagonal. input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4) [7, 7, 7, 7], [7, 7, 7, 7]], [[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]]]) diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3) [4, 5, 6]]) tf.matrix_set_diag(diagonal) ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) [7, 2, 7, 7], [7, 7, 3, 7]], [[4, 7, 7, 7], [7, 5, 7, 7], [7, 7, 6, 7]]] # A superdiagonal (per batch). tf.matrix_set_diag(diagonal, k = 1) ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) [7, 7, 2, 7], [7, 7, 7, 3]], [[7, 4, 7, 7], [7, 7, 5, 7], [7, 7, 7, 6]]] # A band of diagonals. 
diagonals = np.array([[[1, 2, 3], # Diagonal shape: (2, 2, 3) [4, 5, 0]], [[6, 1, 2], [3, 4, 0]]]) tf.matrix_set_diag(diagonals, k = (-1, 0)) ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) [4, 2, 7, 7], [0, 5, 3, 7]], [[6, 7, 7, 7], [3, 1, 7, 7], [7, 4, 2, 7]]] ``` Args: input: A `Tensor`. Rank `r+1`, where `r >= 1`. diagonal: A `Tensor`. Must have the same type as `input`. Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`. `k >= 1`. k: A `Tensor` of type `int32`. Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main diagonal, and negative value means subdiagonals. `k` can be a single integer (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band. `k[0]` must not be larger than `k[1]`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "MatrixSetDiagV2", name, tld.op_callbacks, input, diagonal, k) return _result except _core._FallbackException: try: return matrix_set_diag_v2_eager_fallback( input, diagonal, k, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "MatrixSetDiagV2", input=input, diagonal=diagonal, k=k, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "MatrixSetDiagV2", _inputs_flat, _attrs, _result) _result, = _result return _result MatrixSetDiagV2 = tf_export("raw_ops.MatrixSetDiagV2")(_ops.to_raw_op(matrix_set_diag_v2)) def matrix_set_diag_v2_eager_fallback(input, diagonal, k, name, ctx): _attr_T, _inputs_T = _execute.args_to_matching_eager([input, diagonal], ctx) (input, diagonal) = _inputs_T k = _ops.convert_to_tensor(k, _dtypes.int32) _inputs_flat = [input, diagonal, k] _attrs = ("T", _attr_T) _result = _execute.execute(b"MatrixSetDiagV2", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "MatrixSetDiagV2", _inputs_flat, _attrs, _result) _result, = _result return _result def matrix_set_diag_v3(input, diagonal, k, align="RIGHT_LEFT", name=None): r"""Returns a batched matrix tensor with new batched diagonal values. Given `input` and `diagonal`, this operation returns a tensor with the same shape and values as `input`, except for the specified diagonals of the innermost matrices. These will be overwritten by the values in `diagonal`. `input` has `r+1` dimensions `[I, J, ..., L, M, N]`. When `k` is scalar or `k[0] == k[1]`, `diagonal` has `r` dimensions `[I, J, ..., L, max_diag_len]`. Otherwise, it has `r+1` dimensions `[I, J, ..., L, num_diags, max_diag_len]`. `num_diags` is the number of diagonals, `num_diags = k[1] - k[0] + 1`. `max_diag_len` is the longest diagonal in the range `[k[0], k[1]]`, `max_diag_len = min(M + min(k[1], 0), N + min(-k[0], 0))` The output is a tensor of rank `k+1` with dimensions `[I, J, ..., L, M, N]`. 
If `k` is scalar or `k[0] == k[1]`: ``` output[i, j, ..., l, m, n] = diagonal[i, j, ..., l, n-max(k[1], 0)] ; if n - m == k[1] input[i, j, ..., l, m, n] ; otherwise ``` Otherwise, ``` output[i, j, ..., l, m, n] = diagonal[i, j, ..., l, diag_index, index_in_diag] ; if k[0] <= d <= k[1] input[i, j, ..., l, m, n] ; otherwise ``` where `d = n - m`, `diag_index = k[1] - d`, and `index_in_diag = n - max(d, 0) + offset`. `offset` is zero except when the alignment of the diagonal is to the right. ``` offset = max_diag_len - diag_len(d) ; if (`align` in {RIGHT_LEFT, RIGHT_RIGHT} and `d >= 0`) or (`align` in {LEFT_RIGHT, RIGHT_RIGHT} and `d <= 0`) 0 ; otherwise ``` where `diag_len(d) = min(cols - max(d, 0), rows + min(d, 0))`. For example: ``` # The main diagonal. input = np.array([[[7, 7, 7, 7], # Input shape: (2, 3, 4) [7, 7, 7, 7], [7, 7, 7, 7]], [[7, 7, 7, 7], [7, 7, 7, 7], [7, 7, 7, 7]]]) diagonal = np.array([[1, 2, 3], # Diagonal shape: (2, 3) [4, 5, 6]]) tf.matrix_set_diag(input, diagonal) ==> [[[1, 7, 7, 7], # Output shape: (2, 3, 4) [7, 2, 7, 7], [7, 7, 3, 7]], [[4, 7, 7, 7], [7, 5, 7, 7], [7, 7, 6, 7]]] # A superdiagonal (per batch). tf.matrix_set_diag(input, diagonal, k = 1) ==> [[[7, 1, 7, 7], # Output shape: (2, 3, 4) [7, 7, 2, 7], [7, 7, 7, 3]], [[7, 4, 7, 7], [7, 7, 5, 7], [7, 7, 7, 6]]] # A band of diagonals. diagonals = np.array([[[0, 9, 1], # Diagonal shape: (2, 4, 3) [6, 5, 8], [1, 2, 3], [4, 5, 0]], [[0, 1, 2], [5, 6, 4], [6, 1, 2], [3, 4, 0]]]) tf.matrix_set_diag(input, diagonals, k = (-1, 2)) ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) [4, 2, 5, 1], [7, 5, 3, 8]], [[6, 5, 1, 7], [3, 1, 6, 2], [7, 4, 2, 4]]] # LEFT_RIGHT alignment. diagonals = np.array([[[9, 1, 0], # Diagonal shape: (2, 4, 3) [6, 5, 8], [1, 2, 3], [0, 4, 5]], [[1, 2, 0], [5, 6, 4], [6, 1, 2], [0, 3, 4]]]) tf.matrix_set_diag(input, diagonals, k = (-1, 2), align="LEFT_RIGHT") ==> [[[1, 6, 9, 7], # Output shape: (2, 3, 4) [4, 2, 5, 1], [7, 5, 3, 8]], [[6, 5, 1, 7], [3, 1, 6, 2], [7, 4, 2, 4]]] ``` Args: input: A `Tensor`. Rank `r+1`, where `r >= 1`. diagonal: A `Tensor`. Must have the same type as `input`. Rank `r` when `k` is an integer or `k[0] == k[1]`. Otherwise, it has rank `r+1`. `k >= 1`. k: A `Tensor` of type `int32`. Diagonal offset(s). Positive value means superdiagonal, 0 refers to the main diagonal, and negative value means subdiagonals. `k` can be a single integer (for a single diagonal) or a pair of integers specifying the low and high ends of a matrix band. `k[0]` must not be larger than `k[1]`. align: An optional `string` from: `"LEFT_RIGHT", "RIGHT_LEFT", "LEFT_LEFT", "RIGHT_RIGHT"`. Defaults to `"RIGHT_LEFT"`. Some diagonals are shorter than `max_diag_len` and need to be padded. `align` is a string specifying how superdiagonals and subdiagonals should be aligned, respectively. There are four possible alignments: "RIGHT_LEFT" (default), "LEFT_RIGHT", "LEFT_LEFT", and "RIGHT_RIGHT". "RIGHT_LEFT" aligns superdiagonals to the right (left-pads the row) and subdiagonals to the left (right-pads the row). It is the packing format LAPACK uses. cuSPARSE uses "LEFT_RIGHT", which is the opposite alignment. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. 
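A minimal runnable sketch of the same op through the `tf.raw_ops.MatrixSetDiagV3` endpoint exported below (TF 2.x eager mode assumed; the tensor values are chosen for illustration and are not the ones from the examples above):

```
import tensorflow as tf

matrix = tf.fill([3, 4], 7)          # rank-2 input, shape (3, 4)
new_diag = tf.constant([1, 2, 3])    # one value per main-diagonal entry
# Overwrite the main diagonal (k = 0); `align` only matters for bands of diagonals.
result = tf.raw_ops.MatrixSetDiagV3(input=matrix, diagonal=new_diag, k=0,
                                    align="RIGHT_LEFT")
# Per the semantics above, result should be [[1, 7, 7, 7], [7, 2, 7, 7], [7, 7, 3, 7]].
```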
""" _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "MatrixSetDiagV3", name, tld.op_callbacks, input, diagonal, k, "align", align) return _result except _core._FallbackException: try: return matrix_set_diag_v3_eager_fallback( input, diagonal, k, align=align, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. if align is None: align = "RIGHT_LEFT" align = _execute.make_str(align, "align") _, _, _op, _outputs = _op_def_library._apply_op_helper( "MatrixSetDiagV3", input=input, diagonal=diagonal, k=k, align=align, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T"), "align", _op.get_attr("align")) _inputs_flat = _op.inputs _execute.record_gradient( "MatrixSetDiagV3", _inputs_flat, _attrs, _result) _result, = _result return _result MatrixSetDiagV3 = tf_export("raw_ops.MatrixSetDiagV3")(_ops.to_raw_op(matrix_set_diag_v3)) def matrix_set_diag_v3_eager_fallback(input, diagonal, k, align, name, ctx): if align is None: align = "RIGHT_LEFT" align = _execute.make_str(align, "align") _attr_T, _inputs_T = _execute.args_to_matching_eager([input, diagonal], ctx) (input, diagonal) = _inputs_T k = _ops.convert_to_tensor(k, _dtypes.int32) _inputs_flat = [input, diagonal, k] _attrs = ("T", _attr_T, "align", align) _result = _execute.execute(b"MatrixSetDiagV3", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "MatrixSetDiagV3", _inputs_flat, _attrs, _result) _result, = _result return _result def mirror_pad(input, paddings, mode, name=None): r"""Pads a tensor with mirrored values. This operation pads a `input` with mirrored values according to the `paddings` you specify. `paddings` is an integer tensor with shape `[n, 2]`, where n is the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates how many values to add before the contents of `input` in that dimension, and `paddings[D, 1]` indicates how many values to add after the contents of `input` in that dimension. Both `paddings[D, 0]` and `paddings[D, 1]` must be no greater than `input.dim_size(D)` (or `input.dim_size(D) - 1`) if `copy_border` is true (if false, respectively). The padded size of each dimension D of the output is: `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` For example: ``` # 't' is [[1, 2, 3], [4, 5, 6]]. # 'paddings' is [[1, 1]], [2, 2]]. # 'mode' is SYMMETRIC. # rank of 't' is 2. pad(t, paddings) ==> [[2, 1, 1, 2, 3, 3, 2] [2, 1, 1, 2, 3, 3, 2] [5, 4, 4, 5, 6, 6, 5] [5, 4, 4, 5, 6, 6, 5]] ``` Args: input: A `Tensor`. The input tensor to be padded. paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`. A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of `input`. mode: A `string` from: `"REFLECT", "SYMMETRIC"`. Either `REFLECT` or `SYMMETRIC`. In reflect mode the padded regions do not include the borders, while in symmetric mode the padded regions do include the borders. For example, if `input` is `[1, 2, 3]` and `paddings` is `[0, 2]`, then the output is `[1, 2, 3, 2, 1]` in reflect mode, and it is `[1, 2, 3, 3, 2]` in symmetric mode. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. 
""" _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "MirrorPad", name, tld.op_callbacks, input, paddings, "mode", mode) return _result except _core._FallbackException: try: return mirror_pad_eager_fallback( input, paddings, mode=mode, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. mode = _execute.make_str(mode, "mode") _, _, _op, _outputs = _op_def_library._apply_op_helper( "MirrorPad", input=input, paddings=paddings, mode=mode, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings"), "mode", _op.get_attr("mode")) _inputs_flat = _op.inputs _execute.record_gradient( "MirrorPad", _inputs_flat, _attrs, _result) _result, = _result return _result MirrorPad = tf_export("raw_ops.MirrorPad")(_ops.to_raw_op(mirror_pad)) def mirror_pad_eager_fallback(input, paddings, mode, name, ctx): mode = _execute.make_str(mode, "mode") _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx) _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], ctx, _dtypes.int32) _inputs_flat = [input, paddings] _attrs = ("T", _attr_T, "Tpaddings", _attr_Tpaddings, "mode", mode) _result = _execute.execute(b"MirrorPad", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "MirrorPad", _inputs_flat, _attrs, _result) _result, = _result return _result def mirror_pad_grad(input, paddings, mode, name=None): r"""Gradient op for `MirrorPad` op. This op folds a mirror-padded tensor. This operation folds the padded areas of `input` by `MirrorPad` according to the `paddings` you specify. `paddings` must be the same as `paddings` argument given to the corresponding `MirrorPad` op. The folded size of each dimension D of the output is: `input.dim_size(D) - paddings(D, 0) - paddings(D, 1)` For example: ``` # 't' is [[1, 2, 3], [4, 5, 6], [7, 8, 9]]. # 'paddings' is [[0, 1]], [0, 1]]. # 'mode' is SYMMETRIC. # rank of 't' is 2. pad(t, paddings) ==> [[ 1, 5] [11, 28]] ``` Args: input: A `Tensor`. The input tensor to be folded. paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`. A two-column matrix specifying the padding sizes. The number of rows must be the same as the rank of `input`. mode: A `string` from: `"REFLECT", "SYMMETRIC"`. The mode used in the `MirrorPad` op. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "MirrorPadGrad", name, tld.op_callbacks, input, paddings, "mode", mode) return _result except _core._FallbackException: try: return mirror_pad_grad_eager_fallback( input, paddings, mode=mode, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. 
mode = _execute.make_str(mode, "mode") _, _, _op, _outputs = _op_def_library._apply_op_helper( "MirrorPadGrad", input=input, paddings=paddings, mode=mode, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings"), "mode", _op.get_attr("mode")) _inputs_flat = _op.inputs _execute.record_gradient( "MirrorPadGrad", _inputs_flat, _attrs, _result) _result, = _result return _result MirrorPadGrad = tf_export("raw_ops.MirrorPadGrad")(_ops.to_raw_op(mirror_pad_grad)) def mirror_pad_grad_eager_fallback(input, paddings, mode, name, ctx): mode = _execute.make_str(mode, "mode") _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx) _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], ctx, _dtypes.int32) _inputs_flat = [input, paddings] _attrs = ("T", _attr_T, "Tpaddings", _attr_Tpaddings, "mode", mode) _result = _execute.execute(b"MirrorPadGrad", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "MirrorPadGrad", _inputs_flat, _attrs, _result) _result, = _result return _result def one_hot(indices, depth, on_value, off_value, axis=-1, name=None): r"""Returns a one-hot tensor. The locations represented by indices in `indices` take value `on_value`, while all other locations take value `off_value`. If the input `indices` is rank `N`, the output will have rank `N+1`, The new axis is created at dimension `axis` (default: the new axis is appended at the end). If `indices` is a scalar the output shape will be a vector of length `depth`. If `indices` is a vector of length `features`, the output shape will be: ``` features x depth if axis == -1 depth x features if axis == 0 ``` If `indices` is a matrix (batch) with shape `[batch, features]`, the output shape will be: ``` batch x features x depth if axis == -1 batch x depth x features if axis == 1 depth x batch x features if axis == 0 ``` Examples ========= Suppose that ``` indices = [0, 2, -1, 1] depth = 3 on_value = 5.0 off_value = 0.0 axis = -1 ``` Then output is `[4 x 3]`: ``` output = [5.0 0.0 0.0] // one_hot(0) [0.0 0.0 5.0] // one_hot(2) [0.0 0.0 0.0] // one_hot(-1) [0.0 5.0 0.0] // one_hot(1) ``` Suppose that ``` indices = [0, 2, -1, 1] depth = 3 on_value = 0.0 off_value = 3.0 axis = 0 ``` Then output is `[3 x 4]`: ``` output = [0.0 3.0 3.0 3.0] [3.0 3.0 3.0 0.0] [3.0 3.0 3.0 3.0] [3.0 0.0 3.0 3.0] // ^ one_hot(0) // ^ one_hot(2) // ^ one_hot(-1) // ^ one_hot(1) ``` Suppose that ``` indices = [[0, 2], [1, -1]] depth = 3 on_value = 1.0 off_value = 0.0 axis = -1 ``` Then output is `[2 x 2 x 3]`: ``` output = [ [1.0, 0.0, 0.0] // one_hot(0) [0.0, 0.0, 1.0] // one_hot(2) ][ [0.0, 1.0, 0.0] // one_hot(1) [0.0, 0.0, 0.0] // one_hot(-1) ] ``` Args: indices: A `Tensor`. Must be one of the following types: `uint8`, `int32`, `int64`. A tensor of indices. depth: A `Tensor` of type `int32`. A scalar defining the depth of the one hot dimension. on_value: A `Tensor`. A scalar defining the value to fill in output when `indices[j] = i`. off_value: A `Tensor`. Must have the same type as `on_value`. A scalar defining the value to fill in output when `indices[j] != i`. axis: An optional `int`. Defaults to `-1`. The axis to fill (default: -1, a new inner-most axis). name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `on_value`. 
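A runnable sketch of the first example above, using the `tf.raw_ops.OneHot` endpoint exported below (TF 2.x eager mode assumed):

```
import tensorflow as tf

indices = tf.constant([0, 2, -1, 1], dtype=tf.int64)
output = tf.raw_ops.OneHot(indices=indices, depth=3,
                           on_value=tf.constant(5.0),
                           off_value=tf.constant(0.0),
                           axis=-1)
# output has shape (4, 3); the row for index -1 contains only off_value.
```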
""" _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "OneHot", name, tld.op_callbacks, indices, depth, on_value, off_value, "axis", axis) return _result except _core._FallbackException: try: return one_hot_eager_fallback( indices, depth, on_value, off_value, axis=axis, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. if axis is None: axis = -1 axis = _execute.make_int(axis, "axis") _, _, _op, _outputs = _op_def_library._apply_op_helper( "OneHot", indices=indices, depth=depth, on_value=on_value, off_value=off_value, axis=axis, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("axis", _op._get_attr_int("axis"), "T", _op._get_attr_type("T"), "TI", _op._get_attr_type("TI")) _inputs_flat = _op.inputs _execute.record_gradient( "OneHot", _inputs_flat, _attrs, _result) _result, = _result return _result OneHot = tf_export("raw_ops.OneHot")(_ops.to_raw_op(one_hot)) def one_hot_eager_fallback(indices, depth, on_value, off_value, axis, name, ctx): if axis is None: axis = -1 axis = _execute.make_int(axis, "axis") _attr_T, _inputs_T = _execute.args_to_matching_eager([on_value, off_value], ctx) (on_value, off_value) = _inputs_T _attr_TI, (indices,) = _execute.args_to_matching_eager([indices], ctx, _dtypes.int64) depth = _ops.convert_to_tensor(depth, _dtypes.int32) _inputs_flat = [indices, depth, on_value, off_value] _attrs = ("axis", axis, "T", _attr_T, "TI", _attr_TI) _result = _execute.execute(b"OneHot", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "OneHot", _inputs_flat, _attrs, _result) _result, = _result return _result def ones_like(x, name=None): r"""Returns a tensor of ones with the same shape and type as x. Args: x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`, `int8`, `uint8`, `int16`, `uint16`, `int32`, `int64`, `complex64`, `complex128`, `bool`. a tensor of type T. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `x`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "OnesLike", name, tld.op_callbacks, x) return _result except _core._FallbackException: try: return ones_like_eager_fallback( x, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. 
_, _, _op, _outputs = _op_def_library._apply_op_helper( "OnesLike", x=x, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "OnesLike", _inputs_flat, _attrs, _result) _result, = _result return _result OnesLike = tf_export("raw_ops.OnesLike")(_ops.to_raw_op(ones_like)) def ones_like_eager_fallback(x, name, ctx): _attr_T, (x,) = _execute.args_to_matching_eager([x], ctx) _inputs_flat = [x] _attrs = ("T", _attr_T) _result = _execute.execute(b"OnesLike", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "OnesLike", _inputs_flat, _attrs, _result) _result, = _result return _result def pack(values, axis=0, name=None): r"""Packs a list of `N` rank-`R` tensors into one rank-`(R+1)` tensor. Packs the `N` tensors in `values` into a tensor with rank one higher than each tensor in `values`, by packing them along the `axis` dimension. Given a list of tensors of shape `(A, B, C)`; if `axis == 0` then the `output` tensor will have the shape `(N, A, B, C)`. if `axis == 1` then the `output` tensor will have the shape `(A, N, B, C)`. Etc. For example: ``` # 'x' is [1, 4] # 'y' is [2, 5] # 'z' is [3, 6] pack([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. pack([x, y, z], axis=1) => [[1, 2, 3], [4, 5, 6]] ``` This is the opposite of `unpack`. Args: values: A list of at least 1 `Tensor` objects with the same type. Must be of same shape and type. axis: An optional `int`. Defaults to `0`. Dimension along which to pack. Negative values wrap around, so the valid range is `[-(R+1), R+1)`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `values`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "Pack", name, tld.op_callbacks, values, "axis", axis) return _result except _core._FallbackException: try: return pack_eager_fallback( values, axis=axis, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. if not isinstance(values, (list, tuple)): raise TypeError( "Expected list for 'values' argument to " "'pack' Op, not %r." % values) _attr_N = len(values) if axis is None: axis = 0 axis = _execute.make_int(axis, "axis") _, _, _op, _outputs = _op_def_library._apply_op_helper( "Pack", values=values, axis=axis, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), "axis", _op._get_attr_int("axis")) _inputs_flat = _op.inputs _execute.record_gradient( "Pack", _inputs_flat, _attrs, _result) _result, = _result return _result Pack = tf_export("raw_ops.Pack")(_ops.to_raw_op(pack)) def pack_eager_fallback(values, axis, name, ctx): if not isinstance(values, (list, tuple)): raise TypeError( "Expected list for 'values' argument to " "'pack' Op, not %r." 
% values) _attr_N = len(values) if axis is None: axis = 0 axis = _execute.make_int(axis, "axis") _attr_T, values = _execute.args_to_matching_eager(list(values), ctx) _inputs_flat = list(values) _attrs = ("N", _attr_N, "T", _attr_T, "axis", axis) _result = _execute.execute(b"Pack", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "Pack", _inputs_flat, _attrs, _result) _result, = _result return _result def pad(input, paddings, name=None): r"""Pads a tensor with zeros. This operation pads a `input` with zeros according to the `paddings` you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates how many zeros to add before the contents of `input` in that dimension, and `paddings[D, 1]` indicates how many zeros to add after the contents of `input` in that dimension. The padded size of each dimension D of the output is: `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` For example: ``` # 't' is [[1, 1], [2, 2]] # 'paddings' is [[1, 1], [2, 2]] # rank of 't' is 2 pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] [0, 0, 1, 1, 0, 0] [0, 0, 2, 2, 0, 0] [0, 0, 0, 0, 0, 0]] ``` Args: input: A `Tensor`. paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "Pad", name, tld.op_callbacks, input, paddings) return _result except _core._FallbackException: try: return pad_eager_fallback( input, paddings, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "Pad", input=input, paddings=paddings, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings")) _inputs_flat = _op.inputs _execute.record_gradient( "Pad", _inputs_flat, _attrs, _result) _result, = _result return _result Pad = tf_export("raw_ops.Pad")(_ops.to_raw_op(pad)) def pad_eager_fallback(input, paddings, name, ctx): _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx) _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], ctx, _dtypes.int32) _inputs_flat = [input, paddings] _attrs = ("T", _attr_T, "Tpaddings", _attr_Tpaddings) _result = _execute.execute(b"Pad", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "Pad", _inputs_flat, _attrs, _result) _result, = _result return _result def pad_v2(input, paddings, constant_values, name=None): r"""Pads a tensor. This operation pads `input` according to the `paddings` and `constant_values` you specify. `paddings` is an integer tensor with shape `[Dn, 2]`, where n is the rank of `input`. For each dimension D of `input`, `paddings[D, 0]` indicates how many padding values to add before the contents of `input` in that dimension, and `paddings[D, 1]` indicates how many padding values to add after the contents of `input` in that dimension. 
`constant_values` is a scalar tensor of the same type as `input` that indicates the value to use for padding `input`. The padded size of each dimension D of the output is: `paddings(D, 0) + input.dim_size(D) + paddings(D, 1)` For example: ``` # 't' is [[1, 1], [2, 2]] # 'paddings' is [[1, 1], [2, 2]] # 'constant_values' is 0 # rank of 't' is 2 pad(t, paddings) ==> [[0, 0, 0, 0, 0, 0] [0, 0, 1, 1, 0, 0] [0, 0, 2, 2, 0, 0] [0, 0, 0, 0, 0, 0]] ``` Args: input: A `Tensor`. paddings: A `Tensor`. Must be one of the following types: `int32`, `int64`. constant_values: A `Tensor`. Must have the same type as `input`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "PadV2", name, tld.op_callbacks, input, paddings, constant_values) return _result except _core._FallbackException: try: return pad_v2_eager_fallback( input, paddings, constant_values, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. _, _, _op, _outputs = _op_def_library._apply_op_helper( "PadV2", input=input, paddings=paddings, constant_values=constant_values, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T"), "Tpaddings", _op._get_attr_type("Tpaddings")) _inputs_flat = _op.inputs _execute.record_gradient( "PadV2", _inputs_flat, _attrs, _result) _result, = _result return _result PadV2 = tf_export("raw_ops.PadV2")(_ops.to_raw_op(pad_v2)) def pad_v2_eager_fallback(input, paddings, constant_values, name, ctx): _attr_T, _inputs_T = _execute.args_to_matching_eager([input, constant_values], ctx) (input, constant_values) = _inputs_T _attr_Tpaddings, (paddings,) = _execute.args_to_matching_eager([paddings], ctx, _dtypes.int32) _inputs_flat = [input, paddings, constant_values] _attrs = ("T", _attr_T, "Tpaddings", _attr_Tpaddings) _result = _execute.execute(b"PadV2", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "PadV2", _inputs_flat, _attrs, _result) _result, = _result return _result def parallel_concat(values, shape, name=None): r"""Concatenates a list of `N` tensors along the first dimension. The input tensors are all required to have size 1 in the first dimension. For example: ``` # 'x' is [[1, 4]] # 'y' is [[2, 5]] # 'z' is [[3, 6]] parallel_concat([x, y, z]) => [[1, 4], [2, 5], [3, 6]] # Pack along first dim. ``` The difference between concat and parallel_concat is that concat requires all of the inputs be computed before the operation will begin but doesn't require that the input shapes be known during graph construction. Parallel concat will copy pieces of the input into the output as they become available, in some situations this can provide a performance benefit. Args: values: A list of at least 1 `Tensor` objects with the same type. Tensors to be concatenated. All must have size 1 in the first dimension and same shape. shape: A `tf.TensorShape` or list of `ints`. the final shape of the result; should be equal to the shapes of any input but with the number of input values in the first dimension. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `values`. 
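A minimal sketch of the example above via the `tf.raw_ops.ParallelConcat` endpoint exported below (TF 2.x eager mode assumed; note that `shape` must be fully known up front):

```
import tensorflow as tf

x = tf.constant([[1, 4]])
y = tf.constant([[2, 5]])
z = tf.constant([[3, 6]])
# Every input must have size 1 in its first dimension; `shape` is the final result shape.
out = tf.raw_ops.ParallelConcat(values=[x, y, z], shape=[3, 2])
# out: [[1, 4], [2, 5], [3, 6]]
```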
""" _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "ParallelConcat", name, tld.op_callbacks, values, "shape", shape) return _result except _core._FallbackException: try: return parallel_concat_eager_fallback( values, shape=shape, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. if not isinstance(values, (list, tuple)): raise TypeError( "Expected list for 'values' argument to " "'parallel_concat' Op, not %r." % values) _attr_N = len(values) shape = _execute.make_shape(shape, "shape") _, _, _op, _outputs = _op_def_library._apply_op_helper( "ParallelConcat", values=values, shape=shape, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"), "shape", _op.get_attr("shape")) _inputs_flat = _op.inputs _execute.record_gradient( "ParallelConcat", _inputs_flat, _attrs, _result) _result, = _result return _result ParallelConcat = tf_export("raw_ops.ParallelConcat")(_ops.to_raw_op(parallel_concat)) def parallel_concat_eager_fallback(values, shape, name, ctx): if not isinstance(values, (list, tuple)): raise TypeError( "Expected list for 'values' argument to " "'parallel_concat' Op, not %r." % values) _attr_N = len(values) shape = _execute.make_shape(shape, "shape") _attr_T, values = _execute.args_to_matching_eager(list(values), ctx) _inputs_flat = list(values) _attrs = ("N", _attr_N, "T", _attr_T, "shape", shape) _result = _execute.execute(b"ParallelConcat", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "ParallelConcat", _inputs_flat, _attrs, _result) _result, = _result return _result def placeholder(dtype, shape=None, name=None): r"""A placeholder op for a value that will be fed into the computation. N.B. This operation will fail with an error if it is executed. It is intended as a way to represent a value that will always be fed, and to provide attrs that enable the fed value to be checked at runtime. Args: dtype: A `tf.DType`. The type of elements in the tensor. shape: An optional `tf.TensorShape` or list of `ints`. Defaults to `None`. (Optional) The shape of the tensor. If the shape has 0 dimensions, the shape is unconstrained. name: A name for the operation (optional). Returns: A `Tensor` of type `dtype`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "Placeholder", name, tld.op_callbacks, "dtype", dtype, "shape", shape) return _result except _core._FallbackException: try: return placeholder_eager_fallback( dtype=dtype, shape=shape, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. 
dtype = _execute.make_type(dtype, "dtype") if shape is None: shape = None shape = _execute.make_shape(shape, "shape") _, _, _op, _outputs = _op_def_library._apply_op_helper( "Placeholder", dtype=dtype, shape=shape, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape")) _inputs_flat = _op.inputs _execute.record_gradient( "Placeholder", _inputs_flat, _attrs, _result) _result, = _result return _result Placeholder = tf_export("raw_ops.Placeholder")(_ops.to_raw_op(placeholder)) def placeholder_eager_fallback(dtype, shape, name, ctx): dtype = _execute.make_type(dtype, "dtype") if shape is None: shape = None shape = _execute.make_shape(shape, "shape") _inputs_flat = [] _attrs = ("dtype", dtype, "shape", shape) _result = _execute.execute(b"Placeholder", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "Placeholder", _inputs_flat, _attrs, _result) _result, = _result return _result def placeholder_v2(dtype, shape, name=None): r"""A placeholder op for a value that will be fed into the computation. N.B. This operation will fail with an error if it is executed. It is intended as a way to represent a value that will always be fed, and to provide attrs that enable the fed value to be checked at runtime. Args: dtype: A `tf.DType`. The type of elements in the tensor. shape: A `tf.TensorShape` or list of `ints`. The shape of the tensor. The shape can be any partially-specified shape. To be unconstrained, pass in a shape with unknown rank. name: A name for the operation (optional). Returns: A `Tensor` of type `dtype`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "PlaceholderV2", name, tld.op_callbacks, "dtype", dtype, "shape", shape) return _result except _core._FallbackException: try: return placeholder_v2_eager_fallback( dtype=dtype, shape=shape, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. dtype = _execute.make_type(dtype, "dtype") shape = _execute.make_shape(shape, "shape") _, _, _op, _outputs = _op_def_library._apply_op_helper( "PlaceholderV2", dtype=dtype, shape=shape, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape")) _inputs_flat = _op.inputs _execute.record_gradient( "PlaceholderV2", _inputs_flat, _attrs, _result) _result, = _result return _result PlaceholderV2 = tf_export("raw_ops.PlaceholderV2")(_ops.to_raw_op(placeholder_v2)) def placeholder_v2_eager_fallback(dtype, shape, name, ctx): dtype = _execute.make_type(dtype, "dtype") shape = _execute.make_shape(shape, "shape") _inputs_flat = [] _attrs = ("dtype", dtype, "shape", shape) _result = _execute.execute(b"PlaceholderV2", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "PlaceholderV2", _inputs_flat, _attrs, _result) _result, = _result return _result def placeholder_with_default(input, shape, name=None): r"""A placeholder op that passes through `input` when its output is not fed. Args: input: A `Tensor`. The default value to produce when `output` is not fed. shape: A `tf.TensorShape` or list of `ints`. 
The (possibly partial) shape of the tensor. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "PlaceholderWithDefault", name, tld.op_callbacks, input, "shape", shape) return _result except _core._FallbackException: try: return placeholder_with_default_eager_fallback( input, shape=shape, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. shape = _execute.make_shape(shape, "shape") _, _, _op, _outputs = _op_def_library._apply_op_helper( "PlaceholderWithDefault", input=input, shape=shape, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("dtype", _op._get_attr_type("dtype"), "shape", _op.get_attr("shape")) _inputs_flat = _op.inputs _execute.record_gradient( "PlaceholderWithDefault", _inputs_flat, _attrs, _result) _result, = _result return _result PlaceholderWithDefault = tf_export("raw_ops.PlaceholderWithDefault")(_ops.to_raw_op(placeholder_with_default)) def placeholder_with_default_eager_fallback(input, shape, name, ctx): shape = _execute.make_shape(shape, "shape") _attr_dtype, (input,) = _execute.args_to_matching_eager([input], ctx) _inputs_flat = [input] _attrs = ("dtype", _attr_dtype, "shape", shape) _result = _execute.execute(b"PlaceholderWithDefault", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "PlaceholderWithDefault", _inputs_flat, _attrs, _result) _result, = _result return _result def prevent_gradient(input, message="", name=None): r"""An identity op that triggers an error if a gradient is requested. When executed in a graph, this op outputs its input tensor as-is. When building ops to compute gradients, the TensorFlow gradient system will return an error when trying to lookup the gradient of this op, because no gradient must ever be registered for this function. This op exists to prevent subtle bugs from silently returning unimplemented gradients in some corner cases. Args: input: A `Tensor`. any tensor. message: An optional `string`. Defaults to `""`. Will be printed in the error when anyone tries to differentiate this operation. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "PreventGradient", name, tld.op_callbacks, input, "message", message) return _result except _core._FallbackException: try: return prevent_gradient_eager_fallback( input, message=message, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. 
if message is None: message = "" message = _execute.make_str(message, "message") _, _, _op, _outputs = _op_def_library._apply_op_helper( "PreventGradient", input=input, message=message, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("T", _op._get_attr_type("T"), "message", _op.get_attr("message")) _inputs_flat = _op.inputs _execute.record_gradient( "PreventGradient", _inputs_flat, _attrs, _result) _result, = _result return _result PreventGradient = tf_export("raw_ops.PreventGradient")(_ops.to_raw_op(prevent_gradient)) def prevent_gradient_eager_fallback(input, message, name, ctx): if message is None: message = "" message = _execute.make_str(message, "message") _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx) _inputs_flat = [input] _attrs = ("T", _attr_T, "message", message) _result = _execute.execute(b"PreventGradient", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "PreventGradient", _inputs_flat, _attrs, _result) _result, = _result return _result def quantize_and_dequantize(input, signed_input=True, num_bits=8, range_given=False, input_min=0, input_max=0, name=None): r"""Use QuantizeAndDequantizeV2 instead. Args: input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`. signed_input: An optional `bool`. Defaults to `True`. num_bits: An optional `int`. Defaults to `8`. range_given: An optional `bool`. Defaults to `False`. input_min: An optional `float`. Defaults to `0`. input_max: An optional `float`. Defaults to `0`. name: A name for the operation (optional). Returns: A `Tensor`. Has the same type as `input`. """ _ctx = _context._context or _context.context() tld = _ctx._thread_local_data if tld.is_eager: try: _result = pywrap_tfe.TFE_Py_FastPathExecute( _ctx._context_handle, tld.device_name, "QuantizeAndDequantize", name, tld.op_callbacks, input, "signed_input", signed_input, "num_bits", num_bits, "range_given", range_given, "input_min", input_min, "input_max", input_max) return _result except _core._FallbackException: try: return quantize_and_dequantize_eager_fallback( input, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max, name=name, ctx=_ctx) except _core._SymbolicException: pass # Add nodes to the TensorFlow graph. except _core._NotOkStatusException as e: _ops.raise_from_not_ok_status(e, name) # Add nodes to the TensorFlow graph. 
if signed_input is None: signed_input = True signed_input = _execute.make_bool(signed_input, "signed_input") if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if range_given is None: range_given = False range_given = _execute.make_bool(range_given, "range_given") if input_min is None: input_min = 0 input_min = _execute.make_float(input_min, "input_min") if input_max is None: input_max = 0 input_max = _execute.make_float(input_max, "input_max") _, _, _op, _outputs = _op_def_library._apply_op_helper( "QuantizeAndDequantize", input=input, signed_input=signed_input, num_bits=num_bits, range_given=range_given, input_min=input_min, input_max=input_max, name=name) _result = _outputs[:] if _execute.must_record_gradient(): _attrs = ("signed_input", _op._get_attr_bool("signed_input"), "num_bits", _op._get_attr_int("num_bits"), "range_given", _op._get_attr_bool("range_given"), "input_min", _op.get_attr("input_min"), "input_max", _op.get_attr("input_max"), "T", _op._get_attr_type("T")) _inputs_flat = _op.inputs _execute.record_gradient( "QuantizeAndDequantize", _inputs_flat, _attrs, _result) _result, = _result return _result QuantizeAndDequantize = tf_export("raw_ops.QuantizeAndDequantize")(_ops.to_raw_op(quantize_and_dequantize)) def quantize_and_dequantize_eager_fallback(input, signed_input, num_bits, range_given, input_min, input_max, name, ctx): if signed_input is None: signed_input = True signed_input = _execute.make_bool(signed_input, "signed_input") if num_bits is None: num_bits = 8 num_bits = _execute.make_int(num_bits, "num_bits") if range_given is None: range_given = False range_given = _execute.make_bool(range_given, "range_given") if input_min is None: input_min = 0 input_min = _execute.make_float(input_min, "input_min") if input_max is None: input_max = 0 input_max = _execute.make_float(input_max, "input_max") _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx) _inputs_flat = [input] _attrs = ("signed_input", signed_input, "num_bits", num_bits, "range_given", range_given, "input_min", input_min, "input_max", input_max, "T", _attr_T) _result = _execute.execute(b"QuantizeAndDequantize", 1, inputs=_inputs_flat, attrs=_attrs, ctx=ctx, name=name) if _execute.must_record_gradient(): _execute.record_gradient( "QuantizeAndDequantize", _inputs_flat, _attrs, _result) _result, = _result return _result def quantize_and_dequantize_v2(input, input_min, input_max, signed_input=True, num_bits=8, range_given=False, round_mode="HALF_TO_EVEN", narrow_range=False, axis=-1, name=None): r"""Quantizes then dequantizes a tensor. This op simulates the precision loss from the quantized forward pass by: 1. Quantizing the tensor to fixed point numbers, which should match the target quantization method when it is used in inference. 2. Dequantizing it back to floating point numbers for the following ops, most likely matmul. There are different ways to quantize. This version uses only scaling, so 0.0 maps to 0. From the specified 'num_bits' in the quantized output type, it determines minimum and maximum representable quantized values. e.g. * [-128, 127] for signed, num_bits = 8, or * [0, 255] for unsigned, num_bits = 8. If range_given == False, the initial input_min, input_max will be determined automatically as the minimum and maximum values in the input tensor, otherwise the specified values of input_min, input_max are used. Note: If the input_min, input_max are specified, they do not need to equal the actual minimum and maximum values in the tensor. e.g. 


def quantize_and_dequantize_eager_fallback(input, signed_input, num_bits, range_given, input_min, input_max, name, ctx):
  if signed_input is None:
    signed_input = True
  signed_input = _execute.make_bool(signed_input, "signed_input")
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if range_given is None:
    range_given = False
  range_given = _execute.make_bool(range_given, "range_given")
  if input_min is None:
    input_min = 0
  input_min = _execute.make_float(input_min, "input_min")
  if input_max is None:
    input_max = 0
  input_max = _execute.make_float(input_max, "input_max")
  _attr_T, (input,) = _execute.args_to_matching_eager([input], ctx)
  _inputs_flat = [input]
  _attrs = ("signed_input", signed_input, "num_bits", num_bits,
            "range_given", range_given, "input_min", input_min, "input_max",
            input_max, "T", _attr_T)
  _result = _execute.execute(b"QuantizeAndDequantize", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "QuantizeAndDequantize", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result


def quantize_and_dequantize_v2(input, input_min, input_max, signed_input=True, num_bits=8, range_given=False, round_mode="HALF_TO_EVEN", narrow_range=False, axis=-1, name=None):
  r"""Quantizes then dequantizes a tensor.

  This op simulates the precision loss from the quantized forward pass by:

  1. Quantizing the tensor to fixed point numbers, which should match the
     target quantization method when it is used in inference.
  2. Dequantizing it back to floating point numbers for the following ops,
     most likely matmul.

  There are different ways to quantize. This version uses only scaling, so 0.0
  maps to 0.

  From the specified 'num_bits' in the quantized output type, it determines
  minimum and maximum representable quantized values.

  e.g.

  *   [-128, 127] for signed, num_bits = 8, or
  *   [0, 255] for unsigned, num_bits = 8.

  If range_given == False, the initial input_min, input_max will be determined
  automatically as the minimum and maximum values in the input tensor, otherwise
  the specified values of input_min, input_max are used.

  Note: If the input_min, input_max are specified, they do not need to equal the
  actual minimum and maximum values in the tensor. e.g. in some cases it may be
  beneficial to specify these values such that the low probability extremes of
  the input distribution are clipped.

  This op determines the maximum scale_factor that would map the initial
  [input_min, input_max] range to a range that lies within the representable
  quantized range.

  It determines the scale from one of input_min and input_max, then updates the
  other one to maximize the representable range.

  e.g.

  *   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
      5.0]: it would use a scale_factor of -128 / -10.0 = 12.8. In this case, it
      would update input_max to be 127 / 12.8 = 9.921875
  *   if the output is signed, num_bits = 8, [input_min, input_max] = [-10.0,
      10.0]: it would use a scale_factor of 127 / 10.0 = 12.7. In this case, it
      would update input_min to be -128.0 / 12.7 = -10.07874
  *   if the output is unsigned, input_min is forced to be 0, and only the
      specified input_max is used.

  After determining the scale_factor and updating the input range, it applies the
  following to each value in the 'input' tensor.

  output = round(clamp(value, input_min, input_max) * scale_factor) / scale_factor.

  The above round function rounds the value based on the given round_mode.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
      Tensor to quantize and then dequantize.
    input_min: A `Tensor`. Must have the same type as `input`.
      If `range_given == True`, this specifies the minimum input value that needs to
      be represented, otherwise it is determined from the min value of the `input`
      tensor.
    input_max: A `Tensor`. Must have the same type as `input`.
      If `range_given == True`, this specifies the maximum input value that needs to
      be represented, otherwise it is determined from the max value of the `input`
      tensor.
    signed_input: An optional `bool`. Defaults to `True`.
      Whether the quantization is signed or unsigned. (actually this parameter should
      have been called <b>`signed_output`</b>)
    num_bits: An optional `int`. Defaults to `8`.
      The bitwidth of the quantization.
    range_given: An optional `bool`. Defaults to `False`.
      Whether the range is given or should be determined from the `input` tensor.
    round_mode: An optional `string` from: `"HALF_TO_EVEN", "HALF_UP"`. Defaults to `"HALF_TO_EVEN"`.
      The 'round_mode' attribute controls which rounding tie-breaking algorithm is
      used when rounding float values to their quantized equivalents. The following
      rounding modes are currently supported:

      *   HALF_TO_EVEN: this is the default round_mode.
      *   HALF_UP: round towards positive. In this mode 7.5 rounds up to 8 and -7.5
          rounds up to -7.

    narrow_range: An optional `bool`. Defaults to `False`.
      If True, then the absolute value of the quantized minimum value is the same as
      the quantized maximum value, instead of 1 greater.
      i.e. for 8 bit quantization, the minimum value is -127 instead of -128.
    axis: An optional `int`. Defaults to `-1`.
      If specified, this axis is treated as a channel or slice axis, and a separate
      quantization range is used for each channel or slice along this axis.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "QuantizeAndDequantizeV2",
        name, tld.op_callbacks, input, input_min, input_max, "signed_input",
        signed_input, "num_bits", num_bits, "range_given", range_given,
        "round_mode", round_mode, "narrow_range", narrow_range, "axis", axis)
      return _result
    except _core._FallbackException:
      try:
        return quantize_and_dequantize_v2_eager_fallback(
            input, input_min, input_max, signed_input=signed_input,
            num_bits=num_bits, range_given=range_given,
            round_mode=round_mode, narrow_range=narrow_range, axis=axis,
            name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  if signed_input is None:
    signed_input = True
  signed_input = _execute.make_bool(signed_input, "signed_input")
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if range_given is None:
    range_given = False
  range_given = _execute.make_bool(range_given, "range_given")
  if round_mode is None:
    round_mode = "HALF_TO_EVEN"
  round_mode = _execute.make_str(round_mode, "round_mode")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  if axis is None:
    axis = -1
  axis = _execute.make_int(axis, "axis")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "QuantizeAndDequantizeV2", input=input, input_min=input_min,
                                   input_max=input_max,
                                   signed_input=signed_input,
                                   num_bits=num_bits,
                                   range_given=range_given,
                                   round_mode=round_mode,
                                   narrow_range=narrow_range, axis=axis,
                                   name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("signed_input", _op._get_attr_bool("signed_input"), "num_bits",
              _op._get_attr_int("num_bits"), "range_given",
              _op._get_attr_bool("range_given"), "T",
              _op._get_attr_type("T"), "round_mode",
              _op.get_attr("round_mode"), "narrow_range",
              _op._get_attr_bool("narrow_range"), "axis",
              _op._get_attr_int("axis"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "QuantizeAndDequantizeV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

QuantizeAndDequantizeV2 = tf_export("raw_ops.QuantizeAndDequantizeV2")(_ops.to_raw_op(quantize_and_dequantize_v2))


def quantize_and_dequantize_v2_eager_fallback(input, input_min, input_max, signed_input, num_bits, range_given, round_mode, narrow_range, axis, name, ctx):
  if signed_input is None:
    signed_input = True
  signed_input = _execute.make_bool(signed_input, "signed_input")
  if num_bits is None:
    num_bits = 8
  num_bits = _execute.make_int(num_bits, "num_bits")
  if range_given is None:
    range_given = False
  range_given = _execute.make_bool(range_given, "range_given")
  if round_mode is None:
    round_mode = "HALF_TO_EVEN"
  round_mode = _execute.make_str(round_mode, "round_mode")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  if axis is None:
    axis = -1
  axis = _execute.make_int(axis, "axis")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, input_min, input_max], ctx)
  (input, input_min, input_max) = _inputs_T
  _inputs_flat = [input, input_min, input_max]
  _attrs = ("signed_input", signed_input, "num_bits", num_bits,
            "range_given", range_given, "T", _attr_T, "round_mode",
            round_mode, "narrow_range", narrow_range, "axis", axis)
  _result = _execute.execute(b"QuantizeAndDequantizeV2", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "QuantizeAndDequantizeV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
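
# Illustrative sketch (added for this page; not produced by the op generator):
# a pure-Python reproduction of the scale-factor arithmetic described in the
# quantize_and_dequantize_v2 docstring above, for the signed, num_bits=8,
# non-narrow-range case. `_example_qdq_v2_scale_factor` is a hypothetical
# helper that only makes the docstring's numbers easy to check.
def _example_qdq_v2_scale_factor(input_min, input_max, num_bits=8):
  # Representable quantized range for signed, non-narrow-range values.
  min_quantized = -(1 << (num_bits - 1))      # -128 for num_bits = 8
  max_quantized = (1 << (num_bits - 1)) - 1   #  127 for num_bits = 8
  # Take the scale from whichever endpoint is more constraining, then widen
  # the other endpoint so the full quantized range stays representable.
  scale_from_min = min_quantized / input_min if input_min < 0 else float("inf")
  scale_from_max = max_quantized / input_max if input_max > 0 else float("inf")
  scale_factor = min(scale_from_min, scale_from_max)
  return scale_factor, min_quantized / scale_factor, max_quantized / scale_factor

# For [input_min, input_max] = [-10.0, 5.0] this yields a scale_factor of 12.8
# and an adjusted range of [-10.0, 9.921875]; for [-10.0, 10.0] it yields 12.7
# and roughly [-10.07874, 10.0], matching the examples in the docstring above.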


def quantize_and_dequantize_v3(input, input_min, input_max, num_bits, signed_input=True, range_given=True, narrow_range=False, axis=-1, name=None):
  r"""Quantizes then dequantizes a tensor.

  This is almost identical to QuantizeAndDequantizeV2, except that num_bits is a
  tensor, so its value can change during training.

  Args:
    input: A `Tensor`. Must be one of the following types: `bfloat16`, `half`, `float32`, `float64`.
    input_min: A `Tensor`. Must have the same type as `input`.
    input_max: A `Tensor`. Must have the same type as `input`.
    num_bits: A `Tensor` of type `int32`.
    signed_input: An optional `bool`. Defaults to `True`.
    range_given: An optional `bool`. Defaults to `True`.
    narrow_range: An optional `bool`. Defaults to `False`.
    axis: An optional `int`. Defaults to `-1`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx._context_handle, tld.device_name, "QuantizeAndDequantizeV3",
        name, tld.op_callbacks, input, input_min, input_max, num_bits,
        "signed_input", signed_input, "range_given", range_given,
        "narrow_range", narrow_range, "axis", axis)
      return _result
    except _core._FallbackException:
      try:
        return quantize_and_dequantize_v3_eager_fallback(
            input, input_min, input_max, num_bits,
            signed_input=signed_input, range_given=range_given,
            narrow_range=narrow_range, axis=axis, name=name, ctx=_ctx)
      except _core._SymbolicException:
        pass  # Add nodes to the TensorFlow graph.
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
  # Add nodes to the TensorFlow graph.
  if signed_input is None:
    signed_input = True
  signed_input = _execute.make_bool(signed_input, "signed_input")
  if range_given is None:
    range_given = True
  range_given = _execute.make_bool(range_given, "range_given")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  if axis is None:
    axis = -1
  axis = _execute.make_int(axis, "axis")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "QuantizeAndDequantizeV3", input=input, input_min=input_min,
                                   input_max=input_max, num_bits=num_bits,
                                   signed_input=signed_input,
                                   range_given=range_given,
                                   narrow_range=narrow_range, axis=axis,
                                   name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("signed_input", _op._get_attr_bool("signed_input"),
              "range_given", _op._get_attr_bool("range_given"), "T",
              _op._get_attr_type("T"), "narrow_range",
              _op._get_attr_bool("narrow_range"), "axis",
              _op._get_attr_int("axis"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "QuantizeAndDequantizeV3", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

QuantizeAndDequantizeV3 = tf_export("raw_ops.QuantizeAndDequantizeV3")(_ops.to_raw_op(quantize_and_dequantize_v3))


def quantize_and_dequantize_v3_eager_fallback(input, input_min, input_max, num_bits, signed_input, range_given, narrow_range, axis, name, ctx):
  if signed_input is None:
    signed_input = True
  signed_input = _execute.make_bool(signed_input, "signed_input")
  if range_given is None:
    range_given = True
  range_given = _execute.make_bool(range_given, "range_given")
  if narrow_range is None:
    narrow_range = False
  narrow_range = _execute.make_bool(narrow_range, "narrow_range")
  if axis is None:
    axis = -1
  axis = _execute.make_int(axis, "axis")
  _attr_T, _inputs_T = _execute.args_to_matching_eager([input, input_min, input_max], ctx)
  (input, input_min, input_max) = _inputs_T
  num_bits = _ops.convert_to_tensor(num_bits, _dtypes.int32)
  _inputs_flat = [input, input_min, input_max, num_bits]
  _attrs = ("signed_input", signed_input, "range_given", range_given, "T",
            _attr_T, "narrow_range", narrow_range, "axis", axis)
  _result = _execute.execute(b"QuantizeAndDequantizeV3", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "QuantizeAndDequantizeV3", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

_QuantizeV2Output = collections.namedtuple(
    "QuantizeV2", ["output", "output_min", "output_max"])
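
# Illustrative sketch (added for this page; not produced by the op generator):
# a pure-Python version of the MIN_COMBINED formula described in the
# quantize_v2 docstring below, applied to its example of mapping floats in
# [0.0, 6.0] onto quint8. `_example_min_combined_quantize` is a hypothetical
# helper; it ignores clamping and the qint8 offset, so treat the results as
# approximate.
def _example_min_combined_quantize(values, min_range=0.0, max_range=6.0, t_range=255):
  # out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
  scale = t_range / (max_range - min_range)   # 255 / 6 for the docstring example
  return [round((v - min_range) * scale) for v in values]

# _example_min_combined_quantize([0.0, 3.0, 6.0]) is expected to give roughly
# [0, 128, 255]; the op itself applies the rounding and clamping described in
# the docstring below.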


def quantize_v2(input, min_range, max_range, T, mode="MIN_COMBINED", round_mode="HALF_AWAY_FROM_ZERO", narrow_range=False, axis=-1, ensure_minimum_range=0.01, name=None):
  r"""Quantize the 'input' tensor of type float to 'output' tensor of type 'T'.

  [min_range, max_range] are scalar floats that specify the range for
  the 'input' data. The 'mode' attribute controls exactly which calculations are
  used to convert the float values to their quantized equivalents.  The
  'round_mode' attribute controls which rounding tie-breaking algorithm is used
  when rounding float values to their quantized equivalents.

  In 'MIN_COMBINED' mode, each value of the tensor will undergo the following:

  ```
  out[i] = (in[i] - min_range) * range(T) / (max_range - min_range)
  if T == qint8: out[i] -= (range(T) + 1) / 2.0
  ```

  here `range(T) = numeric_limits<T>::max() - numeric_limits<T>::min()`

  *MIN_COMBINED Mode Example*

  Assume the input is type float and has a possible range of [0.0, 6.0] and the
  output type is quint8 ([0, 255]). The min_range and max_range values should be
  specified as 0.0 and 6.0. Quantizing from float to quint8 will multiply each
  value of the input by 255/6 and cast to quint8.

  If the output type was qint8 ([-128, 127]), the operation will additionally
  subtract 128 from each value prior to casting, so that the range of values
  aligns with the range of qint8.

  If the mode is 'MIN_FIRST', then this approach is used:

  ```
  num_discrete_values = 1 << (# of bits in T)
  range_adjust = num_discrete_values / (num_discrete_values - 1)
  range = (range_max - range_min) * range_adjust
  range_scale = num_discrete_values / range
  quantized = round(input * range_scale) - round(range_min * range_scale) +
    numeric_limits<T>::min()
  quantized = max(quantized, numeric_limits<T>::min())
  quantized = min(quantized, numeric_limits<T>::max())
  ```

  The biggest difference between this and MIN_COMBINED is that the minimum range
  is rounded first, before it's subtracted from the rounded value. With
  MIN_COMBINED, a small bias is introduced where repeated iterations of quantizing
  and dequantizing will introduce a larger and larger error.

  *SCALED mode Example*

  `SCALED` mode matches the quantization approach used in
  `QuantizeAndDequantize{V2|V3}`.

  If the mode is `SCALED`, the quantization is performed by multiplying each
  input value by a scaling_factor. The scaling_factor is determined from
  `min_range` and `max_range` to be as large as possible such that the range
  from `min_range` to `max_range` is representable within values of type T.

  ```c++
  const int min_T = std::numeric_limits<T>::min();
  const int max_T = std::numeric_limits<T>::max();
  const float max_f