# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Math Operations.

Note: Functions taking `Tensor` arguments can also take anything accepted by
`tf.convert_to_tensor`.

Note: Elementwise binary operations in TensorFlow follow [numpy-style
broadcasting](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).

TensorFlow provides a variety of math functions including:

* Basic arithmetic operators and trigonometric functions.
* Special math functions (like: `tf.math.igamma` and `tf.math.zeta`)
* Complex number functions (like: `tf.math.imag` and `tf.math.angle`)
* Reductions and scans (like: `tf.math.reduce_mean` and `tf.math.cumsum`)
* Segment functions (like: `tf.math.segment_sum`)

See: `tf.linalg` for matrix and tensor functions.

<a id=Segmentation></a>

## About Segmentation

TensorFlow provides several operations that you can use to perform common
math computations on tensor segments.
Here a segmentation is a partitioning of a tensor along
the first dimension, i.e. it defines a mapping from the first dimension onto
`segment_ids`. The `segment_ids` tensor should be the size of
the first dimension, `d0`, with consecutive IDs in the range `0` to `k`,
where `k<d0`.
In particular, a segmentation of a matrix tensor is a mapping of rows to
segments.

For example:

```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.segment_sum(c, tf.constant([0, 0, 1]))
#  ==>  [[0 0 0 0]
#        [5 6 7 8]]
```

The standard `segment_*` functions assert that the segment indices are sorted.
If you have unsorted indices use the equivalent `unsorted_segment_` function.
These functions take an additional argument `num_segments` so that the output
tensor can be efficiently allocated.

```python
c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
tf.math.unsorted_segment_sum(c, tf.constant([0, 1, 0]), num_segments=2)
#  ==>  [[ 6,  8, 10, 12],
#        [-1, -2, -3, -4]]
```
"""
import builtins
import numbers
import numpy as np

from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.framework import tensor_conversion_registry
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_util
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import array_ops_stack
from tensorflow.python.ops import gen_array_ops
from tensorflow.python.ops import gen_bitwise_ops
from tensorflow.python.ops import gen_data_flow_ops
from tensorflow.python.ops import gen_math_ops
from tensorflow.python.ops import gen_nn_ops
from tensorflow.python.ops import gen_sparse_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_math_ops import *
# pylint: enable=wildcard-import
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.util import compat
from tensorflow.python.util import deprecation
from tensorflow.python.util import dispatch
from tensorflow.python.util import nest
from tensorflow.python.util import tf_decorator
from tensorflow.python.util import traceback_utils
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.lazy_loader import LazyLoader
from tensorflow.python.util.tf_export import tf_export


np_dtypes = LazyLoader(
    "np_dtypes", globals(),
    "tensorflow.python.ops.numpy_ops.np_dtypes")


# Aliases for some automatically-generated names.
nextafter = gen_math_ops.next_after


@tf_export("linspace", v1=["lin_space", "linspace"])
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("lin_space")
def linspace_nd(start, stop, num, name=None, axis=0):
  r"""Generates evenly-spaced values in an interval along a given axis.

  A sequence of `num` evenly-spaced values are generated beginning at `start`
  along a given `axis`.
  If `num > 1`, the values in the sequence increase by
  `(stop - start) / (num - 1)`, so that the last one is exactly `stop`.
  If `num < 0`, `ValueError` is raised.

  Matches
  [np.linspace](https://docs.scipy.org/doc/numpy/reference/generated/numpy.linspace.html)'s
  behaviour
  except when `num == 0`.

  For example:

  ```
  tf.linspace(10.0, 12.0, 3, name="linspace") => [ 10.0  11.0  12.0]
  ```

  `Start` and `stop` can be tensors of arbitrary size:

  >>> tf.linspace([0., 5.], [10., 40.], 5, axis=0)
  <tf.Tensor: shape=(5, 2), dtype=float32, numpy=
  array([[ 0.  ,  5.  ],
         [ 2.5 , 13.75],
         [ 5.  , 22.5 ],
         [ 7.5 , 31.25],
         [10.  , 40.  ]], dtype=float32)>

  `Axis` is where the values will be generated (the dimension in the
  returned tensor which corresponds to the axis will be equal to `num`):

  >>> tf.linspace([0., 5.], [10., 40.], 5, axis=-1)
  <tf.Tensor: shape=(2, 5), dtype=float32, numpy=
  array([[ 0.  ,  2.5 ,  5.  ,  7.5 , 10.  ],
         [ 5.  , 13.75, 22.5 , 31.25, 40.  ]], dtype=float32)>

  Args:
    start: A `Tensor`. Must be one of the following types: `bfloat16`,
      `float32`, `float64`. N-D tensor. First entry in the range.
    stop: A `Tensor`. Must have the same type and shape as `start`. N-D tensor.
      Last entry in the range.
    num: A `Tensor`. Must be one of the following types: `int32`, `int64`. 0-D
      tensor. Number of values to generate.
    name: A name for the operation (optional).
    axis: Axis along which the operation is performed (used only when N-D
      tensors are provided).

  Returns:
    A `Tensor`. Has the same type as `start`.
  """

  with ops.name_scope(name, "linspace", [start, stop]):
    start = ops.convert_to_tensor(start, name="start")
    # stop must be convertible to the same dtype as start
    stop = ops.convert_to_tensor(stop, name="stop", dtype=start.dtype)
    num_int = array_ops.convert_to_int_tensor(num, name="num")
    num = cast(num_int, dtype=start.dtype)

    broadcast_shape = array_ops.broadcast_dynamic_shape(
        array_ops.shape(start), array_ops.shape(stop))
    start = array_ops.broadcast_to(start, broadcast_shape)
    stop = array_ops.broadcast_to(stop, broadcast_shape)

    expanded_start = array_ops.expand_dims(start, axis=axis)
    expanded_stop = array_ops.expand_dims(stop, axis=axis)

    shape = array_ops.shape(expanded_start)
    ndims = array_ops.shape(shape)[0]

    axis = array_ops.where_v2(axis >= 0, axis, ndims + axis)

    # The purpose is to avoid having negative values when repeating.
    num_fill = gen_math_ops.maximum(num_int - 2, 0)
    # To avoid having negative values in the range or zero division,
    # the result is sliced at the end so a correct result is returned for
    # num == 1 and num == 0.
    n_steps = gen_math_ops.maximum(num_int - 1, 1)
    delta = (expanded_stop - expanded_start) / cast(n_steps,
                                                    expanded_stop.dtype)
    # Re-cast tensors as delta.
    expanded_start = cast(expanded_start, delta.dtype)
    expanded_stop = cast(expanded_stop, delta.dtype)
    # If num < 0, we will throw an exception in the range;
    # otherwise use the same div for delta.
    range_end = array_ops.where_v2(num_int >= 0, n_steps, -1)
    # Even though range supports an output dtype, it's limited
    # (e.g. it doesn't support half at the moment).
    desired_range = cast(range(1, range_end, dtype=dtypes.int64), delta.dtype)
    mask = gen_math_ops.equal(axis, range(ndims))
    # desired_range_shape is [1, 1, ..., 1, num_fill, 1, ..., 1], where the
    # index of num_fill is equal to axis.
    desired_range_shape = array_ops.where_v2(mask, num_fill, 1)
    desired_range = array_ops.reshape(desired_range, desired_range_shape)

    res = expanded_start + delta * desired_range

    # Add the start and endpoints to the result, and slice out the desired
    # portion.
    all_tensors = (expanded_start, res, expanded_stop)
    concatenated = array_ops.concat(all_tensors, axis=axis)
    begin = array_ops.zeros_like(shape)
    size = array_ops.where_v2(mask, num_int, shape)

    return array_ops.slice(concatenated, begin, size)
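

# Illustrative note (editor's sketch, not part of the library): the final
# concat+slice is what makes the num == 1 and num == 0 edge cases of
# linspace_nd work. The interior points, start, and stop are concatenated,
# then `size` keeps exactly `num` entries along `axis`:
#
#   tf.linspace(10.0, 12.0, 3)  # => [10.0, 11.0, 12.0]
#   tf.linspace(10.0, 12.0, 1)  # => [10.0]  (only the start survives the
#                               #    slice; delta used n_steps == 1)
#   tf.linspace(10.0, 12.0, 0)  # => []  (the slice keeps zero entries)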


linspace = linspace_nd

arg_max = deprecation.deprecated(None, "Use `tf.math.argmax` instead")(arg_max)  # pylint: disable=used-before-assignment
arg_min = deprecation.deprecated(None, "Use `tf.math.argmin` instead")(arg_min)  # pylint: disable=used-before-assignment
tf_export(v1=["arg_max"])(dispatch.add_dispatch_support(arg_max))
tf_export(v1=["arg_min"])(dispatch.add_dispatch_support(arg_min))


# This is set by resource_variable_ops.py. It is included in this way since
# there is a circular dependency between math_ops and resource_variable_ops.
_resource_variable_type = None


def _set_doc(doc):

  def _decorator(func):
    func.__doc__ = doc
    return func

  return _decorator


# pylint: disable=redefined-builtin
@tf_export(v1=["math.argmax", "argmax"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
                             "dimension")
@_set_doc(
    gen_math_ops.arg_max.__doc__.replace("dimensions",
                                         "axes").replace("dimension", "axis"))
def argmax(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
                                                dimension)
  return argmax_v2(input, axis, output_type, name)


@tf_export("math.argmax", "argmax", v1=[])
@dispatch.add_dispatch_support
def argmax_v2(input, axis=None, output_type=dtypes.int64, name=None):
  """Returns the index with the largest value across axes of a tensor.

  In case of ties, returns the smallest index.

  For example:

  >>> A = tf.constant([2, 20, 30, 3, 6])
  >>> tf.math.argmax(A)  # A[2] is maximum in tensor A
  <tf.Tensor: shape=(), dtype=int64, numpy=2>
  >>> B = tf.constant([[2, 20, 30, 3, 6], [3, 11, 16, 1, 8],
  ...                  [14, 45, 23, 5, 27]])
  >>> tf.math.argmax(B, 0)
  <tf.Tensor: shape=(5,), dtype=int64, numpy=array([2, 2, 0, 2, 2])>
  >>> tf.math.argmax(B, 1)
  <tf.Tensor: shape=(3,), dtype=int64, numpy=array([2, 2, 1])>
  >>> C = tf.constant([0, 0, 0, 0])
  >>> tf.math.argmax(C)  # Returns smallest index in case of ties
  <tf.Tensor: shape=(), dtype=int64, numpy=0>

  Args:
    input: A `Tensor`.
    axis: An integer, the axis to reduce across. Defaults to 0.
    output_type: An optional output dtype (`tf.int32` or `tf.int64`). Defaults
      to `tf.int64`.
    name: An optional name for the operation.

  Returns:
    A `Tensor` of type `output_type`.
  """
  if axis is None:
    axis = 0
  return gen_math_ops.arg_max(input, axis, name=name, output_type=output_type)


@tf_export(v1=["math.argmin", "argmin"])
@dispatch.add_dispatch_support
@deprecation.deprecated_args(None, "Use the `axis` argument instead",
                             "dimension")
@_set_doc(
    gen_math_ops.arg_min.__doc__.replace("dimensions",
                                         "axes").replace("dimension", "axis"))
def argmin(input,
           axis=None,
           name=None,
           dimension=None,
           output_type=dtypes.int64):
  axis = deprecation.deprecated_argument_lookup("axis", axis, "dimension",
                                                dimension)
  return argmin_v2(input, axis, output_type, name)


@tf_export("math.argmin", "argmin", v1=[])
@dispatch.add_dispatch_support
def argmin_v2(input, axis=None, output_type=dtypes.int64, name=None):
  """Returns the index with the smallest value across axes of a tensor.

  Returns the smallest index in case of ties.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`,
      `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`,
      `qint8`, `quint8`, `qint32`, `bfloat16`, `uint16`, `complex128`, `half`,
      `uint32`, `uint64`.
    axis: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      int32 or int64, must be in the range `[-rank(input), rank(input))`.
      Describes which axis of the input Tensor to reduce across. For vectors,
      use axis = 0.
    output_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to
      `tf.int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `output_type`.

  Usage:
  ```python
  import tensorflow as tf
  a = [1, 10, 26.9, 2.8, 166.32, 62.3]
  b = tf.math.argmin(input=a)
  c = tf.keras.backend.eval(b)
  # c = 0
  # here a[0] = 1, which is the smallest element of a across axis 0
  ```
  """
  if axis is None:
    axis = 0
  return gen_math_ops.arg_min(input, axis, name=name, output_type=output_type)


# pylint: enable=redefined-builtin


# pylint: disable=anomalous-backslash-in-string,protected-access
# pylint: disable=g-docstring-has-escape
@tf_export("math.abs", "abs")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def abs(x, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the absolute value of a tensor.

  Given a tensor of integer or floating-point values, this operation returns a
  tensor of the same type, where each element contains the absolute value of
  the corresponding element in the input.

  Given a tensor `x` of complex numbers, this operation returns a tensor of
  type `float32` or `float64` that is the absolute value of each element in
  `x`. For a complex number \\(a + bj\\), its absolute value is computed as
  \\(\sqrt{a^2 + b^2}\\).

  For example:

  >>> # real number
  >>> x = tf.constant([-2.25, 3.25])
  >>> tf.abs(x)
  <tf.Tensor: shape=(2,), dtype=float32,
  numpy=array([2.25, 3.25], dtype=float32)>

  >>> # complex number
  >>> x = tf.constant([[-2.25 + 4.75j], [-3.25 + 5.75j]])
  >>> tf.abs(x)
  <tf.Tensor: shape=(2, 1), dtype=float64, numpy=
  array([[5.25594901],
         [6.60492241]])>

  Args:
    x: A `Tensor` or `SparseTensor` of type `float16`, `float32`, `float64`,
      `int32`, `int64`, `complex64` or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` of the same size, type and sparsity as `x`,
    with absolute values. Note, for `complex64` or `complex128` input, the
    returned `Tensor` will be of type `float32` or `float64`, respectively.
  """
  with ops.name_scope(name, "Abs", [x]) as name:
    x = ops.convert_to_tensor(x, name="x")
    if x.dtype.is_complex:
      return gen_math_ops.complex_abs(x, Tout=x.dtype.real_dtype, name=name)
    return gen_math_ops._abs(x, name=name)


# pylint: enable=g-docstring-has-escape


# pylint: disable=redefined-builtin
def _bucketize(input, boundaries, name=None):
  return gen_math_ops.bucketize(input=input, boundaries=boundaries, name=name)


# pylint: enable=redefined-builtin


class DivideDelegateWithName:
  """Use Python2/Python3 division delegation to implement divide for tensors."""

  def __init__(self, x, name):
    """Construct DivideDelegateWithName.

    Args:
      x: Tensor to use as left operand in operator overloads
      name: The name that is preferred for the op created.
    """
    self.x = x
    self.name = name

  def __truediv__(self, y):
    return _truediv_python3(self.x, y, self.name)

  def __floordiv__(self, y):
    return floordiv(self.x, y, self.name)

  def __div__(self, y):
    return _div_python2(self.x, y, self.name)
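

# Illustrative note (editor's sketch, not part of the library): divide() below
# wraps `x` in DivideDelegateWithName only when an explicit `name` is passed,
# so the requested op name survives dispatch through the `/` operator:
#
#   DivideDelegateWithName(tf.constant(6.0), "my_div") / 2.0
#   # behaves like tf.math.truediv(tf.constant(6.0), 2.0, name="my_div")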


@tf_export("math.divide", "divide")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def divide(x, y, name=None):
  """Computes Python style division of `x` by `y`.

  For example:

  >>> x = tf.constant([16, 12, 11])
  >>> y = tf.constant([4, 6, 2])
  >>> tf.divide(x, y)
  <tf.Tensor: shape=(3,), dtype=float64,
  numpy=array([4. , 2. , 5.5])>

  Args:
    x: A `Tensor`
    y: A `Tensor`
    name: A name for the operation (optional).

  Returns:
    A `Tensor` with same shape as input
  """

  if name is not None:
    # Cannot use the tensor operator overload, because it has no way to track
    # override names. Use a dummy class to track the runtime division behavior.
    return DivideDelegateWithName(x, name) / y
  else:
    # We do conversion here to make sure at least x is a tensor.
    if not tensor_util.is_tf_type(x):
      dtype = y.dtype.base_dtype if tensor_util.is_tf_type(y) else None
      x = ops.convert_to_tensor(x, dtype=dtype)
    return x / y


@tf_export("math.multiply", "multiply")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def multiply(x, y, name=None):
  """Returns an element-wise x * y.

  For example:

  >>> x = tf.constant(([1, 2, 3, 4]))
  >>> tf.math.multiply(x, x)
  <tf.Tensor: shape=(4,), dtype=..., numpy=array([ 1,  4,  9, 16], dtype=int32)>

  Since `tf.math.multiply` will convert its arguments to `Tensor`s, you can
  also pass in non-`Tensor` arguments:

  >>> tf.math.multiply(7, 6)
  <tf.Tensor: shape=(), dtype=int32, numpy=42>

  If `x.shape` is not the same as `y.shape`, they will be broadcast to a
  compatible shape. (More about broadcasting
  [here](https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).)

  For example:

  >>> x = tf.ones([1, 2])
  >>> y = tf.ones([2, 1])
  >>> x * y  # Taking advantage of operator overriding
  <tf.Tensor: shape=(2, 2), dtype=float32, numpy=
  array([[1., 1.],
         [1., 1.]], dtype=float32)>

  The reduction version of this elementwise operation is `tf.math.reduce_prod`.

  Args:
    x: A Tensor. Must be one of the following types: `bfloat16`,
      `half`, `float32`, `float64`, `uint8`, `int8`, `uint16`,
      `int16`, `int32`, `int64`, `complex64`, `complex128`.
    y: A `Tensor`. Must have the same type as `x`.
    name: A name for the operation (optional).

  Returns:

  A `Tensor`. Has the same type as `x`.

  Raises:

   * InvalidArgumentError: When `x` and `y` have incompatible shapes or types.
  """

  return gen_math_ops.mul(x, y, name)


# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
    "2016-12-30",
    "`tf.mul(x, y)` is deprecated; use `tf.math.multiply(x, y)` or `x * y`")
def _mul(x, y, name=None):
  return gen_math_ops.mul(x, y, name)


_mul.__doc__ = (
    gen_math_ops.mul.__doc__ + ("" if _mul.__doc__ is None else _mul.__doc__))


@tf_export("math.subtract", "subtract")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def subtract(x, y, name=None):
  return gen_math_ops.sub(x, y, name)


subtract.__doc__ = gen_math_ops.sub.__doc__


# TODO(aselle): put deprecation in after another round of global code changes
@deprecation.deprecated(
    "2016-12-30",
    "`tf.sub(x, y)` is deprecated, please use `tf.subtract(x, y)` or `x - y`")
def _sub(x, y, name=None):
  return gen_math_ops.sub(x, y, name)


_sub.__doc__ = (
    gen_math_ops.sub.__doc__ + ("" if _sub.__doc__ is None else _sub.__doc__))


negative = gen_math_ops.neg


# pylint: disable=g-docstring-has-escape
@deprecation.deprecated(
    "2016-12-30",
    "`tf.neg(x)` is deprecated, please use `tf.negative(x)` or `-x`")
def _neg(x, name=None):
  """Computes numerical negative value element-wise.

  I.e., \\(y = -x\\).

  Args:
    x: A `Tensor` or `SparseTensor`. Must be one of the following types: `half`,
      `float32`, `float64`, `int32`, `int64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor`, respectively. Has the same type as `x`.
  """
  return negative(x, name)


# pylint: enable=g-docstring-has-escape


@tf_export(v1=["math.scalar_mul", "scalar_mul"])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def scalar_mul(scalar, x, name=None):
  """Multiplies a scalar times a `Tensor` or `IndexedSlices` object.

  This is a special case of `tf.math.multiply`, where the first value must be a
  `scalar`. Unlike the general form of `tf.math.multiply`, this operation is
  guaranteed to be efficient for `tf.IndexedSlices`.

  >>> x = tf.reshape(tf.range(30, dtype=tf.float32), [10, 3])
  >>> with tf.GradientTape() as g:
  ...   g.watch(x)
  ...   y = tf.gather(x, [1, 2])  # IndexedSlices
  ...   z = tf.math.scalar_mul(10.0, y)

  Args:
    scalar: A 0-D scalar `Tensor`. Must have known shape.
    x: A `Tensor` or `IndexedSlices` to be scaled.
    name: A name for the operation (optional).

  Returns:
    `scalar * x` of the same type (`Tensor` or `IndexedSlices`) as `x`.

  Raises:
    ValueError: if scalar is not a 0-D `scalar`.
  """
  base_dtype = dtypes.as_dtype(x.dtype).base_dtype
  scalar = ops.convert_to_tensor(
      scalar, dtype=base_dtype, name="scalar")
  shape = scalar.get_shape()
  if shape.ndims == 0:
    if isinstance(x, indexed_slices.IndexedSlices):
      return indexed_slices.IndexedSlices(
          gen_math_ops.mul(scalar, x.values, name), x.indices, x.dense_shape)
    else:
      return gen_math_ops.mul(scalar, x, name)
  else:
    raise ValueError(
        f"The input scalar must be a 0-D value. Received shape {shape}.")
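

# Illustrative note (editor's sketch, not part of the library): for an
# IndexedSlices input, only the `.values` component is multiplied, so the
# result stays sparse instead of being densified:
#
#   s = indexed_slices.IndexedSlices(
#       values=constant_op.constant([[1., 2.]]),
#       indices=constant_op.constant([3]),
#       dense_shape=constant_op.constant([10, 2]))
#   scaled = scalar_mul(10.0, s)  # values == [[10., 20.]], indices unchanged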


@tf_export("math.softplus", "nn.softplus", v1=["math.softplus", "nn.softplus"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def softplus(features, name=None):
  """Computes elementwise softplus: `softplus(x) = log(exp(x) + 1)`.

  `softplus` is a smooth approximation of `relu`. Like `relu`, `softplus`
  always takes on positive values.

  <img style="width:100%" src="https://www.tensorflow.org/images/softplus.png">

  Example:

  >>> import tensorflow as tf
  >>> tf.math.softplus(tf.range(0, 2, dtype=tf.float32)).numpy()
  array([0.6931472, 1.3132616], dtype=float32)

  Args:
    features: `Tensor`
    name: Optional: name to associate with this operation.
  Returns:
    `Tensor`
  """
  return gen_nn_ops.softplus(features, name)


@tf_export("math.scalar_mul", "scalar_mul", v1=[])
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
@_set_doc(scalar_mul.__doc__)
def scalar_mul_v2(scalar, x, name=None):
  with ops.name_scope(name, "scalar_mul", [x]) as name:
    return scalar_mul(scalar, x, name)


@tf_export("math.pow", "pow")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def pow(x, y, name=None):  # pylint: disable=redefined-builtin
  r"""Computes the power of one value to another.

  Given a tensor `x` and a tensor `y`, this operation computes \\(x^y\\) for
  corresponding elements in `x` and `y`. For example:

  ```python
  x = tf.constant([[2, 2], [3, 3]])
  y = tf.constant([[8, 16], [2, 3]])
  tf.pow(x, y)  # [[256, 65536], [9, 27]]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    y: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, `int64`,
      `complex64`, or `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`.
  """
  with ops.name_scope(name, "Pow", [x]) as name:
    return gen_math_ops._pow(x, y, name=name)


# pylint: disable=redefined-builtin,redefined-outer-name
@tf_export("dtypes.complex", "complex")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def complex(real, imag, name=None):
  r"""Converts two real numbers to a complex number.

  Given a tensor `real` representing the real part of a complex number, and a
  tensor `imag` representing the imaginary part of a complex number, this
  operation returns complex numbers elementwise of the form \\(a + bj\\), where
  *a* represents the `real` part and *b* represents the `imag` part.

  The input tensors `real` and `imag` must have the same shape.

  For example:

  ```python
  real = tf.constant([2.25, 3.25])
  imag = tf.constant([4.75, 5.75])
  tf.complex(real, imag)  # [2.25 + 4.75j, 3.25 + 5.75j]
  ```

  Args:
    real: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    imag: A `Tensor`. Must have the same type as `real`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `complex64` or `complex128`.

  Raises:
    TypeError: If `real` and `imag` have incorrect types.
  """
  real = ops.convert_to_tensor(real, name="real")
  imag = ops.convert_to_tensor(imag, name="imag")
  with ops.name_scope(name, "Complex", [real, imag]) as name:
    input_types = (real.dtype, imag.dtype)
    if input_types == (dtypes.float64, dtypes.float64):
      Tout = dtypes.complex128
    elif input_types == (dtypes.float32, dtypes.float32):
      Tout = dtypes.complex64
    else:
      raise TypeError(
          f"The `real` and `imag` components have incorrect types: "
          f"{real.dtype.name} {imag.dtype.name}. They must be consistent, and "
          f"one of {[dtypes.float32, dtypes.float64]}")
    return gen_math_ops._complex(real, imag, Tout=Tout, name=name)


@tf_export("math.sign", "sign")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def sign(x, name=None):
  r"""Returns an element-wise indication of the sign of a number.

  `y = sign(x) = -1 if x < 0; 0 if x == 0; 1 if x > 0`.

  For complex numbers, `y = sign(x) = x / |x| if x != 0, otherwise y = 0`.

  Example usage:

  >>> # real number
  >>> tf.math.sign([0., 2., -3.])
  <tf.Tensor: shape=(3,), dtype=float32,
  numpy=array([ 0.,  1., -1.], dtype=float32)>

  >>> # complex number
  >>> tf.math.sign([1 + 1j, 0 + 0j])
  <tf.Tensor: shape=(2,), dtype=complex128,
  numpy=array([0.70710678+0.70710678j, 0.        +0.j        ])>

  Args:
    x: A Tensor. Must be one of the following types: bfloat16, half, float32,
      float64, int32, int64, complex64, complex128.
    name: A name for the operation (optional).

  Returns:
    A Tensor. Has the same type as x.

    If x is a SparseTensor, returns SparseTensor(x.indices,
    tf.math.sign(x.values, ...), x.dense_shape).
  """
  x = ops.convert_to_tensor(x)
  if x.dtype.is_complex:
    return gen_math_ops.div_no_nan(
        x,
        cast(
            gen_math_ops.complex_abs(
                x,
                Tout=dtypes.float32
                if x.dtype == dtypes.complex64 else dtypes.float64),
            dtype=x.dtype),
        name=name)
  return gen_math_ops.sign(x, name=name)


@tf_export("math.real", v1=["math.real", "real"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("real")
def real(input, name=None):
  r"""Returns the real part of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the real part of each element in `input` considered as a complex number.

  For example:

  ```python
  x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  tf.math.real(x)  # [-2.25, 3.25]
  ```

  If `input` is already real, it is returned unchanged.

  Args:
    input: A `Tensor`. Must have numeric type.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Real", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      real_dtype = input.dtype.real_dtype
      return gen_math_ops.real(input, Tout=real_dtype, name=name)
    else:
      return input


@tf_export("math.imag", v1=["math.imag", "imag"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("imag")
def imag(input, name=None):
  r"""Returns the imaginary part of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the imaginary part of each element in `input` considered as a complex
  number. If `input` is real, a tensor of all zeros is returned.

  For example:

  ```python
  x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
  tf.math.imag(x)  # [4.75, 5.75]
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float`, `double`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Imag", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      return gen_math_ops.imag(input, Tout=input.dtype.real_dtype, name=name)
    else:
      return array_ops.zeros_like(input)


@tf_export("math.angle", v1=["math.angle", "angle"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated_endpoints("angle")
def angle(input, name=None):
  r"""Returns the element-wise argument of a complex (or real) tensor.

  Given a tensor `input`, this operation returns a tensor of type `float` that
  is the argument of each element in `input` considered as a complex number.

  The elements in `input` are considered to be complex numbers of the form
  \\(a + bj\\), where *a* is the real part and *b* is the imaginary part.
  If `input` is real then *b* is zero by definition.

  The argument returned by this function is of the form \\(atan2(b, a)\\).
  If `input` is real, the returned angle is `0` for non-negative elements and
  \\(\pi\\) for negative elements.

  For example:

  ```
  input = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j], dtype=tf.complex64)
  tf.math.angle(input).numpy()
  # ==> array([2.0131705, 1.056345 ], dtype=float32)
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float`, `double`,
      `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `float32` or `float64`.
  """
  with ops.name_scope(name, "Angle", [input]) as name:
    input = ops.convert_to_tensor(input, name="input")
    if input.dtype.is_complex:
      return gen_math_ops.angle(input, Tout=input.dtype.real_dtype, name=name)
    else:
      return array_ops.where(input < 0, np.pi * array_ops.ones_like(input),
                             array_ops.zeros_like(input))


# pylint: enable=redefined-outer-name,redefined-builtin


@tf_export("math.round", "round")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def round(x, name=None):  # pylint: disable=redefined-builtin
  """Rounds the values of a tensor to the nearest integer, element-wise.

  Rounds half to even. Also known as banker's rounding. If you want to round
  according to the current system rounding mode use tf::cint.
  For example:

  ```python
  x = tf.constant([0.9, 2.5, 2.3, 1.5, -4.5])
  tf.round(x)  # [ 1.0, 2.0, 2.0, 2.0, -4.0 ]
  ```

  Args:
    x: A `Tensor` of type `float16`, `float32`, `float64`, `int32`, or `int64`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of same shape and type as `x`.
  """
  x = ops.convert_to_tensor(x, name="x")
  if x.dtype.is_integer:
    return x
  else:
    return gen_math_ops.round(x, name=name)


# TODO(mdan): Include a full_type argument to replace dtype.
@tf_export("cast", "dtypes.cast")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def cast(x, dtype, name=None):
  """Casts a tensor to a new type.

  The operation casts `x` (in case of `Tensor`) or `x.values`
  (in case of `SparseTensor` or `IndexedSlices`) to `dtype`.

  For example:

  >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
  >>> tf.cast(x, tf.int32)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>

  Notice `tf.cast` has an alias `tf.dtypes.cast`:

  >>> x = tf.constant([1.8, 2.2], dtype=tf.float32)
  >>> tf.dtypes.cast(x, tf.int32)
  <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 2], dtype=int32)>

  The operation supports data types (for `x` and `dtype`) of
  `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`, `int64`,
  `float16`, `float32`, `float64`, `complex64`, `complex128`, `bfloat16`.
  In case of casting from complex types (`complex64`, `complex128`) to real
  types, only the real part of `x` is returned. In case of casting from real
  types to complex types (`complex64`, `complex128`), the imaginary part of
  the returned value is set to `0`. The handling of complex types here matches
  the behavior of numpy.

  Note casting nan and inf values to integral types has undefined behavior.

  Note this operation can lead to a loss of precision when converting native
  Python `float` and `complex` variables to `tf.float64` or `tf.complex128`
  tensors, since the input is first converted to the `float32` data type and
  then widened. It is recommended to use `tf.convert_to_tensor` instead of
  `tf.cast` for any non-tensor inputs.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices` of numeric type. It
      could be `uint8`, `uint16`, `uint32`, `uint64`, `int8`, `int16`, `int32`,
      `int64`, `float16`, `float32`, `float64`, `complex64`, `complex128`,
      `bfloat16`.
    dtype: The destination type. The list of supported dtypes is the same as
      `x`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` and
    same type as `dtype`.

  Raises:
    TypeError: If `x` cannot be cast to the `dtype`.
  """
  base_type = dtypes.as_dtype(dtype).base_dtype
  if isinstance(x,
                (ops.Tensor, _resource_variable_type)) and base_type == x.dtype:
    return x
  with ops.name_scope(name, "Cast", [x]) as name:
    if isinstance(x, sparse_tensor.SparseTensor):
      values_cast = cast(x.values, base_type, name=name)
      x = sparse_tensor.SparseTensor(x.indices, values_cast, x.dense_shape)
    elif isinstance(x, indexed_slices.IndexedSlices):
      values_cast = cast(x.values, base_type, name=name)
      x = indexed_slices.IndexedSlices(values_cast, x.indices, x.dense_shape)
    else:
      # TODO(josh11b): If x is not already a Tensor, we could return
      # ops.convert_to_tensor(x, dtype=dtype, ...) here, but that
      # allows some conversions that cast() can't do, e.g. casting numbers to
      # strings.
      x = ops.convert_to_tensor(x, name="x")
      if x.dtype.is_complex and base_type.is_floating:
        logging.warn(
            f"You are casting an input of type {x.dtype.name} to an "
            f"incompatible dtype {base_type.name}. This will "
            "discard the imaginary part and may not be what you "
            "intended."
        )
      if x.dtype != base_type:
        x = gen_math_ops.cast(x, base_type, name=name)
    return x
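

# Illustrative note (editor's sketch, not part of the library): casting a
# Tensor to the dtype it already has short-circuits above and returns the
# input object itself, so no Cast node is added to the graph:
#
#   x = constant_op.constant([1, 2], dtype=dtypes.int32)
#   cast(x, dtypes.int32) is x  # => True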


@tf_export("dtypes.saturate_cast", "saturate_cast")
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
def saturate_cast(value, dtype, name=None):
  """Performs a safe saturating cast of `value` to `dtype`.

  This function casts the input to `dtype` without overflow. If
  there is a danger that values would over or underflow in the cast, this op
  applies the appropriate clamping before the cast. See `tf.cast` for more
  details.

  Args:
    value: A `Tensor`.
    dtype: The desired output `DType`.
    name: A name for the operation (optional).

  Returns:
    `value` safely cast to `dtype`.
  """
  # When casting to a type with smaller representable range, clamp.
  # Note that this covers casting to unsigned types as well.
  with ops.name_scope(name, "saturate_cast", [value]) as name:
    value = ops.convert_to_tensor(value, name="value")
    dtype = dtypes.as_dtype(dtype).base_dtype

    in_dtype = value.dtype
    if in_dtype.is_complex:
      if dtype.is_complex:
        # Clamp real and imag components separately, if required.
        real_in_dtype = in_dtype.real_dtype
        real_out_dtype = dtype.real_dtype
        if (real_in_dtype.min < real_out_dtype.min or
            real_in_dtype.max > real_out_dtype.max):
          value = gen_math_ops._clip_by_value(
              value,
              ops.convert_to_tensor(
                  builtins.complex(real_out_dtype.min, real_out_dtype.min),
                  dtype=in_dtype),
              ops.convert_to_tensor(
                  builtins.complex(real_out_dtype.max, real_out_dtype.max),
                  dtype=in_dtype),
              name="clamp")
        return cast(value, dtype, name=name)
      else:
        # Extract real component and fall through to clamp+cast.
        value = real(value)
        logging.warn("Casting complex to real discards imaginary part.")
        in_dtype = in_dtype.real_dtype

    # in_dtype is real, but out_dtype could be complex.
    out_real_dtype = dtype.real_dtype

    if in_dtype.min < out_real_dtype.min or in_dtype.max > out_real_dtype.max:
      # The output min/max may not actually be representable in the
      # in_dtype (e.g. casting float32 to uint32). This can lead to undefined
      # behavior when trying to cast a value outside the valid range of the
      # target type. We work around this by nudging the min/max to fall within
      # the valid output range. The catch is that we may actually saturate
      # to a value less than the true saturation limit, but this is the best we
      # can do in order to avoid UB without introducing a separate SaturateCast
      # op.
      min_limit = in_dtype.as_numpy_dtype(out_real_dtype.min)
      if min_limit < out_real_dtype.min:
        min_limit = np.nextafter(
            out_real_dtype.min, 0, dtype=in_dtype.as_numpy_dtype
        )

      max_limit = in_dtype.as_numpy_dtype(out_real_dtype.max)
      if max_limit > out_real_dtype.max:
        max_limit = np.nextafter(
            out_real_dtype.max, 0, dtype=in_dtype.as_numpy_dtype
        )

      value = gen_math_ops._clip_by_value(
          value,
          ops.convert_to_tensor(min_limit, dtype=in_dtype),
          ops.convert_to_tensor(max_limit, dtype=in_dtype),
          name="clamp",
      )
    return cast(value, dtype, name=name)
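

# Illustrative note (editor's sketch, not part of the library): unlike plain
# cast(), whose behavior is undefined for out-of-range values, saturate_cast
# clamps to the target dtype's representable range before casting:
#
#   x = constant_op.constant([-5.0, 127.9, 300.0])
#   saturate_cast(x, dtypes.uint8)  # => [0, 127, 255]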


@tf_export(v1=["to_float"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_float(x, name="ToFloat"):
  """Casts a tensor to type `float32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `float32`.

  Raises:
    TypeError: If `x` cannot be cast to the `float32`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.float32)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_float(tf.constant(3.14, dtype=tf.double))
  <tf.Tensor: shape=(), dtype=float32, numpy=3.14>

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.double), tf.float32)
  <tf.Tensor: shape=(), dtype=float32, numpy=3.14>

  @end_compatibility

  """
  return cast(x, dtypes.float32, name=name)


@tf_export(v1=["to_double"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_double(x, name="ToDouble"):
  """Casts a tensor to type `float64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `float64`.

  Raises:
    TypeError: If `x` cannot be cast to the `float64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.double)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_double(tf.constant(3.14, dtype=tf.float32))
  <tf.Tensor: shape=(), dtype=float64, numpy=3.14>

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.float32), tf.double)
  <tf.Tensor: shape=(), dtype=float64, numpy=3.14>

  @end_compatibility

  """
  return cast(x, dtypes.float64, name=name)


@tf_export(v1=["to_int32"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_int32(x, name="ToInt32"):
  """Casts a tensor to type `int32`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `int32`.

  Raises:
    TypeError: If `x` cannot be cast to the `int32`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.int32)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_int32(tf.constant(1, dtype=tf.int64))
  <tf.Tensor: shape=(), dtype=int32, numpy=1>

  After:

  >>> tf.cast(tf.constant(1, dtype=tf.int64), tf.int32)
  <tf.Tensor: shape=(), dtype=int32, numpy=1>

  @end_compatibility

  """
  return cast(x, dtypes.int32, name=name)


@tf_export(v1=["to_int64"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_int64(x, name="ToInt64"):
  """Casts a tensor to type `int64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `int64`.

  Raises:
    TypeError: If `x` cannot be cast to the `int64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.int64)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_int64(tf.constant(1, dtype=tf.int32))
  <tf.Tensor: shape=(), dtype=int64, numpy=1>

  After:

  >>> tf.cast(tf.constant(1, dtype=tf.int32), tf.int64)
  <tf.Tensor: shape=(), dtype=int64, numpy=1>

  @end_compatibility

  """
  return cast(x, dtypes.int64, name=name)


@tf_export(v1=["to_bfloat16"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_bfloat16(x, name="ToBFloat16"):
  """Casts a tensor to type `bfloat16`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `bfloat16`.

  Raises:
    TypeError: If `x` cannot be cast to the `bfloat16`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.bfloat16)`. There are no further issues with eager execution
  or tf.function.

  Before:

  >>> tf.compat.v1.to_bfloat16(tf.constant(3.14, dtype=tf.float32))
  <tf.Tensor: shape=(), dtype=bfloat16, numpy=3.14>

  After:

  >>> tf.cast(tf.constant(3.14, dtype=tf.float32), tf.bfloat16)
  <tf.Tensor: shape=(), dtype=bfloat16, numpy=3.14>

  @end_compatibility

  """
  return cast(x, dtypes.bfloat16, name=name)


@tf_export(v1=["to_complex64"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_complex64(x, name="ToComplex64"):
  """Casts a tensor to type `complex64`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `complex64`.

  Raises:
    TypeError: If `x` cannot be cast to the `complex64`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.complex64)`. There are no further issues with eager
  execution or tf.function.

  Before:

  >>> tf.compat.v1.to_complex64(tf.constant(1. + 2.j, dtype=tf.complex128))
  <tf.Tensor: shape=(), dtype=complex64, numpy=(1+2j)>

  After:

  >>> tf.cast(tf.constant(1. + 2.j, dtype=tf.complex128), tf.complex64)
  <tf.Tensor: shape=(), dtype=complex64, numpy=(1+2j)>

  @end_compatibility

  """
  return cast(x, dtypes.complex64, name=name)


@tf_export(v1=["to_complex128"])
@dispatch.register_unary_elementwise_api
@dispatch.add_dispatch_support
@deprecation.deprecated(date=None, instructions="Use `tf.cast` instead.")
def to_complex128(x, name="ToComplex128"):
  """Casts a tensor to type `complex128`.

  Args:
    x: A `Tensor` or `SparseTensor` or `IndexedSlices`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` or `SparseTensor` or `IndexedSlices` with same shape as `x` with
    type `complex128`.

  Raises:
    TypeError: If `x` cannot be cast to the `complex128`.

  @compatibility(TF2)

  This name was deprecated and removed in TF2, but has an exact replacement
  `tf.cast(..., tf.complex128)`. There are no further issues with eager
  execution or tf.function.

  Before:

  >>> tf.compat.v1.to_complex128(tf.constant(1. + 2.j, dtype=tf.complex64))
  <tf.Tensor: shape=(), dtype=complex128, numpy=(1+2j)>

  After:

  >>> tf.cast(tf.constant(1. + 2.j, dtype=tf.complex64), tf.complex128)
  <tf.Tensor: shape=(), dtype=complex128, numpy=(1+2j)>

  @end_compatibility

  """
  return cast(x, dtypes.complex128, name=name)


ops.Tensor._override_operator("__neg__", gen_math_ops.neg)
ops.Tensor._override_operator("__abs__", abs)


def _maybe_get_dtype(x):
  """Returns a numpy type if available from x. Skips if x is numpy.ndarray."""
  # Don't put np.ndarray in this list, because np.result_type looks at the
  # value (not just dtype) of np.ndarray to decide the result type.
  if isinstance(x, numbers.Real):
    return x
  if isinstance(x, ops.Tensor):
    return x.dtype.as_numpy_dtype
  if isinstance(x, dtypes.DType):
    return x.as_numpy_dtype
  if isinstance(x, tensor_shape.TensorShape):
    return np.int32
  if isinstance(x, (list, tuple)):
    raise ValueError(f"Cannot determine dtype. Got sequence {x}.")
  return x


def maybe_promote_tensors(*tensors, force_same_dtype=False):
  """Promotes tensors if numpy style promotion is enabled.

  This function promotes `tensors` according to numpy promotion rules
  if numpy style promotion is enabled. Otherwise, if
  `force_same_dtype` is `True`, it force-casts `tensors[1:]` to
  `tensors[0]`'s dtype. Note that this force-cast can be problematic;
  for example, some `tensors[1:]` elements can be silently downcast.

  Args:
    *tensors: the list of tensors to promote.
    force_same_dtype: bool (optional, default to `False`). When numpy
      style promotion is disabled and `force_same_dtype` is `True`,
      this function will force-cast `tensors[1:]` to `tensors[0]`'s
      dtype (which could be problematic).

  Returns:
    The promoted list of tensors.
  """
  if not tensors:
    return tensors
  if not ops._numpy_style_type_promotion:
    if not force_same_dtype:
      return tensors
    promoted_tensors = []
    promoted_tensors.append(tensors[0])
    dtype = tensors[0].dtype.base_dtype
    for tensor in tensors[1:]:
      promoted_tensors.append(
          ops.convert_to_tensor(tensor, dtype, name="x"))
    return promoted_tensors
  result_type = np_dtypes._result_type(
      *[_maybe_get_dtype(x) for x in nest.flatten(tensors)])
  def _promote_or_cast(x):
    if isinstance(x, ops.Tensor):
      x = cast(x, result_type)
    else:
      x = ops.convert_to_tensor(x, result_type)
    return x
  return [_promote_or_cast(x) for x in tensors]
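

# Illustrative note (editor's sketch, not part of the library): with numpy
# style promotion disabled (the default) and force_same_dtype=True, trailing
# arguments are converted to the first tensor's dtype:
#
#   a = constant_op.constant([1.0, 2.0])  # float32
#   b, c = maybe_promote_tensors(a, 3, force_same_dtype=True)
#   # b is a; c is a float32 tensor holding 3.0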


def _OverrideBinaryOperatorHelper(func, op_name, clazz_object=ops.Tensor):
  """Register operators with different tensor and scalar versions.

  If `clazz_object` is `SparseTensor`, assumes `func` takes `(sp_indices,
  sp_values, sp_shape, dense)` and outputs `(new_sp_values)`.

  Args:
    func: the operator
    op_name: name of the operator being overridden
    clazz_object: class to override for. Either `Tensor` or `SparseTensor`.
  """

  @traceback_utils.filter_traceback
  def binary_op_wrapper(x, y):
    with ops.name_scope(None, op_name, [x, y]) as name:
      try:
        # force_same_dtype=False to preserve existing TF behavior
        # TODO(b/178860388): Figure out why binary_op_wrapper and
        #   r_binary_op_wrapper use different force_same_dtype values.
        x, y = maybe_promote_tensors(x, y)
        return func(x, y, name=name)
      except (TypeError, ValueError) as e:
        # Even if dispatching the op failed, the RHS may be a tensor aware
        # object that can implement the operator with knowledge of itself
        # and the tensor.
        # If the RHS is not tensor aware we still want to raise the
        # original error from the LHS, because it may be more
        # informative.
        if hasattr(type(y), "__r%s__" % op_name):
          try:
            r_op = getattr(y, "__r%s__" % op_name)
            out = r_op(x)
            if out is NotImplemented:
              raise
            return out
          except (TypeError, ValueError):
            raise e
        else:
          raise

  @traceback_utils.filter_traceback
  def binary_op_wrapper_sparse(sp_x, y):
    with ops.name_scope(None, op_name, [sp_x, y]) as name:
      y = ops.convert_to_tensor(y, dtype=sp_x.dtype.base_dtype, name="y")
      return sparse_tensor.SparseTensor(
          sp_x.indices,
          func(sp_x.indices, sp_x.values, sp_x.dense_shape, y, name=name),
          sp_x.dense_shape)

  @traceback_utils.filter_traceback
  def r_binary_op_wrapper(y, x):
    with ops.name_scope(None, op_name, [x, y]) as name:
      # TODO(b/178860388): Figure out why binary_op_wrapper and
      #   r_binary_op_wrapper use different force_same_dtype values.
      y, x = maybe_promote_tensors(y, x, force_same_dtype=True)
      return func(x, y, name=name)

  # Propagate func.__doc__ to the wrappers.
  try:
    doc = func.__doc__
  except AttributeError:
    doc = None
  binary_op_wrapper.__doc__ = doc
  r_binary_op_wrapper.__doc__ = doc
  binary_op_wrapper_sparse.__doc__ = doc

  if clazz_object is ops.Tensor:
    clazz_object._override_operator("__%s__" % op_name, binary_op_wrapper)
    del binary_op_wrapper
    clazz_object._override_operator("__r%s__" % op_name, r_binary_op_wrapper)
    del r_binary_op_wrapper
  else:
    clazz_object._override_operator("__%s__" % op_name,
                                    binary_op_wrapper_sparse)
    del binary_op_wrapper_sparse
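

# Illustrative note (editor's sketch, not how this module actually registers
# its operators): a call such as
#
#   _OverrideBinaryOperatorHelper(gen_math_ops.add_v2, "add")
#
# would install binary_op_wrapper as Tensor.__add__ and r_binary_op_wrapper
# as Tensor.__radd__, so both `tensor + x` and `x + tensor` dispatch to the
# same underlying op.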


# Conversion table for __truediv__. None entries mean no conversion required.
_TRUEDIV_TABLE = {
    dtypes.uint8: dtypes.float32,
    dtypes.int8: dtypes.float32,
    dtypes.uint16: dtypes.float32,
    dtypes.int16: dtypes.float32,
    dtypes.uint32: dtypes.float64,
    dtypes.int32: dtypes.float64,
    dtypes.uint64: dtypes.float64,
    dtypes.int64: dtypes.float64,
    dtypes.bfloat16: None,
    dtypes.float16: None,
    dtypes.float32: None,
    dtypes.float64: None,
    dtypes.complex64: None,
    dtypes.complex128: None,
}
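

# Illustrative note (editor's sketch, not part of the library): the table
# above is what makes integer true division produce floats, mirroring NumPy:
#
#   tf.constant(7, tf.int16) / tf.constant(2, tf.int16)  # => 3.5 (float32)
#   tf.constant(7, tf.int32) / tf.constant(2, tf.int32)  # => 3.5 (float64)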


# NOTE: the support of "sparse (true)div dense" is currently not baked into
# "tf.(true_)div()". Until such an API decision is made, the supported usage
# is to explicitly use the "/" operator to invoke either truediv or div.
def _sparse_dense_truediv(sp_indices, sp_values, sp_shape, y, name=None):
  """Internal helper function for 'sp_t / dense_t'."""
  with ops.name_scope(name, "truediv",
                      [sp_indices, sp_values, sp_shape, y]) as name:
    sp_values = ops.convert_to_tensor(sp_values, name="sp_values")
    y = ops.convert_to_tensor(y, name="y")
    x_dtype = sp_values.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    try:
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError(
          f"Invalid dtype {x_dtype!r} in __truediv__. Expected one "
          f"of {{{', '.join([repr(x) for x in _TRUEDIV_TABLE.keys()])}}}.")
    if dtype is not None:
      sp_values = cast(sp_values, dtype)
      y = cast(y, dtype)
    return gen_sparse_ops.sparse_dense_cwise_div(
        sp_indices, sp_values, sp_shape, y, name=name)
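

# Illustrative note (editor's sketch, not part of the library): per the NOTE
# above, sparse/dense division is reached through the "/" operator on
# SparseTensor rather than through tf.divide:
#
#   sp = sparse_tensor.SparseTensor(
#       indices=[[0, 0]], values=[6.0], dense_shape=[2, 2])
#   res = sp / 2.0  # SparseTensor with values [3.0]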


def _truediv_python3(x, y, name=None):
  with ops.name_scope(name, "truediv", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    try:
      dtype = _TRUEDIV_TABLE[x_dtype]
    except KeyError:
      raise TypeError(
          f"Invalid dtype {x_dtype!r} in __truediv__. Expected one "
          f"of {{{', '.join([repr(x) for x in _TRUEDIV_TABLE.keys()])}}}.")
    if dtype is not None:
      x = cast(x, dtype)
      y = cast(y, dtype)
    return gen_math_ops.real_div(x, y, name=name)


def _div_python2(x, y, name=None):
  """Divide two values using Python 2 semantics.

  Used for Tensor.__div__.

  Args:
    x: `Tensor` numerator of real numeric type.
    y: `Tensor` denominator of real numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` returns the quotient of x and y.
  """

  with ops.name_scope(name, "div", [x, y]) as name:
    x = ops.convert_to_tensor(x, name="x")
    y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
    x_dtype = x.dtype.base_dtype
    y_dtype = y.dtype.base_dtype
    if x_dtype != y_dtype:
      raise TypeError(f"`x` and `y` must have the same dtype, "
                      f"got {x_dtype!r} != {y_dtype!r}.")
    if x_dtype.is_floating or x_dtype.is_complex:
      return gen_math_ops.real_div(x, y, name=name)
    else:
      return gen_math_ops.floor_div(x, y, name=name)
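

# Illustrative note (editor's sketch, not part of the library): the two
# helpers above differ only for integer inputs, where Python 2 semantics
# floor and Python 3 semantics promote to float:
#
#   a = constant_op.constant(7)   # int32
#   b = constant_op.constant(2)   # int32
#   _div_python2(a, b)            # => 3   (int32, floor_div)
#   _truediv_python3(a, b)        # => 3.5 (float64, real_div)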


@tf_export("math.truediv", "truediv")
@dispatch.register_binary_elementwise_api
@dispatch.add_dispatch_support
def truediv(x, y, name=None):
  """Divides x / y elementwise (using Python 3 division operator semantics).

  NOTE: Prefer using the Tensor operator or tf.divide which obey Python
  division operator semantics.

  This function forces Python 3 division operator semantics where all integer
  arguments are cast to floating types first. This op is generated by normal
  `x / y` division in Python 3 and in Python 2.7 with
  `from __future__ import division`. If you want integer division that rounds
  down, use `x // y` or `tf.math.floordiv`.

  `x` and `y` must have the same numeric type. If the inputs are floating
  point, the output will have the same type. If the inputs are integral, the
  inputs are cast to `float32` for `int8` and `int16` and `float64` for `int32`
  and `int64` (matching the behavior of Numpy).

  Args:
    x: `Tensor` numerator of numeric type.
    y: `Tensor` denominator of numeric type.
    name: A name for the operation (optional).

  Returns:
    `x / y` evaluated in floating point.

  Raises:
    TypeError: If `x` and `y` have different dtypes.
  """
  return _truediv_python3(x, y, name)
1652@tf_export(v1=["div"])
1653@dispatch.register_binary_elementwise_api
1654@dispatch.add_dispatch_support
1655@deprecation.deprecated(
1656 date=None,
1657 instructions="Deprecated in favor of operator or tf.math.divide.")
1658def div(x, y, name=None):
1659 """Divides x / y elementwise (using Python 2 division operator semantics).
1661 @compatibility(TF2)
1662 This function is deprecated in TF2. Prefer using the Tensor division operator,
1663 `tf.divide`, or `tf.math.divide`, which obey the Python 3 division operator
1664 semantics.
1665 @end_compatibility
1668 This function divides `x` and `y`, forcing Python 2 semantics. That is, if `x`
1669 and `y` are both integers then the result will be an integer. This is in
1670 contrast to Python 3, where division with `/` is always a float while division
1671 with `//` is always an integer.
1673 Args:
1674 x: `Tensor` numerator of real numeric type.
1675 y: `Tensor` denominator of real numeric type.
1676 name: A name for the operation (optional).
1678 Returns:
1679 The quotient of `x` and `y`.
1680 """
1681 return _div_python2(x, y, name)
1684@tf_export("math.divide_no_nan", v1=["math.divide_no_nan", "div_no_nan"])
1685@dispatch.register_binary_elementwise_api
1686@dispatch.add_dispatch_support
1687@deprecation.deprecated_endpoints("div_no_nan")
1688def div_no_nan(x, y, name=None):
1689 """Computes a safe divide which returns 0 if `y` (denominator) is zero.
1691 For example:
1693 >>> tf.constant(3.0) / 0.0
1694 <tf.Tensor: shape=(), dtype=float32, numpy=inf>
1695 >>> tf.math.divide_no_nan(3.0, 0.0)
1696 <tf.Tensor: shape=(), dtype=float32, numpy=0.0>
1698 Note that 0 is returned if `y` is 0 even if `x` is nonfinite:
1700 >>> tf.math.divide_no_nan(np.nan, 0.0)
1701 <tf.Tensor: shape=(), dtype=float32, numpy=0.0>
1703 Args:
1704 x: A `Tensor` of a floating or integer dtype.
1705 y: A `Tensor` with the same dtype as `x` and a compatible shape.
1706 name: A name for the operation (optional).
1708 Returns:
1709 The element-wise quotient as in `tf.math.divide(x, y)`,
1710 except that division by zero produces `0.0`, not `nan`.
1711 """
1713 with ops.name_scope(name, "div_no_nan", [x, y]) as name:
1714 if not tensor_util.is_tf_type(x) and tensor_util.is_tf_type(y):
1715 # Treat this case specially like divide() does above.
1716 y = ops.convert_to_tensor(y, name="y")
1717 x = ops.convert_to_tensor(x, dtype=y.dtype.base_dtype, name="x")
1718 else:
1719 x = ops.convert_to_tensor(x, name="x")
1720 y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
1721 x_dtype = x.dtype.base_dtype
1722 y_dtype = y.dtype.base_dtype
1723 if x_dtype != y_dtype:
1724 raise TypeError(f"`x` and `y` must have the same dtype, "
1725 f"got {x_dtype!r} != {y_dtype!r}.")
1726 try:
1727 dtype = _TRUEDIV_TABLE[x_dtype]
1728 except KeyError as e:
1729 raise TypeError(
1730 f"Invalid dtype {x_dtype!r} in tf.math.divide_no_nan. Expected one "
1731 f"of {{{', '.join([repr(x) for x in _TRUEDIV_TABLE.keys()])}}}."
1732 ) from e
1733 if dtype is not None:
1734 x = cast(x, dtype)
1735 y = cast(y, dtype)
1736 return gen_math_ops.div_no_nan(x, y, name=name)
1739@tf_export("math.multiply_no_nan")
1740@dispatch.register_binary_elementwise_api
1741@dispatch.add_dispatch_support
1742def multiply_no_nan(x, y, name=None):
1743 """Computes the product of x and y and returns 0 if the y is zero, even if x is NaN or infinite.
1745 Note this is noncommutative: if y is NaN or infinite and x is 0, the result
1746 will be NaN.
1748 Args:
1749 x: A `Tensor`. Must be one of the following types: `float32`, `float64`.
1750 y: A `Tensor` whose dtype is compatible with `x`.
1751 name: A name for the operation (optional).
1753 Returns:
1754 The element-wise product of `x` and `y`.
1755 """
1757 with ops.name_scope(name, "multiply_no_nan", [x, y]) as name:
1758 x = ops.convert_to_tensor(x, name="x")
1759 y = ops.convert_to_tensor(y, name="y", dtype=x.dtype.base_dtype)
1760 x_dtype = x.dtype.base_dtype
1761 y_dtype = y.dtype.base_dtype
1762 if x_dtype != y_dtype:
1763 raise TypeError(f"`x` and `y` must have the same dtype, "
1764 f"got {x_dtype!r} != {y_dtype!r}")
1765 return gen_math_ops.mul_no_nan(x, y, name=name)
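The docstring above carries no usage example; here is a minimal hedged one showing the noncommutativity it warns about:

```python
import numpy as np
import tensorflow as tf

# A zero `y` wins over a nonfinite `x` ...
print(tf.math.multiply_no_nan(np.inf, 0.0))
# ==> tf.Tensor(0.0, shape=(), dtype=float32)
# ... but a zero `x` does not protect against a nonfinite `y`.
print(tf.math.multiply_no_nan(0.0, np.inf))
# ==> tf.Tensor(nan, shape=(), dtype=float32)
```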
1768# TODO(aselle): This should be removed
1769mod = gen_math_ops.floor_mod
1772@tf_export("math.floordiv", v1=["math.floordiv", "floordiv"])
1773@dispatch.register_binary_elementwise_api
1774@dispatch.add_dispatch_support
1775@deprecation.deprecated_endpoints("floordiv")
1776def floordiv(x, y, name=None):
1777 """Divides `x / y` elementwise, rounding toward the most negative integer.
1779 Mathematically, this is equivalent to floor(x / y). For example:
1780 floor(8.4 / 4.0) = floor(2.1) = 2.0
1781 floor(-8.4 / 4.0) = floor(-2.1) = -3.0
1782 This is equivalent to the '//' operator in Python 3.0 and above.
1784 Note: `x` and `y` must have the same type, and the result will have the same
1785 type as well.
1787 Args:
1788 x: `Tensor` numerator of real numeric type.
1789 y: `Tensor` denominator of real numeric type.
1790 name: A name for the operation (optional).
1792 Returns:
1793 `x / y` rounded toward -infinity.
1795 Raises:
1796 TypeError: If the inputs are complex.
1797 """
1798 with ops.name_scope(name, "floordiv", [x, y]) as name:
1799 return gen_math_ops.floor_div(x, y, name=name)
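A short sketch of the rounding direction, mirroring the worked values in the docstring:

```python
import tensorflow as tf

print(tf.math.floordiv(8.4, 4.0))   # floor(2.1)  ==>  2.0
print(tf.math.floordiv(-8.4, 4.0))  # floor(-2.1) ==> -3.0
# The `//` operator on tensors dispatches to the same kernel:
print(tf.constant(-7) // tf.constant(2))
# ==> tf.Tensor(-4, shape=(), dtype=int32)
```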
1802realdiv = gen_math_ops.real_div
1803truncatediv = gen_math_ops.truncate_div
1804floor_div = gen_math_ops.floor_div
1805truncatemod = gen_math_ops.truncate_mod
1806floormod = gen_math_ops.floor_mod
1809@tf_export("__operators__.add", v1=[])
1810@dispatch.add_dispatch_support
1811def _add_dispatch(x, y, name=None):
1812 """The operation invoked by the `Tensor.__add__` operator.
1814 Purpose in the API:
1816 This method is exposed in TensorFlow's API so that library developers
1817 can register dispatching for `Tensor.__add__` to allow it to handle
1818 custom composite tensors & other custom objects.
1820 The API symbol is not intended to be called by users directly, although it
1821 does appear in TensorFlow's generated documentation.
1823 Args:
1824 x: The left-hand side of the `+` operator.
1825 y: The right-hand side of the `+` operator.
1826 name: an optional name for the operation.
1828 Returns:
1829 The result of the elementwise `+` operation.
1830 """
1831 if not isinstance(y, ops.Tensor) and not isinstance(
1832 y, sparse_tensor.SparseTensor):
1833 y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
1834 if x.dtype == dtypes.string:
1835 return gen_math_ops.add(x, y, name=name)
1836 else:
1837 return gen_math_ops.add_v2(x, y, name=name)
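A hedged illustration of the two branches above: numeric tensors go through `add_v2`, while string tensors fall back to the legacy `Add` kernel, which concatenates:

```python
import tensorflow as tf

print(tf.constant(1) + tf.constant(2))
# ==> tf.Tensor(3, shape=(), dtype=int32)
print(tf.constant("Hello, ") + tf.constant("TF"))
# ==> tf.Tensor(b'Hello, TF', shape=(), dtype=string)
```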
1840def _mul_dispatch(x, y, name=None):
1841 """Dispatches cwise mul for "Dense*Dense" and "Dense*Sparse"."""
1842 if isinstance(y, sparse_tensor.SparseTensor): # Case: Dense * Sparse.
1843 new_vals = gen_sparse_ops.sparse_dense_cwise_mul(y.indices, y.values,
1844 y.dense_shape, x, name)
1845 return sparse_tensor.SparseTensor(y.indices, new_vals, y.dense_shape)
1846 else:
1847 return multiply(x, y, name=name)
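An illustrative sketch of the Dense * Sparse branch; the result stays sparse, and only the stored values are multiplied:

```python
import tensorflow as tf

sp = tf.sparse.SparseTensor(indices=[[0, 0], [1, 1]],
                            values=[10.0, 20.0],
                            dense_shape=[2, 2])
dense = tf.constant([[2.0, 2.0], [3.0, 3.0]])
out = dense * sp  # routed through sparse_dense_cwise_mul
print(tf.sparse.to_dense(out))
# ==> [[20.  0.]
#      [ 0. 60.]]
```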
1850# NOTE(aselle): When integer division is added for sparse_dense_cwise,
1851# div, truediv, and floordiv should be delegated appropriately for
1852# Python semantics, analogous to dense cwise tensor operations.
1853_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_div, "div",
1854 sparse_tensor.SparseTensor)
1855_OverrideBinaryOperatorHelper(_sparse_dense_truediv, "truediv",
1856 sparse_tensor.SparseTensor)
1857_OverrideBinaryOperatorHelper(gen_sparse_ops.sparse_dense_cwise_mul, "mul",
1858 sparse_tensor.SparseTensor)
1860_OverrideBinaryOperatorHelper(_add_dispatch, "add")
1861_OverrideBinaryOperatorHelper(subtract, "sub")
1862_OverrideBinaryOperatorHelper(_mul_dispatch, "mul")
1863_OverrideBinaryOperatorHelper(div, "div")
1864_OverrideBinaryOperatorHelper(truediv, "truediv")
1865_OverrideBinaryOperatorHelper(floordiv, "floordiv")
1866_OverrideBinaryOperatorHelper(gen_math_ops.floor_mod, "mod")
1867_OverrideBinaryOperatorHelper(pow, "pow")
1870@tf_export("math.logical_xor", v1=["math.logical_xor", "logical_xor"])
1871@dispatch.register_binary_elementwise_api
1872@dispatch.add_dispatch_support
1873@deprecation.deprecated_endpoints("logical_xor")
1874def logical_xor(x, y, name="LogicalXor"):
1875 """Logical XOR function.
1877 x ^ y = (x | y) & ~(x & y)
1879 Requires that `x` and `y` have the same shape or have
1880 [broadcast-compatible](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html)
1881 shapes. For example, `x` and `y` can be:
1883 - Two single elements of type `bool`
1884 - One `tf.Tensor` of type `bool` and a single `bool`, where the result is
1885 calculated by applying logical XOR between the single element and each
1886 element of the larger Tensor.
1887 - Two `tf.Tensor` objects of type `bool` of the same shape. In this case,
1888 the result will be the element-wise logical XOR of the two input tensors.
1890 Usage:
1892 >>> a = tf.constant([True])
1893 >>> b = tf.constant([False])
1894 >>> tf.math.logical_xor(a, b)
1895 <tf.Tensor: shape=(1,), dtype=bool, numpy=array([ True])>
1897 >>> c = tf.constant([True])
1898 >>> x = tf.constant([False, True, True, False])
1899 >>> tf.math.logical_xor(c, x)
1900 <tf.Tensor: shape=(4,), dtype=bool, numpy=array([ True, False, False, True])>
1902 >>> y = tf.constant([False, False, True, True])
1903 >>> z = tf.constant([False, True, False, True])
1904 >>> tf.math.logical_xor(y, z)
1905 <tf.Tensor: shape=(4,), dtype=bool, numpy=array([False, True, True, False])>
1907 Args:
1908 x: A `tf.Tensor` of type bool.
1909 y: A `tf.Tensor` of type bool.
1910 name: A name for the operation (optional).
1912 Returns:
1913 A `tf.Tensor` of type bool with the same size as that of x or y.
1914 """
1915 # TODO(alemi) Make this a cwise op if people end up relying on it.
1916 return gen_math_ops.logical_and(
1917 gen_math_ops.logical_or(x, y),
1918 gen_math_ops.logical_not(gen_math_ops.logical_and(x, y)),
1919 name=name)
1922def and_(x, y, name=None):
1923 if x.dtype == dtypes.bool:
1924 return gen_math_ops.logical_and(x, y, name)
1925 return gen_bitwise_ops.bitwise_and(x, y)
1928def or_(x, y, name=None):
1929 if x.dtype == dtypes.bool:
1930 return gen_math_ops.logical_or(x, y, name)
1931 return gen_bitwise_ops.bitwise_or(x, y)
1934def xor_(x, y, name=None):
1935 if x.dtype == dtypes.bool:
1936 return logical_xor(x, y, name)
1937 return gen_bitwise_ops.bitwise_xor(x, y)
1940def invert_(x, name=None):
1941 if x.dtype == dtypes.bool:
1942 return gen_math_ops.logical_not(x, name=name)
1943 return gen_bitwise_ops.invert(x, name=name)
1946_OverrideBinaryOperatorHelper(and_, "and")
1947_OverrideBinaryOperatorHelper(or_, "or")
1948_OverrideBinaryOperatorHelper(xor_, "xor")
1949ops.Tensor._override_operator("__invert__", invert_)
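These overrides make `&`, `|`, `^`, and `~` dtype-sensitive; a brief hedged sketch:

```python
import tensorflow as tf

a = tf.constant([True, False])
b = tf.constant([True, True])
print(a & b)  # bool dtype -> logical_and
# ==> tf.Tensor([ True False], shape=(2,), dtype=bool)
print(~a)     # bool dtype -> logical_not
# ==> tf.Tensor([False  True], shape=(2,), dtype=bool)

i = tf.constant([0b1100])
j = tf.constant([0b1010])
print(i & j)  # integer dtype -> bitwise_and
# ==> tf.Tensor([8], shape=(1,), dtype=int32)
```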
1952def _promote_dtypes_decorator(fn):
1953 def wrapper(x, y, *args, **kwargs):
1954 x, y = maybe_promote_tensors(x, y)
1955 return fn(x, y, *args, **kwargs)
1956 return tf_decorator.make_decorator(fn, wrapper)
1959ops.Tensor._override_operator("__lt__", _promote_dtypes_decorator(
1960 gen_math_ops.less))
1961ops.Tensor._override_operator("__le__", _promote_dtypes_decorator(
1962 gen_math_ops.less_equal))
1963ops.Tensor._override_operator("__gt__", _promote_dtypes_decorator(
1964 gen_math_ops.greater))
1965ops.Tensor._override_operator("__ge__", _promote_dtypes_decorator(
1966 gen_math_ops.greater_equal))
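With these overrides in place, the comparison operators promote compatible operands before calling the underlying kernels; a minimal sketch:

```python
import tensorflow as tf

x = tf.constant([1, 2, 3])
print(x < 2)   # the Python int is converted, then gen_math_ops.less runs
# ==> tf.Tensor([ True False False], shape=(3,), dtype=bool)
print(x >= 2)
# ==> tf.Tensor([False  True  True], shape=(3,), dtype=bool)
```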
1969@tf_export("math.equal", "equal")
1970@dispatch.register_binary_elementwise_api
1971@dispatch.add_dispatch_support
1972def equal(x, y, name=None):
1973 """Returns the truth value of (x == y) element-wise.
1975 Performs a [broadcast](
1976 https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
1977 arguments and then an element-wise equality comparison, returning a Tensor of
1978 boolean values.
1980 For example:
1982 >>> x = tf.constant([2, 4])
1983 >>> y = tf.constant(2)
1984 >>> tf.math.equal(x, y)
1985 <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
1987 >>> x = tf.constant([2, 4])
1988 >>> y = tf.constant([2, 4])
1989 >>> tf.math.equal(x, y)
1990 <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, True])>
1992 Args:
1993 x: A `tf.Tensor`.
1994 y: A `tf.Tensor`.
1995 name: A name for the operation (optional).
1997 Returns:
1998 A `tf.Tensor` of type bool with the same size as that of x or y.
2000 Raises:
2001 `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible
2002 """
2003 return gen_math_ops.equal(x, y, name=name)
2006@tf_export("math.not_equal", "not_equal")
2007@dispatch.register_binary_elementwise_api
2008@dispatch.add_dispatch_support
2009def not_equal(x, y, name=None):
2010 """Returns the truth value of (x != y) element-wise.
2012 Performs a [broadcast](
2013 https://docs.scipy.org/doc/numpy/user/basics.broadcasting.html) with the
2014 arguments and then an element-wise inequality comparison, returning a Tensor
2015 of boolean values.
2017 For example:
2019 >>> x = tf.constant([2, 4])
2020 >>> y = tf.constant(2)
2021 >>> tf.math.not_equal(x, y)
2022 <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, True])>
2024 >>> x = tf.constant([2, 4])
2025 >>> y = tf.constant([2, 4])
2026 >>> tf.math.not_equal(x, y)
2027 <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
2029 Args:
2030 x: A `tf.Tensor`.
2031 y: A `tf.Tensor`.
2032 name: A name for the operation (optional).
2034 Returns:
2035 A `tf.Tensor` of type bool with the same size as that of x or y.
2037 Raises:
2038 `tf.errors.InvalidArgumentError`: If shapes of arguments are incompatible
2039 """
2040 return gen_math_ops.not_equal(x, y, name=name)
2043@tf_export("__operators__.eq", v1=[])
2044@dispatch.add_dispatch_support
2045def tensor_equals(self, other):
2046 """The operation invoked by the `Tensor.__eq__` operator.
2048 Compares two tensors element-wise for equality if they are
2049 broadcast-compatible; or returns False if they are not broadcast-compatible.
2050 (Note that this behavior differs from `tf.math.equal`, which raises an
2051 exception if the two tensors are not broadcast-compatible.)
2053 Purpose in the API:
2055 This method is exposed in TensorFlow's API so that library developers
2056 can register dispatching for `Tensor.__eq__` to allow it to handle
2057 custom composite tensors & other custom objects.
2059 The API symbol is not intended to be called by users directly, although it
2060 does appear in TensorFlow's generated documentation.
2062 Args:
2063 self: The left-hand side of the `==` operator.
2064 other: The right-hand side of the `==` operator.
2066 Returns:
2067 The result of the elementwise `==` operation, or `False` if the arguments
2068 are not broadcast-compatible.
2069 """
2070 if other is None:
2071 return False
2072 g = getattr(self, "graph", None)
2073 if (ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions() and
2074 (g is None or g.building_function)):
2075 self, other = maybe_promote_tensors(self, other)
2076 return gen_math_ops.equal(self, other, incompatible_shape_error=False)
2077 else:
2078 # In legacy graph mode, tensor equality is object equality
2079 return self is other
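A hedged sketch of the eager-mode behavior described above:

```python
import tensorflow as tf

x = tf.constant([1, 2])
print(x == tf.constant([1, 3]))  # eager mode: elementwise comparison
# ==> tf.Tensor([ True False], shape=(2,), dtype=bool)
print(x == None)  # comparing against None short-circuits to plain False
# ==> False
```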
2082@tf_export("__operators__.ne", v1=[])
2083@dispatch.add_dispatch_support
2084def tensor_not_equals(self, other):
2085 """The operation invoked by the `Tensor.__ne__` operator.
2087 Compares two tensors element-wise for inequality if they are
2088 broadcast-compatible; or returns True if they are not broadcast-compatible.
2089 (Note that this behavior differs from `tf.math.not_equal`, which raises an
2090 exception if the two tensors are not broadcast-compatible.)
2092 Purpose in the API:
2094 This method is exposed in TensorFlow's API so that library developers
2095 can register dispatching for `Tensor.__ne__` to allow it to handle
2096 custom composite tensors & other custom objects.
2098 The API symbol is not intended to be called by users directly, although it
2099 does appear in TensorFlow's generated documentation.
2101 Args:
2102 self: The left-hand side of the `!=` operator.
2103 other: The right-hand side of the `!=` operator.
2105 Returns:
2106 The result of the elementwise `!=` operation, or `True` if the arguments
2107 are not broadcast-compatible.
2108 """
2109 if other is None:
2110 return True
2111 if ops.Tensor._USE_EQUALITY and ops.executing_eagerly_outside_functions():
2112 self, other = maybe_promote_tensors(self, other)
2113 return gen_math_ops.not_equal(self, other, incompatible_shape_error=False)
2114 else:
2115 # In legacy graph mode, tensor equality is object equality
2116 return self is not other
2119ops.Tensor._override_operator("__eq__", tensor_equals)
2120ops.Tensor._override_operator("__ne__", tensor_not_equals)
2123@tf_export("range")
2124@dispatch.add_dispatch_support
2125def range(start, limit=None, delta=1, dtype=None, name="range"): # pylint: disable=redefined-builtin
2126 """Creates a sequence of numbers.
2128 Creates a sequence of numbers that begins at `start` and extends by
2129 increments of `delta` up to but not including `limit`.
2131 The dtype of the resulting tensor is inferred from the inputs unless
2132 it is provided explicitly.
2134 Like the Python builtin `range`, `start` defaults to 0, so that
2135 `range(n) = range(0, n)`.
2137 For example:
2139 >>> start = 3
2140 >>> limit = 18
2141 >>> delta = 3
2142 >>> tf.range(start, limit, delta)
2143 <tf.Tensor: shape=(5,), dtype=int32,
2144 numpy=array([ 3, 6, 9, 12, 15], dtype=int32)>
2146 >>> start = 3
2147 >>> limit = 1
2148 >>> delta = -0.5
2149 >>> tf.range(start, limit, delta)
2150 <tf.Tensor: shape=(4,), dtype=float32,
2151 numpy=array([3. , 2.5, 2. , 1.5], dtype=float32)>
2153 >>> limit = 5
2154 >>> tf.range(limit)
2155 <tf.Tensor: shape=(5,), dtype=int32,
2156 numpy=array([0, 1, 2, 3, 4], dtype=int32)>
2158 Args:
2159 start: A 0-D `Tensor` (scalar). Acts as first entry in the range if `limit`
2160 is not None; otherwise, acts as range limit and first entry defaults to 0.
2161 limit: A 0-D `Tensor` (scalar). Upper limit of sequence, exclusive. If None,
2162 defaults to the value of `start` while the first entry of the range
2163 defaults to 0.
2164 delta: A 0-D `Tensor` (scalar). Number that increments `start`. Defaults to
2165 1.
2166 dtype: The type of the elements of the resulting tensor.
2167 name: A name for the operation. Defaults to "range".
2169 Returns:
2170 An 1-D `Tensor` of type `dtype`.
2172 @compatibility(numpy)
2173 Equivalent to np.arange
2174 @end_compatibility
2175 """
2176 if limit is None:
2177 start, limit = 0, start
2179 with ops.name_scope(name, "Range", [start, limit, delta]) as name:
2180 if not isinstance(start, ops.Tensor):
2181 start = ops.convert_to_tensor(start, dtype=dtype, name="start")
2182 if not isinstance(limit, ops.Tensor):
2183 limit = ops.convert_to_tensor(limit, dtype=dtype, name="limit")
2184 if not isinstance(delta, ops.Tensor):
2185 delta = ops.convert_to_tensor(delta, dtype=dtype, name="delta")
2187 # infer dtype if not explicitly provided
2188 if dtype is None:
2189 dtype_hierarchy = [
2190 dtypes.int32, dtypes.int64, dtypes.float32, dtypes.float64
2191 ]
2192 assert all(arg.dtype in dtype_hierarchy for arg in [start, limit, delta])
2193 inferred_dtype = max([arg.dtype for arg in [start, limit, delta]],
2194 key=dtype_hierarchy.index)
2195 else:
2196 inferred_dtype = dtype
2197 # Always try to perform a cast even when start/limit/delta are already
2198 # tensors. This resolves the case where the original dtype of
2199 # start/limit/delta differs from the provided dtype.
2200 start = cast(start, inferred_dtype)
2201 limit = cast(limit, inferred_dtype)
2202 delta = cast(delta, inferred_dtype)
2204 return gen_math_ops._range(start, limit, delta, name=name)
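A small sketch of the dtype-inference rule above: when no `dtype` is given, the highest dtype among `start`, `limit`, and `delta` in the hierarchy int32 < int64 < float32 < float64 wins:

```python
import tensorflow as tf

print(tf.range(3, 10).dtype)       # all-int32 inputs        ==> tf.int32
print(tf.range(3, 10, 0.5).dtype)  # float32 delta promotes  ==> tf.float32
print(tf.range(3, 10, 2, dtype=tf.float64).dtype)  # explicit ==> tf.float64
```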
2207def _range_tensor_conversion_function(value, dtype=None, name=None,
2208 as_ref=False):
2209 del as_ref
2210 return range(value.start, value.stop, value.step, dtype=dtype, name=name)
2213tensor_conversion_registry.register_tensor_conversion_function(
2214 builtins.range, _range_tensor_conversion_function)
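With this registration in place, a Python built-in `range` object converts directly to a 1-D tensor; a quick hedged check:

```python
import tensorflow as tf

print(tf.convert_to_tensor(range(2, 10, 3)))
# ==> tf.Tensor([2 5 8], shape=(3,), dtype=int32)
```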
2217# Reduction operations
2218def _ReductionDims(x, axis): # pylint: disable=invalid-name
2219 """Returns range(0, rank(x)) if axis is None."""
2220 if axis is not None:
2221 return axis
2222 else:
2223 try:
2224 x_rank = x.shape.rank
2225 except AttributeError:
2226 x_rank = None
2228 # Fast path: avoid creating Rank and Range ops if ndims is known.
2229 if x_rank:
2230 return constant_op.constant(np.arange(x_rank, dtype=np.int32))
2231 else:
2232 # Otherwise, we rely on Range and Rank to do the right thing at run-time.
2233 return range(0, array_ops.rank(x))
2236def _has_fully_defined_shape(tensor):
2237 """Returns true if tensor has a fully defined shape."""
2238 return isinstance(tensor, ops.EagerTensor) or tensor.shape.is_fully_defined()
2241def _may_reduce_to_scalar(keepdims, axis, output):
2242 """Set a reduction's output shape to be a scalar if we are certain."""
2243 if not _has_fully_defined_shape(output) and (not keepdims) and (
2244 axis is None):
2245 output.set_shape(())
2246 return output
2249@tf_export(v1=["math.reduce_sum", "reduce_sum"])
2250@dispatch.add_dispatch_support
2251@deprecation.deprecated_args(None,
2252 "keep_dims is deprecated, use keepdims instead",
2253 "keep_dims")
2254def reduce_sum_v1(input_tensor,
2255 axis=None,
2256 keepdims=None,
2257 name=None,
2258 reduction_indices=None,
2259 keep_dims=None):
2260 """Computes the sum of elements across dimensions of a tensor.
2262 This is the reduction operation for the elementwise `tf.math.add` op.
2264 Reduces `input_tensor` along the dimensions given in `axis`.
2265 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2266 of the entries in `axis`, which must be unique. If `keepdims` is true, the
2267 reduced dimensions are retained with length 1.
2269 If `axis` is None, all dimensions are reduced, and a
2270 tensor with a single element is returned.
2272 For example:
2274 >>> # x has a shape of (2, 3) (two rows and three columns):
2275 >>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
2276 >>> x.numpy()
2277 array([[1, 1, 1],
2278 [1, 1, 1]], dtype=int32)
2279 >>> # sum all the elements
2280 >>> # 1 + 1 + 1 + 1 + 1 + 1 = 6
2281 >>> tf.reduce_sum(x).numpy()
2282 6
2283 >>> # reduce along the first dimension
2284 >>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
2285 >>> tf.reduce_sum(x, 0).numpy()
2286 array([2, 2, 2], dtype=int32)
2287 >>> # reduce along the second dimension
2288 >>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
2289 >>> tf.reduce_sum(x, 1).numpy()
2290 array([3, 3], dtype=int32)
2291 >>> # keep the original dimensions
2292 >>> tf.reduce_sum(x, 1, keepdims=True).numpy()
2293 array([[3],
2294 [3]], dtype=int32)
2295 >>> # reduce along both dimensions
2296 >>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
2297 >>> # or, equivalently, reduce along rows, then reduce the resultant array
2298 >>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
2299 >>> # 2 + 2 + 2 = 6
2300 >>> tf.reduce_sum(x, [0, 1]).numpy()
2301 6
2303 Args:
2304 input_tensor: The tensor to reduce. Should have numeric type.
2305 axis: The dimensions to reduce. If `None` (the default), reduces all
2306 dimensions. Must be in the range `[-rank(input_tensor),
2307 rank(input_tensor))`.
2308 keepdims: If true, retains reduced dimensions with length 1.
2309 name: A name for the operation (optional).
2310 reduction_indices: The old (deprecated) name for axis.
2311 keep_dims: Deprecated alias for `keepdims`.
2313 Returns:
2314 The reduced tensor, of the same dtype as the input_tensor.
2316 @compatibility(numpy)
2317 Equivalent to np.sum, apart from the fact that numpy upcasts uint8 and
2318 int32 to int64 while tensorflow returns the same dtype as the input.
2319 @end_compatibility
2320 """
2321 axis = deprecation.deprecated_argument_lookup("axis", axis,
2322 "reduction_indices",
2323 reduction_indices)
2324 keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
2325 "keep_dims", keep_dims)
2326 return reduce_sum(input_tensor, axis, keepdims, name)
2329@tf_export("math.reduce_sum", "reduce_sum", v1=[])
2330@dispatch.add_dispatch_support
2331def reduce_sum(input_tensor, axis=None, keepdims=False, name=None):
2332 """Computes the sum of elements across dimensions of a tensor.
2334 This is the reduction operation for the elementwise `tf.math.add` op.
2336 Reduces `input_tensor` along the dimensions given in `axis`.
2337 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2338 of the entries in `axis`, which must be unique. If `keepdims` is true, the
2339 reduced dimensions are retained with length 1.
2341 If `axis` is None, all dimensions are reduced, and a
2342 tensor with a single element is returned.
2344 For example:
2346 >>> # x has a shape of (2, 3) (two rows and three columns):
2347 >>> x = tf.constant([[1, 1, 1], [1, 1, 1]])
2348 >>> x.numpy()
2349 array([[1, 1, 1],
2350 [1, 1, 1]], dtype=int32)
2351 >>> # sum all the elements
2352 >>> # 1 + 1 + 1 + 1 + 1 + 1 = 6
2353 >>> tf.reduce_sum(x).numpy()
2354 6
2355 >>> # reduce along the first dimension
2356 >>> # the result is [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
2357 >>> tf.reduce_sum(x, 0).numpy()
2358 array([2, 2, 2], dtype=int32)
2359 >>> # reduce along the second dimension
2360 >>> # the result is [1, 1] + [1, 1] + [1, 1] = [3, 3]
2361 >>> tf.reduce_sum(x, 1).numpy()
2362 array([3, 3], dtype=int32)
2363 >>> # keep the original dimensions
2364 >>> tf.reduce_sum(x, 1, keepdims=True).numpy()
2365 array([[3],
2366 [3]], dtype=int32)
2367 >>> # reduce along both dimensions
2368 >>> # the result is 1 + 1 + 1 + 1 + 1 + 1 = 6
2369 >>> # or, equivalently, reduce along rows, then reduce the resultant array
2370 >>> # [1, 1, 1] + [1, 1, 1] = [2, 2, 2]
2371 >>> # 2 + 2 + 2 = 6
2372 >>> tf.reduce_sum(x, [0, 1]).numpy()
2373 6
2375 Args:
2376 input_tensor: The tensor to reduce. Should have numeric type.
2377 axis: The dimensions to reduce. If `None` (the default), reduces all
2378 dimensions. Must be in the range `[-rank(input_tensor),
2379 rank(input_tensor))`.
2380 keepdims: If true, retains reduced dimensions with length 1.
2381 name: A name for the operation (optional).
2383 Returns:
2384 The reduced tensor, of the same dtype as the input_tensor.
2386 @compatibility(numpy)
2387 Equivalent to np.sum, apart from the fact that numpy upcasts uint8 and
2388 int32 to int64 while tensorflow returns the same dtype as the input.
2389 @end_compatibility
2390 """
2392 return reduce_sum_with_dims(input_tensor, axis, keepdims, name,
2393 _ReductionDims(input_tensor, axis))
2396def reduce_sum_with_dims(input_tensor,
2397 axis=None,
2398 keepdims=False,
2399 name=None,
2400 dims=None):
2401 keepdims = False if keepdims is None else bool(keepdims)
2402 return _may_reduce_to_scalar(
2403 keepdims, axis,
2404 gen_math_ops._sum(input_tensor, dims, keepdims, name=name))
2407@tf_export("math.reduce_euclidean_norm")
2408@dispatch.add_dispatch_support
2409def reduce_euclidean_norm(input_tensor, axis=None, keepdims=False, name=None):
2410 """Computes the Euclidean norm of elements across dimensions of a tensor.
2412 Reduces `input_tensor` along the dimensions given in `axis`.
2413 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2414 of the entries in `axis`, which must be unique. If `keepdims` is true, the
2415 reduced dimensions are retained with length 1.
2417 If `axis` is None, all dimensions are reduced, and a
2418 tensor with a single element is returned.
2420 For example:
2422 ```python
2423 x = tf.constant([[1, 2, 3], [1, 1, 1]]) # x.dtype is tf.int32
2424 tf.math.reduce_euclidean_norm(x) # returns 4 as dtype is tf.int32
2425 y = tf.constant([[1, 2, 3], [1, 1, 1]], dtype = tf.float32)
2426 tf.math.reduce_euclidean_norm(y) # returns 4.1231055 which is sqrt(17)
2427 tf.math.reduce_euclidean_norm(y, 0) # [sqrt(2), sqrt(5), sqrt(10)]
2428 tf.math.reduce_euclidean_norm(y, 1) # [sqrt(14), sqrt(3)]
2429 tf.math.reduce_euclidean_norm(y, 1, keepdims=True) # [[sqrt(14)], [sqrt(3)]]
2430 tf.math.reduce_euclidean_norm(y, [0, 1]) # sqrt(17)
2431 ```
2433 Args:
2434 input_tensor: The tensor to reduce. Should have numeric type.
2435 axis: The dimensions to reduce. If `None` (the default), reduces all
2436 dimensions. Must be in the range `[-rank(input_tensor),
2437 rank(input_tensor))`.
2438 keepdims: If true, retains reduced dimensions with length 1.
2439 name: A name for the operation (optional).
2441 Returns:
2442 The reduced tensor, of the same dtype as the input_tensor.
2443 """
2444 keepdims = bool(keepdims)
2445 return _may_reduce_to_scalar(
2446 keepdims, axis,
2447 gen_math_ops.euclidean_norm(
2448 input_tensor, _ReductionDims(input_tensor, axis), keepdims,
2449 name=name))
2452@tf_export(v1=["math.count_nonzero", "count_nonzero"])
2453@dispatch.add_dispatch_support
2454@deprecation.deprecated_args(None,
2455 "keep_dims is deprecated, use keepdims instead",
2456 "keep_dims")
2457@deprecation.deprecated_args(
2458 None, "reduction_indices is deprecated, use axis instead",
2459 "reduction_indices")
2460def count_nonzero(input_tensor=None,
2461 axis=None,
2462 keepdims=None,
2463 dtype=dtypes.int64,
2464 name=None,
2465 reduction_indices=None,
2466 keep_dims=None,
2467 input=None): # pylint: disable=redefined-builtin
2468 """Computes number of nonzero elements across dimensions of a tensor.
2470 Reduces `input_tensor` along the dimensions given in `axis`.
2471 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2472 entry in `axis`. If `keepdims` is true, the reduced dimensions
2473 are retained with length 1.
2475 If `axis` has no entries, all dimensions are reduced, and a
2476 tensor with a single element is returned.
2478 **NOTE** Floating point comparison to zero is done by exact floating point
2479 equality check. Small values are **not** rounded to zero for purposes of
2480 the nonzero check.
2482 For example:
2484 ```python
2485 x = tf.constant([[0, 1, 0], [1, 1, 0]])
2486 tf.math.count_nonzero(x) # 3
2487 tf.math.count_nonzero(x, 0) # [1, 2, 0]
2488 tf.math.count_nonzero(x, 1) # [1, 2]
2489 tf.math.count_nonzero(x, 1, keepdims=True) # [[1], [2]]
2490 tf.math.count_nonzero(x, [0, 1]) # 3
2491 ```
2493 **NOTE** Strings are compared against the zero-length empty string `""`. Any
2494 string with a size greater than zero is considered nonzero.
2496 For example:
2497 ```python
2498 x = tf.constant(["", "a", " ", "b", ""])
2499 tf.math.count_nonzero(x) # 3, with "a", " ", and "b" as nonzero strings.
2500 ```
2502 Args:
2503 input_tensor: The tensor to reduce. Should be of numeric type, `bool`, or
2504 `string`.
2505 axis: The dimensions to reduce. If `None` (the default), reduces all
2506 dimensions. Must be in the range `[-rank(input_tensor),
2507 rank(input_tensor))`.
2508 keepdims: If true, retains reduced dimensions with length 1.
2509 dtype: The output dtype; defaults to `tf.int64`.
2510 name: A name for the operation (optional).
2511 reduction_indices: The old (deprecated) name for axis.
2512 keep_dims: Deprecated alias for `keepdims`.
2513 input: Overrides input_tensor. For compatibility.
2515 Returns:
2516 The reduced tensor (number of nonzero values).
2517 """
2518 keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
2519 "keep_dims", keep_dims)
2520 input_tensor = deprecation.deprecated_argument_lookup("input", input,
2521 "input_tensor",
2522 input_tensor)
2523 axis = deprecation.deprecated_argument_lookup("axis", axis,
2524 "reduction_indices",
2525 reduction_indices)
2527 return count_nonzero_v2(input_tensor, axis, keepdims, dtype, name)
2530@tf_export("math.count_nonzero", v1=[])
2531@dispatch.add_dispatch_support
2532def count_nonzero_v2(
2533 input, # pylint: disable=redefined-builtin
2534 axis=None,
2535 keepdims=None,
2536 dtype=dtypes.int64,
2537 name=None):
2538 """Computes number of nonzero elements across dimensions of a tensor.
2540 Reduces `input` along the dimensions given in `axis`.
2541 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2542 entry in `axis`. If `keepdims` is true, the reduced dimensions
2543 are retained with length 1.
2545 If `axis` has no entries, all dimensions are reduced, and a
2546 tensor with a single element is returned.
2548 **NOTE** Floating point comparison to zero is done by exact floating point
2549 equality check. Small values are **not** rounded to zero for purposes of
2550 the nonzero check.
2552 For example:
2554 ```python
2555 x = tf.constant([[0, 1, 0], [1, 1, 0]])
2556 tf.math.count_nonzero(x) # 3
2557 tf.math.count_nonzero(x, 0) # [1, 2, 0]
2558 tf.math.count_nonzero(x, 1) # [1, 2]
2559 tf.math.count_nonzero(x, 1, keepdims=True) # [[1], [2]]
2560 tf.math.count_nonzero(x, [0, 1]) # 3
2561 ```
2563 **NOTE** Strings are compared against the zero-length empty string `""`. Any
2564 string with a size greater than zero is considered nonzero.
2566 For example:
2567 ```python
2568 x = tf.constant(["", "a", " ", "b", ""])
2569 tf.math.count_nonzero(x) # 3, with "a", " ", and "b" as nonzero strings.
2570 ```
2572 Args:
2573 input: The tensor to reduce. Should be of numeric type, `bool`, or `string`.
2574 axis: The dimensions to reduce. If `None` (the default), reduces all
2575 dimensions. Must be in the range `[-rank(input), rank(input))`.
2576 keepdims: If true, retains reduced dimensions with length 1.
2577 dtype: The output dtype; defaults to `tf.int64`.
2578 name: A name for the operation (optional).
2580 Returns:
2581 The reduced tensor (number of nonzero values).
2582 """
2583 if keepdims is None:
2584 keepdims = False
2585 with ops.name_scope(name, "count_nonzero", [input]):
2586 input = ops.convert_to_tensor(input, name="input")
2587 # A scalar of 'zero' is enough as `not_equal` will broadcast.
2588 zero = array_ops.zeros([], dtype=input.dtype)
2589 return cast(
2590 reduce_sum(
2591 # int64 reduction happens on GPU
2592 cast(gen_math_ops.not_equal(input, zero), dtypes.int64),
2593 axis=axis,
2594 keepdims=keepdims),
2595 dtype=dtype)
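The implementation above is simply a sum over a boolean mask; a hedged sketch of the equivalence:

```python
import tensorflow as tf

x = tf.constant([[0, 1, 0], [1, 1, 0]])
mask = tf.cast(tf.not_equal(x, 0), tf.int64)
print(tf.reduce_sum(mask))               # ==> 3, same as count_nonzero(x)
print(tf.math.count_nonzero(x, axis=1))  # ==> [1 2]
```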
2598@tf_export(v1=["math.reduce_mean", "reduce_mean"])
2599@dispatch.add_dispatch_support
2600def reduce_mean_v1(input_tensor,
2601 axis=None,
2602 keepdims=None,
2603 name=None,
2604 reduction_indices=None,
2605 keep_dims=None):
2606 """Computes the mean of elements across dimensions of a tensor.
2608 Reduces `input_tensor` along the dimensions given in `axis` by computing the
2609 mean of elements across the dimensions in `axis`.
2610 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2611 of the entries in `axis`, which must be unique. If `keepdims` is true, the
2612 reduced dimensions are retained with length 1.
2614 If `axis` is None, all dimensions are reduced, and a tensor with a single
2615 element is returned.
2617 For example:
2619 >>> x = tf.constant([[1., 1.], [2., 2.]])
2620 >>> tf.reduce_mean(x)
2621 <tf.Tensor: shape=(), dtype=float32, numpy=1.5>
2622 >>> tf.reduce_mean(x, 0)
2623 <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
2624 >>> tf.reduce_mean(x, 1)
2625 <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>
2627 Args:
2628 input_tensor: The tensor to reduce. Should have numeric type.
2629 axis: The dimensions to reduce. If `None` (the default), reduces all
2630 dimensions. Must be in the range `[-rank(input_tensor),
2631 rank(input_tensor))`.
2632 keepdims: If true, retains reduced dimensions with length 1.
2633 name: A name for the operation (optional).
2634 reduction_indices: The old (deprecated) name for axis.
2635 keep_dims: Deprecated alias for `keepdims`.
2637 Returns:
2638 The reduced tensor.
2640 @compatibility(numpy)
2641 Equivalent to np.mean
2643 Please note that `np.mean` has a `dtype` parameter that could be used to
2644 specify the output type. By default this is `dtype=float64`. On the other
2645 hand, `tf.reduce_mean` aggressively infers its output type from `input_tensor`,
2646 for example:
2648 >>> x = tf.constant([1, 0, 1, 0])
2649 >>> tf.reduce_mean(x)
2650 <tf.Tensor: shape=(), dtype=int32, numpy=0>
2651 >>> y = tf.constant([1., 0., 1., 0.])
2652 >>> tf.reduce_mean(y)
2653 <tf.Tensor: shape=(), dtype=float32, numpy=0.5>
2655 @end_compatibility
2656 """
2657 axis = deprecation.deprecated_argument_lookup("axis", axis,
2658 "reduction_indices",
2659 reduction_indices)
2660 keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
2661 "keep_dims", keep_dims)
2662 return reduce_mean(input_tensor, axis, keepdims, name)
2665@tf_export("math.reduce_mean", "reduce_mean", v1=[])
2666@dispatch.add_dispatch_support
2667def reduce_mean(input_tensor, axis=None, keepdims=False, name=None):
2668 """Computes the mean of elements across dimensions of a tensor.
2670 Reduces `input_tensor` along the dimensions given in `axis` by computing the
2671 mean of elements across the dimensions in `axis`.
2672 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2673 of the entries in `axis`, which must be unique. If `keepdims` is true, the
2674 reduced dimensions are retained with length 1.
2676 If `axis` is None, all dimensions are reduced, and a tensor with a single
2677 element is returned.
2679 For example:
2681 >>> x = tf.constant([[1., 1.], [2., 2.]])
2682 >>> tf.reduce_mean(x)
2683 <tf.Tensor: shape=(), dtype=float32, numpy=1.5>
2684 >>> tf.reduce_mean(x, 0)
2685 <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1.5, 1.5], dtype=float32)>
2686 >>> tf.reduce_mean(x, 1)
2687 <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 2.], dtype=float32)>
2689 Args:
2690 input_tensor: The tensor to reduce. Should have numeric type.
2691 axis: The dimensions to reduce. If `None` (the default), reduces all
2692 dimensions. Must be in the range `[-rank(input_tensor),
2693 rank(input_tensor))`.
2694 keepdims: If true, retains reduced dimensions with length 1.
2695 name: A name for the operation (optional).
2697 Returns:
2698 The reduced tensor.
2700 @compatibility(numpy)
2701 Equivalent to np.mean
2703 Please note that `np.mean` has a `dtype` parameter that could be used to
2704 specify the output type. By default this is `dtype=float64`. On the other
2705 hand, `tf.reduce_mean` aggressively infers its output type from `input_tensor`,
2706 for example:
2708 >>> x = tf.constant([1, 0, 1, 0])
2709 >>> tf.reduce_mean(x)
2710 <tf.Tensor: shape=(), dtype=int32, numpy=0>
2711 >>> y = tf.constant([1., 0., 1., 0.])
2712 >>> tf.reduce_mean(y)
2713 <tf.Tensor: shape=(), dtype=float32, numpy=0.5>
2715 @end_compatibility
2716 """
2717 keepdims = False if keepdims is None else bool(keepdims)
2718 return _may_reduce_to_scalar(
2719 keepdims, axis,
2720 gen_math_ops.mean(
2721 input_tensor, _ReductionDims(input_tensor, axis), keepdims,
2722 name=name))
2725@tf_export("math.reduce_variance")
2726@dispatch.add_dispatch_support
2727def reduce_variance(input_tensor, axis=None, keepdims=False, name=None):
2728 """Computes the variance of elements across dimensions of a tensor.
2730 Reduces `input_tensor` along the dimensions given in `axis`.
2731 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2732 of the entries in `axis`, which must be unique. If `keepdims` is true, the
2733 reduced dimensions are retained with length 1.
2735 If `axis` is None, all dimensions are reduced, and a
2736 tensor with a single element is returned.
2738 For example:
2740 >>> x = tf.constant([[1., 2.], [3., 4.]])
2741 >>> tf.math.reduce_variance(x)
2742 <tf.Tensor: shape=(), dtype=float32, numpy=1.25>
2743 >>> tf.math.reduce_variance(x, 0)
2744 <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], ...)>
2745 >>> tf.math.reduce_variance(x, 1)
2746 <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.25, 0.25], ...)>
2748 Args:
2749 input_tensor: The tensor to reduce. Should have real or complex type.
2750 axis: The dimensions to reduce. If `None` (the default), reduces all
2751 dimensions. Must be in the range `[-rank(input_tensor),
2752 rank(input_tensor))`.
2753 keepdims: If true, retains reduced dimensions with length 1.
2754 name: A name scope for the associated operations (optional).
2756 Returns:
2757 The reduced tensor, of the same dtype as the input_tensor. Note, for
2758 `complex64` or `complex128` input, the returned `Tensor` will be of type
2759 `float32` or `float64`, respectively.
2761 @compatibility(numpy)
2762 Equivalent to np.var
2764 Please note `np.var` has a `dtype` parameter that could be used to specify the
2765 output type. By default this is `dtype=float64`. On the other hand,
2766 `tf.math.reduce_variance` aggressively infers its output type from `input_tensor`.
2767 @end_compatibility
2768 """
2769 name = name if name else "reduce_variance"
2770 with ops.name_scope(name):
2771 input_tensor = ops.convert_to_tensor(input_tensor)
2772 means = reduce_mean(input_tensor, axis=axis, keepdims=True)
2773 if means.dtype.is_integer:
2774 raise TypeError(f"Input must be either real or complex. "
2775 f"Received integer type {means.dtype}.")
2776 diff = input_tensor - means
2777 if diff.dtype.is_complex:
2778 # For complex values we need to take the absolute value before squaring.
2779 # This is achieved by multiplying with the conjugate.
2780 real_dtype = diff.dtype.real_dtype
2781 squared_deviations = gen_math_ops.real(
2782 gen_math_ops.mul(gen_math_ops.conj(diff), diff), Tout=real_dtype)
2783 else:
2784 squared_deviations = gen_math_ops.square(diff)
2785 return reduce_mean(squared_deviations, axis=axis, keepdims=keepdims)
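A quick sketch verifying the mean-of-squared-deviations identity implemented above:

```python
import tensorflow as tf

x = tf.constant([[1., 2.], [3., 4.]])
manual = tf.reduce_mean(tf.square(x - tf.reduce_mean(x)))
print(manual)                      # ==> tf.Tensor(1.25, shape=(), dtype=float32)
print(tf.math.reduce_variance(x))  # ==> tf.Tensor(1.25, shape=(), dtype=float32)
```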
2788@tf_export("math.reduce_std")
2789@dispatch.add_dispatch_support
2790def reduce_std(input_tensor, axis=None, keepdims=False, name=None):
2791 """Computes the standard deviation of elements across dimensions of a tensor.
2793 Reduces `input_tensor` along the dimensions given in `axis`.
2794 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2795 of the entries in `axis`, which must be unique. If `keepdims` is true, the
2796 reduced dimensions are retained with length 1.
2798 If `axis` is None, all dimensions are reduced, and a
2799 tensor with a single element is returned.
2801 For example:
2803 >>> x = tf.constant([[1., 2.], [3., 4.]])
2804 >>> tf.math.reduce_std(x)
2805 <tf.Tensor: shape=(), dtype=float32, numpy=1.118034>
2806 >>> tf.math.reduce_std(x, 0)
2807 <tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], dtype=float32)>
2808 >>> tf.math.reduce_std(x, 1)
2809 <tf.Tensor: shape=(2,), dtype=float32, numpy=array([0.5, 0.5], dtype=float32)>
2811 Args:
2812 input_tensor: The tensor to reduce. Should have real or complex type.
2813 axis: The dimensions to reduce. If `None` (the default), reduces all
2814 dimensions. Must be in the range `[-rank(input_tensor),
2815 rank(input_tensor))`.
2816 keepdims: If true, retains reduced dimensions with length 1.
2817 name: A name scope for the associated operations (optional).
2819 Returns:
2820 The reduced tensor, of the same dtype as the input_tensor. Note, for
2821 `complex64` or `complex128` input, the returned `Tensor` will be of type
2822 `float32` or `float64`, respectively.
2824 @compatibility(numpy)
2825 Equivalent to np.std
2827 Please note `np.std` has a `dtype` parameter that could be used to specify the
2828 output type. By default this is `dtype=float64`. On the other hand,
2829 `tf.math.reduce_std` aggressively infers its output type from `input_tensor`.
2830 @end_compatibility
2831 """
2832 name = name if name else "reduce_std"
2833 with ops.name_scope(name):
2834 input_tensor = ops.convert_to_tensor(input_tensor)
2835 variance = reduce_variance(input_tensor, axis=axis, keepdims=keepdims)
2836 return gen_math_ops.sqrt(variance)
2839@tf_export("math.reduce_prod", "reduce_prod", v1=[])
2840@dispatch.add_dispatch_support
2841def reduce_prod(input_tensor, axis=None, keepdims=False, name=None):
2842 """Computes `tf.math.multiply` of elements across dimensions of a tensor.
2844 This is the reduction operation for the elementwise `tf.math.multiply` op.
2846 Reduces `input_tensor` along the dimensions given in `axis`.
2847 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2848 entry in `axis`. If `keepdims` is true, the reduced dimensions
2849 are retained with length 1.
2851 If `axis` is None, all dimensions are reduced, and a
2852 tensor with a single element is returned.
2854 For example:
2856 >>> x = tf.constant([[1., 2.], [3., 4.]])
2857 >>> tf.math.reduce_prod(x)
2858 <tf.Tensor: shape=(), dtype=float32, numpy=24.>
2859 >>> tf.math.reduce_prod(x, 0)
2860 <tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
2861 >>> tf.math.reduce_prod(x, 1)
2862 <tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
2863 dtype=float32)>
2865 Args:
2866 input_tensor: The tensor to reduce. Should have numeric type.
2867 axis: The dimensions to reduce. If `None` (the default), reduces all
2868 dimensions. Must be in the range `[-rank(input_tensor),
2869 rank(input_tensor))`.
2870 keepdims: If true, retains reduced dimensions with length 1.
2871 name: A name for the operation (optional).
2873 Returns:
2874 The reduced tensor.
2876 @compatibility(numpy)
2877 Equivalent to np.prod
2878 @end_compatibility
2879 """
2880 keepdims = False if keepdims is None else bool(keepdims)
2881 return _may_reduce_to_scalar(
2882 keepdims, axis,
2883 gen_math_ops.prod(
2884 input_tensor, _ReductionDims(input_tensor, axis), keepdims,
2885 name=name))
2888@tf_export(v1=["math.reduce_prod", "reduce_prod"])
2889@dispatch.add_dispatch_support
2890@deprecation.deprecated_args(None,
2891 "keep_dims is deprecated, use keepdims instead",
2892 "keep_dims")
2893def reduce_prod_v1(input_tensor,
2894 axis=None,
2895 keepdims=None,
2896 name=None,
2897 reduction_indices=None,
2898 keep_dims=None):
2899 """Computes `tf.math.multiply` of elements across dimensions of a tensor.
2901 This is the reduction operation for the elementwise `tf.math.multiply` op.
2903 Reduces `input_tensor` along the dimensions given in `axis`.
2904 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2905 of the entries in `axis`, which must be unique. If `keepdims` is true, the
2906 reduced dimensions are retained with length 1.
2908 If `axis` is None, all dimensions are reduced, and a
2909 tensor with a single element is returned.
2911 For example:
2913 >>> x = tf.constant([[1., 2.], [3., 4.]])
2914 >>> tf.math.reduce_prod(x)
2915 <tf.Tensor: shape=(), dtype=float32, numpy=24.>
2916 >>> tf.math.reduce_prod(x, 0)
2917 <tf.Tensor: shape=(2,), dtype=float32, numpy=array([3., 8.], dtype=float32)>
2918 >>> tf.math.reduce_prod(x, 1)
2919 <tf.Tensor: shape=(2,), dtype=float32, numpy=array([2., 12.],
2920 dtype=float32)>
2922 Args:
2923 input_tensor: The tensor to reduce. Should have numeric type.
2924 axis: The dimensions to reduce. If `None` (the default), reduces all
2925 dimensions. Must be in the range `[-rank(input_tensor),
2926 rank(input_tensor))`.
2927 keepdims: If true, retains reduced dimensions with length 1.
2928 name: A name for the operation (optional).
2929 reduction_indices: The old (deprecated) name for axis.
2930 keep_dims: Deprecated alias for `keepdims`.
2932 Returns:
2933 The reduced tensor.
2935 @compatibility(numpy)
2936 Equivalent to np.prod
2937 @end_compatibility
2938 """
2939 axis = deprecation.deprecated_argument_lookup("axis", axis,
2940 "reduction_indices",
2941 reduction_indices)
2942 keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
2943 "keep_dims", keep_dims)
2944 return reduce_prod(input_tensor, axis, keepdims, name)
2947@tf_export(v1=["math.reduce_min", "reduce_min"])
2948@dispatch.add_dispatch_support
2949@deprecation.deprecated_args(None,
2950 "keep_dims is deprecated, use keepdims instead",
2951 "keep_dims")
2952def reduce_min_v1(input_tensor,
2953 axis=None,
2954 keepdims=None,
2955 name=None,
2956 reduction_indices=None,
2957 keep_dims=None):
2958 """Computes the `tf.math.minimum` of elements across dimensions of a tensor.
2960 This is the reduction operation for the elementwise `tf.math.minimum` op.
2962 Reduces `input_tensor` along the dimensions given in `axis`.
2963 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
2964 of the entries in `axis`, which must be unique. If `keepdims` is true, the
2965 reduced dimensions are retained with length 1.
2967 If `axis` is None, all dimensions are reduced, and a
2968 tensor with a single element is returned.
2970 Usage example:
2972 >>> x = tf.constant([5, 1, 2, 4])
2973 >>> tf.reduce_min(x)
2974 <tf.Tensor: shape=(), dtype=int32, numpy=1>
2975 >>> x = tf.constant([-5, -1, -2, -4])
2976 >>> tf.reduce_min(x)
2977 <tf.Tensor: shape=(), dtype=int32, numpy=-5>
2978 >>> x = tf.constant([4, float('nan')])
2979 >>> tf.reduce_min(x)
2980 <tf.Tensor: shape=(), dtype=float32, numpy=nan>
2981 >>> x = tf.constant([float('nan'), float('nan')])
2982 >>> tf.reduce_min(x)
2983 <tf.Tensor: shape=(), dtype=float32, numpy=nan>
2984 >>> x = tf.constant([float('-inf'), float('inf')])
2985 >>> tf.reduce_min(x)
2986 <tf.Tensor: shape=(), dtype=float32, numpy=-inf>
2988 See the numpy docs for `np.amin` and `np.nanmin` behavior.
2990 Args:
2991 input_tensor: The tensor to reduce. Should have real numeric type.
2992 axis: The dimensions to reduce. If `None` (the default), reduces all
2993 dimensions. Must be in the range `[-rank(input_tensor),
2994 rank(input_tensor))`.
2995 keepdims: If true, retains reduced dimensions with length 1.
2996 name: A name for the operation (optional).
2997 reduction_indices: The old (deprecated) name for axis.
2998 keep_dims: Deprecated alias for `keepdims`.
3000 Returns:
3001 The reduced tensor.
3002 """
3003 axis = deprecation.deprecated_argument_lookup("axis", axis,
3004 "reduction_indices",
3005 reduction_indices)
3006 keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
3007 "keep_dims", keep_dims)
3008 return reduce_min(input_tensor, axis, keepdims, name)
3011@tf_export("math.reduce_min", "reduce_min", v1=[])
3012@dispatch.add_dispatch_support
3013def reduce_min(input_tensor, axis=None, keepdims=False, name=None):
3014 """Computes the `tf.math.minimum` of elements across dimensions of a tensor.
3016 This is the reduction operation for the elementwise `tf.math.minimum` op.
3018 Reduces `input_tensor` along the dimensions given in `axis`.
3019 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3020 of the entries in `axis`, which must be unique. If `keepdims` is true, the
3021 reduced dimensions are retained with length 1.
3023 If `axis` is None, all dimensions are reduced, and a
3024 tensor with a single element is returned.
3026 For example:
3028 >>> a = tf.constant([
3029 ... [[1, 2], [3, 4]],
3030 ... [[1, 2], [3, 4]]
3031 ... ])
3032 >>> tf.reduce_min(a)
3033 <tf.Tensor: shape=(), dtype=int32, numpy=1>
3035 Choosing a specific axis returns the minimum elements along that axis:
3037 >>> b = tf.constant([[1, 2, 3], [4, 5, 6]])
3038 >>> tf.reduce_min(b, axis=0)
3039 <tf.Tensor: shape=(3,), dtype=int32, numpy=array([1, 2, 3], dtype=int32)>
3040 >>> tf.reduce_min(b, axis=1)
3041 <tf.Tensor: shape=(2,), dtype=int32, numpy=array([1, 4], dtype=int32)>
3043 Setting `keepdims` to `True` retains the dimension of `input_tensor`:
3045 >>> tf.reduce_min(a, keepdims=True)
3046 <tf.Tensor: shape=(1, 1, 1), dtype=int32, numpy=array([[[1]]], dtype=int32)>
3047 >>> tf.math.reduce_min(a, axis=0, keepdims=True)
3048 <tf.Tensor: shape=(1, 2, 2), dtype=int32, numpy=
3049 array([[[1, 2],
3050 [3, 4]]], dtype=int32)>
3052 Args:
3053 input_tensor: The tensor to reduce. Should have real numeric type.
3054 axis: The dimensions to reduce. If `None` (the default), reduces all
3055 dimensions. Must be in the range `[-rank(input_tensor),
3056 rank(input_tensor))`.
3057 keepdims: If true, retains reduced dimensions with length 1.
3058 name: A name for the operation (optional).
3060 Returns:
3061 The reduced tensor.
3063 @compatibility(numpy)
3064 Equivalent to np.min
3065 @end_compatibility
3066 """
3067 keepdims = False if keepdims is None else bool(keepdims)
3068 return _may_reduce_to_scalar(
3069 keepdims, axis,
3070 gen_math_ops._min(
3071 input_tensor, _ReductionDims(input_tensor, axis), keepdims,
3072 name=name))
3075@tf_export(v1=["math.reduce_max", "reduce_max"])
3076@dispatch.add_dispatch_support
3077@deprecation.deprecated_args(None,
3078 "keep_dims is deprecated, use keepdims instead",
3079 "keep_dims")
3080def reduce_max_v1(input_tensor,
3081 axis=None,
3082 keepdims=None,
3083 name=None,
3084 reduction_indices=None,
3085 keep_dims=None):
3086 """Computes `tf.math.maximum` of elements across dimensions of a tensor.
3088 This is the reduction operation for the elementwise `tf.math.maximum` op.
3090 Reduces `input_tensor` along the dimensions given in `axis`.
3091 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3092 of the entries in `axis`, which must be unique. If `keepdims` is true, the
3093 reduced dimensions are retained with length 1.
3095 If `axis` is None, all dimensions are reduced, and a
3096 tensor with a single element is returned.
3098 Usage example:
3100 >>> x = tf.constant([5, 1, 2, 4])
3101 >>> tf.reduce_max(x)
3102 <tf.Tensor: shape=(), dtype=int32, numpy=5>
3103 >>> x = tf.constant([-5, -1, -2, -4])
3104 >>> tf.reduce_max(x)
3105 <tf.Tensor: shape=(), dtype=int32, numpy=-1>
3106 >>> x = tf.constant([4, float('nan')])
3107 >>> tf.reduce_max(x)
3108 <tf.Tensor: shape=(), dtype=float32, numpy=nan>
3109 >>> x = tf.constant([float('nan'), float('nan')])
3110 >>> tf.reduce_max(x)
3111 <tf.Tensor: shape=(), dtype=float32, numpy=nan>
3112 >>> x = tf.constant([float('-inf'), float('inf')])
3113 >>> tf.reduce_max(x)
3114 <tf.Tensor: shape=(), dtype=float32, numpy=inf>
3116 See the numpy docs for `np.amax` and `np.nanmax` behavior.
3118 Args:
3119 input_tensor: The tensor to reduce. Should have real numeric type.
3120 axis: The dimensions to reduce. If `None` (the default), reduces all
3121 dimensions. Must be in the range `[-rank(input_tensor),
3122 rank(input_tensor))`.
3123 keepdims: If true, retains reduced dimensions with length 1.
3124 name: A name for the operation (optional).
3125 reduction_indices: The old (deprecated) name for axis.
3126 keep_dims: Deprecated alias for `keepdims`.
3128 Returns:
3129 The reduced tensor.
3130 """
3131 axis = deprecation.deprecated_argument_lookup("axis", axis,
3132 "reduction_indices",
3133 reduction_indices)
3134 keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
3135 "keep_dims", keep_dims)
3136 return reduce_max(input_tensor, axis, keepdims, name)
3139@tf_export("math.reduce_max", "reduce_max", v1=[])
3140@dispatch.add_dispatch_support
3141def reduce_max(input_tensor, axis=None, keepdims=False, name=None):
3142 """Computes `tf.math.maximum` of elements across dimensions of a tensor.
3144 This is the reduction operation for the elementwise `tf.math.maximum` op.
3146 Reduces `input_tensor` along the dimensions given in `axis`.
3147 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3148 of the entries in `axis`, which must be unique. If `keepdims` is true, the
3149 reduced dimensions are retained with length 1.
3151 If `axis` is None, all dimensions are reduced, and a
3152 tensor with a single element is returned.
3154 Usage example:
3156 >>> x = tf.constant([5, 1, 2, 4])
3157 >>> tf.reduce_max(x)
3158 <tf.Tensor: shape=(), dtype=int32, numpy=5>
3159 >>> x = tf.constant([-5, -1, -2, -4])
3160 >>> tf.reduce_max(x)
3161 <tf.Tensor: shape=(), dtype=int32, numpy=-1>
3162 >>> x = tf.constant([4, float('nan')])
3163 >>> tf.reduce_max(x)
3164 <tf.Tensor: shape=(), dtype=float32, numpy=nan>
3165 >>> x = tf.constant([float('nan'), float('nan')])
3166 >>> tf.reduce_max(x)
3167 <tf.Tensor: shape=(), dtype=float32, numpy=nan>
3168 >>> x = tf.constant([float('-inf'), float('inf')])
3169 >>> tf.reduce_max(x)
3170 <tf.Tensor: shape=(), dtype=float32, numpy=inf>
3172 See the numpy docs for `np.amax` and `np.nanmax` behavior.
3174 Args:
3175 input_tensor: The tensor to reduce. Should have real numeric type.
3176 axis: The dimensions to reduce. If `None` (the default), reduces all
3177 dimensions. Must be in the range `[-rank(input_tensor),
3178 rank(input_tensor))`.
3179 keepdims: If true, retains reduced dimensions with length 1.
3180 name: A name for the operation (optional).
3182 Returns:
3183 The reduced tensor.
3184 """
3185 return reduce_max_with_dims(input_tensor, axis, keepdims, name,
3186 _ReductionDims(input_tensor, axis))
3189def reduce_max_with_dims(input_tensor,
3190 axis=None,
3191 keepdims=False,
3192 name=None,
3193 dims=None):
3194 keepdims = False if keepdims is None else bool(keepdims)
3195 return _may_reduce_to_scalar(
3196 keepdims, axis,
3197 gen_math_ops._max(input_tensor, dims, keepdims, name=name))
3200@tf_export(v1=["math.reduce_all", "reduce_all"])
3201@dispatch.add_dispatch_support
3202@deprecation.deprecated_args(None,
3203 "keep_dims is deprecated, use keepdims instead",
3204 "keep_dims")
3205def reduce_all_v1(input_tensor,
3206 axis=None,
3207 keepdims=None,
3208 name=None,
3209 reduction_indices=None,
3210 keep_dims=None):
3211 """Computes `tf.math.logical_and` of elements across dimensions of a tensor.
3213 This is the reduction operation for the elementwise `tf.math.logical_and` op.
3215 Reduces `input_tensor` along the dimensions given in `axis`.
3216 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3217 of the entries in `axis`, which must be unique. If `keepdims` is true, the
3218 reduced dimensions are retained with length 1.
3220 If `axis` is None, all dimensions are reduced, and a
3221 tensor with a single element is returned.
3223 For example:
3225 >>> x = tf.constant([[True, True], [False, False]])
3226 >>> tf.math.reduce_all(x)
3227 <tf.Tensor: shape=(), dtype=bool, numpy=False>
3228 >>> tf.math.reduce_all(x, 0)
3229 <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
3230 >>> tf.math.reduce_all(x, 1)
3231 <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
3233 Args:
3234 input_tensor: The boolean tensor to reduce.
3235 axis: The dimensions to reduce. If `None` (the default), reduces all
3236 dimensions. Must be in the range `[-rank(input_tensor),
3237 rank(input_tensor))`.
3238 keepdims: If true, retains reduced dimensions with length 1.
3239 name: A name for the operation (optional).
3240 reduction_indices: The old (deprecated) name for axis.
3241 keep_dims: Deprecated alias for `keepdims`.
3243 Returns:
3244 The reduced tensor.
3246 @compatibility(numpy)
3247 Equivalent to np.all
3248 @end_compatibility
3249 """
3250 axis = deprecation.deprecated_argument_lookup("axis", axis,
3251 "reduction_indices",
3252 reduction_indices)
3253 keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
3254 "keep_dims", keep_dims)
3255 return reduce_all(input_tensor, axis, keepdims, name)
3258@tf_export("math.reduce_all", "reduce_all", v1=[])
3259@dispatch.add_dispatch_support
3260def reduce_all(input_tensor, axis=None, keepdims=False, name=None):
3261 """Computes `tf.math.logical_and` of elements across dimensions of a tensor.
3263 This is the reduction operation for the elementwise `tf.math.logical_and` op.
3265 Reduces `input_tensor` along the dimensions given in `axis`.
3266 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3267 of the entries in `axis`, which must be unique. If `keepdims` is true, the
3268 reduced dimensions are retained with length 1.
3270 If `axis` is None, all dimensions are reduced, and a
3271 tensor with a single element is returned.
3273 For example:
3275 >>> x = tf.constant([[True, True], [False, False]])
3276 >>> tf.math.reduce_all(x)
3277 <tf.Tensor: shape=(), dtype=bool, numpy=False>
3278 >>> tf.math.reduce_all(x, 0)
3279 <tf.Tensor: shape=(2,), dtype=bool, numpy=array([False, False])>
3280 >>> tf.math.reduce_all(x, 1)
3281 <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
3283 Args:
3284 input_tensor: The boolean tensor to reduce.
3285 axis: The dimensions to reduce. If `None` (the default), reduces all
3286 dimensions. Must be in the range `[-rank(input_tensor),
3287 rank(input_tensor))`.
3288 keepdims: If true, retains reduced dimensions with length 1.
3289 name: A name for the operation (optional).
3291 Returns:
3292 The reduced tensor.
3294 @compatibility(numpy)
3295 Equivalent to np.all
3296 @end_compatibility
3297 """
3298 keepdims = False if keepdims is None else bool(keepdims)
3299 return _may_reduce_to_scalar(
3300 keepdims, axis,
3301 gen_math_ops._all(
3302 input_tensor, _ReductionDims(input_tensor, axis), keepdims,
3303 name=name))
3306@tf_export(v1=["math.reduce_any", "reduce_any"])
3307@dispatch.add_dispatch_support
3308@deprecation.deprecated_args(None,
3309 "keep_dims is deprecated, use keepdims instead",
3310 "keep_dims")
3311def reduce_any_v1(input_tensor,
3312 axis=None,
3313 keepdims=None,
3314 name=None,
3315 reduction_indices=None,
3316 keep_dims=None):
3317 """Computes `tf.math.logical_or` of elements across dimensions of a tensor.
3319 This is the reduction operation for the elementwise `tf.math.logical_or` op.
3321 Reduces `input_tensor` along the dimensions given in `axis`.
3322 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3323 of the entries in `axis`, which must be unique. If `keepdims` is true, the
3324 reduced dimensions are retained with length 1.
3326 If `axis` is None, all dimensions are reduced, and a
3327 tensor with a single element is returned.
3329 For example:
3331 >>> x = tf.constant([[True, True], [False, False]])
3332 >>> tf.reduce_any(x)
3333 <tf.Tensor: shape=(), dtype=bool, numpy=True>
3334 >>> tf.reduce_any(x, 0)
3335 <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, True])>
3336 >>> tf.reduce_any(x, 1)
3337 <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
3339 Args:
3340 input_tensor: The boolean tensor to reduce.
3341 axis: The dimensions to reduce. If `None` (the default), reduces all
3342 dimensions. Must be in the range `[-rank(input_tensor),
3343 rank(input_tensor))`.
3344 keepdims: If true, retains reduced dimensions with length 1.
3345 name: A name for the operation (optional).
3346 reduction_indices: The old (deprecated) name for axis.
3347 keep_dims: Deprecated alias for `keepdims`.
3349 Returns:
3350 The reduced tensor.
3352 @compatibility(numpy)
3353 Equivalent to np.any
3354 @end_compatibility
3355 """
3356 axis = deprecation.deprecated_argument_lookup("axis", axis,
3357 "reduction_indices",
3358 reduction_indices)
3359 keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
3360 "keep_dims", keep_dims)
3361 return reduce_any(input_tensor, axis, keepdims, name)
3364@tf_export("math.reduce_any", "reduce_any", v1=[])
3365@dispatch.add_dispatch_support
3366def reduce_any(input_tensor, axis=None, keepdims=False, name=None):
3367 """Computes `tf.math.logical_or` of elements across dimensions of a tensor.
3369 This is the reduction operation for the elementwise `tf.math.logical_or` op.
3371 Reduces `input_tensor` along the dimensions given in `axis`.
3372 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3373 of the entries in `axis`, which must be unique. If `keepdims` is true, the
3374 reduced dimensions are retained with length 1.
3376 If `axis` is None, all dimensions are reduced, and a
3377 tensor with a single element is returned.
3379 For example:
3381 >>> x = tf.constant([[True, True], [False, False]])
3382 >>> tf.reduce_any(x)
3383 <tf.Tensor: shape=(), dtype=bool, numpy=True>
3384 >>> tf.reduce_any(x, 0)
3385 <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, True])>
3386 >>> tf.reduce_any(x, 1)
3387 <tf.Tensor: shape=(2,), dtype=bool, numpy=array([ True, False])>
3389 Args:
3390 input_tensor: The boolean tensor to reduce.
3391 axis: The dimensions to reduce. If `None` (the default), reduces all
3392 dimensions. Must be in the range `[-rank(input_tensor),
3393 rank(input_tensor))`.
3394 keepdims: If true, retains reduced dimensions with length 1.
3395 name: A name for the operation (optional).
3397 Returns:
3398 The reduced tensor.
3400 @compatibility(numpy)
3401 Equivalent to np.any
3402 @end_compatibility
3403 """
3404 keepdims = False if keepdims is None else bool(keepdims)
3405 return _may_reduce_to_scalar(
3406 keepdims, axis,
3407 gen_math_ops._any(
3408 input_tensor, _ReductionDims(input_tensor, axis), keepdims,
3409 name=name))
3412@tf_export(v1=["math.reduce_logsumexp", "reduce_logsumexp"])
3413@dispatch.add_dispatch_support
3414@deprecation.deprecated_args(None,
3415 "keep_dims is deprecated, use keepdims instead",
3416 "keep_dims")
3417def reduce_logsumexp_v1(input_tensor,
3418 axis=None,
3419 keepdims=None,
3420 name=None,
3421 reduction_indices=None,
3422 keep_dims=None):
3423 """Computes log(sum(exp(elements across dimensions of a tensor))).
3425 Reduces `input_tensor` along the dimensions given in `axis`.
3426 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3427 of the entries in `axis`, which must be unique. If `keepdims` is true, the
3428 reduced dimensions are retained with length 1.
3430 If `axis` has no entries, all dimensions are reduced, and a
3431 tensor with a single element is returned.
3433 This function is more numerically stable than log(sum(exp(input))). It avoids
3434 overflows caused by taking the exp of large inputs and underflows caused by
3435 taking the log of small inputs.
3437 For example:
3439 ```python
3440 x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
3441 tf.reduce_logsumexp(x) # log(6)
3442 tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]
3443 tf.reduce_logsumexp(x, 1) # [log(3), log(3)]
3444 tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]]
3445 tf.reduce_logsumexp(x, [0, 1]) # log(6)
3446 ```
3448 Args:
3449 input_tensor: The tensor to reduce. Should have numeric type.
3450 axis: The dimensions to reduce. If `None` (the default), reduces all
3451 dimensions. Must be in the range `[-rank(input_tensor),
3452 rank(input_tensor))`.
3453 keepdims: If true, retains reduced dimensions with length 1.
3454 name: A name for the operation (optional).
3455 reduction_indices: The old (deprecated) name for axis.
3456 keep_dims: Deprecated alias for `keepdims`.
3458 Returns:
3459 The reduced tensor.
3460 """
3461 axis = deprecation.deprecated_argument_lookup("axis", axis,
3462 "reduction_indices",
3463 reduction_indices)
3464 keepdims = deprecation.deprecated_argument_lookup("keepdims", keepdims,
3465 "keep_dims", keep_dims)
3466 return reduce_logsumexp(input_tensor, axis, keepdims, name)
3469@tf_export("math.reduce_logsumexp", "reduce_logsumexp", v1=[])
3470@dispatch.add_dispatch_support
3471def reduce_logsumexp(input_tensor, axis=None, keepdims=False, name=None):
3472 """Computes log(sum(exp(elements across dimensions of a tensor))).
3474 Reduces `input_tensor` along the dimensions given in `axis`.
3475 Unless `keepdims` is true, the rank of the tensor is reduced by 1 for each
3476 of the entries in `axis`, which must be unique. If `keepdims` is true, the
3477 reduced dimensions are retained with length 1.
3479 If `axis` has no entries, all dimensions are reduced, and a
3480 tensor with a single element is returned.
3482 This function is more numerically stable than log(sum(exp(input))). It avoids
3483 overflows caused by taking the exp of large inputs and underflows caused by
3484 taking the log of small inputs.
3486 For example:
3488 ```python
3489 x = tf.constant([[0., 0., 0.], [0., 0., 0.]])
3490 tf.reduce_logsumexp(x) # log(6)
3491 tf.reduce_logsumexp(x, 0) # [log(2), log(2), log(2)]
3492 tf.reduce_logsumexp(x, 1) # [log(3), log(3)]
3493 tf.reduce_logsumexp(x, 1, keepdims=True) # [[log(3)], [log(3)]]
3494 tf.reduce_logsumexp(x, [0, 1]) # log(6)
3495 ```
3497 Args:
3498 input_tensor: The tensor to reduce. Should have numeric type.
3499 axis: The dimensions to reduce. If `None` (the default), reduces all
3500 dimensions. Must be in the range `[-rank(input_tensor),
3501 rank(input_tensor))`.
3502 keepdims: If true, retains reduced dimensions with length 1.
3503 name: A name for the operation (optional).
3505 Returns:
3506 The reduced tensor.
3507 """
3508 with ops.name_scope(name, "ReduceLogSumExp", [input_tensor]) as name:
3509 raw_max = reduce_max(input_tensor, axis=axis, keepdims=True)
3510 my_max = array_ops.stop_gradient(
3511 gen_math_ops.select(
3512 gen_math_ops.is_finite(raw_max), raw_max,
3513 gen_array_ops.zeros_like(raw_max)))
3514 result = gen_math_ops.log(
3515 reduce_sum(
3516 exp(subtract(input_tensor, my_max)),
3517 axis=axis,
3518 keepdims=keepdims))
3519 if not keepdims:
3520 my_max = array_ops.reshape(my_max, gen_array_ops.shape(result))
3521 result = add(result, my_max, name=name)
3522 return _may_reduce_to_scalar(keepdims, axis, result)
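# A rough NumPy restatement of the max-shift trick implemented above
# (illustrative only; `_example_logsumexp` is a hypothetical helper).
# Subtracting the per-axis max before exponentiating keeps exp() from
# overflowing, and the shift is added back after the log:
def _example_logsumexp(x, axis=None, keepdims=False):
  m = np.max(x, axis=axis, keepdims=True)
  m = np.where(np.isfinite(m), m, 0.0)  # mirrors the is_finite guard above
  out = np.log(np.sum(np.exp(x - m), axis=axis, keepdims=keepdims))
  return out + (m if keepdims else np.reshape(m, np.shape(out)))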
3525@tf_export("linalg.trace", v1=["linalg.trace", "trace"])
3526@dispatch.add_dispatch_support
3527@deprecation.deprecated_endpoints("trace")
3528def trace(x, name=None):
3529 """Compute the trace of a tensor `x`.
3531 `trace(x)` returns the sum along the main diagonal of each inner-most matrix
3532 in x. If x is of rank `k` with shape `[I, J, K, ..., L, M, N]`, then output
3533 is a tensor of rank `k-2` with dimensions `[I, J, K, ..., L]` where
3535 `output[i, j, k, ..., l] = trace(x[i, j, k, ..., l, :, :])`
3537 For example:
3539 ```python
3540 x = tf.constant([[1, 2], [3, 4]])
3541 tf.linalg.trace(x) # 5
3543 x = tf.constant([[1, 2, 3],
3544 [4, 5, 6],
3545 [7, 8, 9]])
3546 tf.linalg.trace(x) # 15
3548 x = tf.constant([[[1, 2, 3],
3549 [4, 5, 6],
3550 [7, 8, 9]],
3551 [[-1, -2, -3],
3552 [-4, -5, -6],
3553 [-7, -8, -9]]])
3554 tf.linalg.trace(x) # [15, -15]
3555 ```
3557 Args:
3558 x: tensor.
3559 name: A name for the operation (optional).
3561 Returns:
3562 The trace of input tensor.
3563 """
3564 with ops.name_scope(name, "Trace", [x]) as name:
3565 x = ops.convert_to_tensor(x, name="x")
3566 return reduce_sum(array_ops.matrix_diag_part(x), [-1], name=name)
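# Equivalent NumPy formulation (illustrative): the reduction above computes
# a batched trace over the two innermost axes.
def _example_trace(x):
  return np.trace(np.asarray(x), axis1=-2, axis2=-1)  # [[1, 2], [3, 4]] -> 5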
3569@tf_export("linalg.matmul", "matmul")
3570@dispatch.add_dispatch_support
3571def matmul(a,
3572 b,
3573 transpose_a=False,
3574 transpose_b=False,
3575 adjoint_a=False,
3576 adjoint_b=False,
3577 a_is_sparse=False,
3578 b_is_sparse=False,
3579 output_type=None,
3580 name=None):
3581 """Multiplies matrix `a` by matrix `b`, producing `a` * `b`.
3583 The inputs must, following any transpositions, be tensors of rank >= 2
3584 where the inner 2 dimensions specify valid matrix multiplication dimensions,
3585 and any further outer dimensions specify matching batch size.
3587 Both matrices must be of the same type. The supported types are:
3588 `bfloat16`, `float16`, `float32`, `float64`, `int32`, `int64`,
3589 `complex64`, `complex128`.
3591 Either matrix can be transposed or adjointed (conjugated and transposed) on
3592 the fly by setting one of the corresponding flags to `True`. These are `False`
3593 by default.
3595 If one or both of the matrices contain a lot of zeros, a more efficient
3596 multiplication algorithm can be used by setting the corresponding
3597 `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
3598 This optimization is only available for plain matrices (rank-2 tensors) with
3599 datatypes `bfloat16` or `float32`.
3601 A simple 2-D tensor matrix multiplication:
3603 >>> a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
3604 >>> a # 2-D tensor
3605 <tf.Tensor: shape=(2, 3), dtype=int32, numpy=
3606 array([[1, 2, 3],
3607 [4, 5, 6]], dtype=int32)>
3608 >>> b = tf.constant([7, 8, 9, 10, 11, 12], shape=[3, 2])
3609 >>> b # 2-D tensor
3610 <tf.Tensor: shape=(3, 2), dtype=int32, numpy=
3611 array([[ 7, 8],
3612 [ 9, 10],
3613 [11, 12]], dtype=int32)>
3614 >>> c = tf.matmul(a, b)
3615 >>> c # `a` * `b`
3616 <tf.Tensor: shape=(2, 2), dtype=int32, numpy=
3617 array([[ 58, 64],
3618 [139, 154]], dtype=int32)>
3620 A batch matrix multiplication with batch shape [2]:
3622 >>> a = tf.constant(np.arange(1, 13, dtype=np.int32), shape=[2, 2, 3])
3623 >>> a # 3-D tensor
3624 <tf.Tensor: shape=(2, 2, 3), dtype=int32, numpy=
3625 array([[[ 1, 2, 3],
3626 [ 4, 5, 6]],
3627 [[ 7, 8, 9],
3628 [10, 11, 12]]], dtype=int32)>
3629 >>> b = tf.constant(np.arange(13, 25, dtype=np.int32), shape=[2, 3, 2])
3630 >>> b # 3-D tensor
3631 <tf.Tensor: shape=(2, 3, 2), dtype=int32, numpy=
3632 array([[[13, 14],
3633 [15, 16],
3634 [17, 18]],
3635 [[19, 20],
3636 [21, 22],
3637 [23, 24]]], dtype=int32)>
3638 >>> c = tf.matmul(a, b)
3639 >>> c # `a` * `b`
3640 <tf.Tensor: shape=(2, 2, 2), dtype=int32, numpy=
3641 array([[[ 94, 100],
3642 [229, 244]],
3643 [[508, 532],
3644 [697, 730]]], dtype=int32)>
3646 Since Python 3.5 the `@` operator is supported
3647 (see [PEP 465](https://www.python.org/dev/peps/pep-0465/)). In TensorFlow,
3648 it simply calls the `tf.matmul()` function, so the following lines are
3649 equivalent:
3651 >>> d = a @ b @ [[10], [11]]
3652 >>> d = tf.matmul(tf.matmul(a, b), [[10], [11]])
3654 Args:
3655 a: `tf.Tensor` of type `float16`, `float32`, `float64`, `int32`,
3656 `complex64`, `complex128` and rank > 1.
3657 b: `tf.Tensor` with same type and rank as `a`.
3658 transpose_a: If `True`, `a` is transposed before multiplication.
3659 transpose_b: If `True`, `b` is transposed before multiplication.
3660 adjoint_a: If `True`, `a` is conjugated and transposed before
3661 multiplication.
3662 adjoint_b: If `True`, `b` is conjugated and transposed before
3663 multiplication.
3664 a_is_sparse: If `True`, `a` is treated as a sparse matrix. Notice, this
3665 **does not support `tf.sparse.SparseTensor`**, it just makes optimizations
3666 that assume most values in `a` are zero.
3667 See `tf.sparse.sparse_dense_matmul`
3668 for some support for `tf.sparse.SparseTensor` multiplication.
3669 b_is_sparse: If `True`, `b` is treated as a sparse matrix. Notice, this
3670 **does not support `tf.sparse.SparseTensor`**, it just makes optimizations
3671 that assume most values in `b` are zero.
3672 See `tf.sparse.sparse_dense_matmul`
3673 for some support for `tf.sparse.SparseTensor` multiplication.
3674 output_type: The output datatype, if needed. Defaults to `None`, in which
3675 case the output type is the same as the input type. Currently this only
3676 works when the input tensors are of type (u)int8 and `output_type` is int32.
3677 name: Name for the operation (optional).
3679 Returns:
3680 A `tf.Tensor` of the same type as `a` and `b` where each inner-most matrix
3681 is the product of the corresponding matrices in `a` and `b`, e.g. if all
3682 transpose or adjoint attributes are `False`:
3684 `output[..., i, j] = sum_k (a[..., i, k] * b[..., k, j])`,
3685 for all indices `i`, `j`.
3687 Note: This is the matrix product, not an element-wise product.
3690 Raises:
3691 ValueError: If `transpose_a` and `adjoint_a`, or `transpose_b` and
3692 `adjoint_b` are both set to `True`.
3693 TypeError: If `output_type` is specified but the types of `a`, `b` and
3694 `output_type` are not (u)int8, (u)int8 and int32.
3695 """
3697 with ops.name_scope(name, "MatMul", [a, b]) as name:
3698 if transpose_a and adjoint_a:
3699 raise ValueError(
3700 f"Only one of `transpose_a` and `adjoint_a` can be True. "
3701 f"Received `transpose_a`={transpose_a}, "
3702 f"`adjoint_a`={adjoint_a}.")
3703 if transpose_b and adjoint_b:
3704 raise ValueError(
3705 f"Only one of `transpose_b` and `adjoint_b` can be True. "
3706 f"Received `transpose_b`={transpose_b}, "
3707 f"`adjoint_b`={adjoint_b}.")
3709 if context.executing_eagerly():
3710 if not isinstance(a, (ops.EagerTensor, _resource_variable_type)):
3711 a = ops.convert_to_tensor(a, name="a")
3712 if not isinstance(b, (ops.EagerTensor, _resource_variable_type)):
3713 b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b")
3714 else:
3715 a = ops.convert_to_tensor(a, name="a")
3716 b = ops.convert_to_tensor(b, dtype_hint=a.dtype.base_dtype, name="b")
3718 # TODO(apassos) remove _shape_tuple here when it is not needed.
3719 a_shape = a._shape_tuple() # pylint: disable=protected-access
3720 b_shape = b._shape_tuple() # pylint: disable=protected-access
3722 output_may_have_non_empty_batch_shape = (
3723 (a_shape is None or len(a_shape) > 2) or
3724 (b_shape is None or len(b_shape) > 2))
3726 # TODO(b/178749687): remove this boolean and all related branches once the
3727 # bridges are ready.
3728 # batch_matmul_v3 is for when input type is different from output type.
3729 use_batch_matmul_v3 = False
3730 if output_type and (output_type != a.dtype or output_type != b.dtype):
3731 use_batch_matmul_v3 = True
3733 if (not a_is_sparse and
3734 not b_is_sparse) and output_may_have_non_empty_batch_shape:
3735 # BatchMatmul does not support transpose, so we conjugate the matrix and
3736 # use adjoint instead. Conj() is a noop for real matrices.
3737 if transpose_a:
3738 a = conj(a)
3739 adjoint_a = True
3740 if transpose_b:
3741 b = conj(b)
3742 adjoint_b = True
3743 if use_batch_matmul_v3:
3744 return gen_math_ops.batch_mat_mul_v3(
3745 a, b, adj_x=adjoint_a, adj_y=adjoint_b, Tout=output_type, name=name)
3746 else:
3747 return gen_math_ops.batch_mat_mul_v2(
3748 a, b, adj_x=adjoint_a, adj_y=adjoint_b, name=name)
3750 # Neither matmul nor sparse_matmul support adjoint, so we conjugate
3751 # the matrix and use transpose instead. Conj() is a noop for real
3752 # matrices.
3753 if adjoint_a:
3754 a = conj(a)
3755 transpose_a = True
3756 if adjoint_b:
3757 b = conj(b)
3758 transpose_b = True
3760 use_sparse_matmul = False
3761 if a_is_sparse or b_is_sparse:
3762 sparse_matmul_types = [dtypes.bfloat16, dtypes.float32]
3763 use_sparse_matmul = (
3764 a.dtype in sparse_matmul_types and b.dtype in sparse_matmul_types)
3765 if (((a.dtype == dtypes.bfloat16 and
3766 b.dtype not in (dtypes.int8, dtypes.uint8)) or
3767 (b.dtype == dtypes.bfloat16 and
3768 a.dtype not in (dtypes.int8, dtypes.uint8))) and a.dtype != b.dtype):
3769 # matmul currently doesn't handle mixed-precision inputs other than
3770 # bfloat16 * int8, which is supported in BatchMatMulV3.
3771 use_sparse_matmul = True
3772 if use_sparse_matmul:
3773 ret = sparse_matmul(
3774 a,
3775 b,
3776 transpose_a=transpose_a,
3777 transpose_b=transpose_b,
3778 a_is_sparse=a_is_sparse,
3779 b_is_sparse=b_is_sparse,
3780 name=name)
3781 # sparse_matmul always returns float32, even with
3782 # bfloat16 inputs. This prevents us from configuring bfloat16 training.
3783 # Casting to bfloat16 also matches non-sparse matmul behavior better.
3784 if a.dtype == dtypes.bfloat16 and b.dtype == dtypes.bfloat16:
3785 ret = cast(ret, dtypes.bfloat16)
3786 return ret
3787 else:
3788 if use_batch_matmul_v3:
3789 adjoint_a = adjoint_a or transpose_a
3790 adjoint_b = adjoint_b or transpose_b
3791 return gen_math_ops.batch_mat_mul_v3(
3792 a, b, adj_x=adjoint_a, adj_y=adjoint_b, Tout=output_type, name=name)
3793 else:
3794 return gen_math_ops.mat_mul(
3795 a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
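# Sketch of the rewrite used above (illustrative; `_example_transpose_via_adjoint`
# is a hypothetical helper): the batched kernels only expose `adj_x`/`adj_y`,
# so a transpose request is lowered to conj() plus adjoint, relying on
# transpose(a) == conj(adjoint(a)). For real inputs conj() is a no-op, so
# both formulations coincide.
def _example_transpose_via_adjoint(a):
  adjoint = np.conj(np.swapaxes(a, -2, -1))  # conjugate transpose
  return np.conj(adjoint)                    # == plain batch transpose of `a`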
3798@tf_export("linalg.matvec")
3799@dispatch.add_dispatch_support
3800def matvec(a,
3801 b,
3802 transpose_a=False,
3803 adjoint_a=False,
3804 a_is_sparse=False,
3805 b_is_sparse=False,
3806 name=None):
3807 """Multiplies matrix `a` by vector `b`, producing `a` * `b`.
3809 The matrix `a` must, following any transpositions, be a tensor of rank >= 2,
3810 with `shape(a)[-1] == shape(b)[-1]`, and `shape(a)[:-2]` able to broadcast
3811 with `shape(b)[:-1]`.
3813 Both `a` and `b` must be of the same type. The supported types are:
3814 `float16`, `float32`, `float64`, `int32`, `complex64`, `complex128`.
3816 Matrix `a` can be transposed or adjointed (conjugated and transposed) on
3817 the fly by setting one of the corresponding flags to `True`. These are `False`
3818 by default.
3820 If one or both of the inputs contain a lot of zeros, a more efficient
3821 multiplication algorithm can be used by setting the corresponding
3822 `a_is_sparse` or `b_is_sparse` flag to `True`. These are `False` by default.
3823 This optimization is only available for plain matrices/vectors (rank-2/1
3824 tensors) with datatypes `bfloat16` or `float32`.
3826 For example:
3828 ```python
3829 # 2-D tensor `a`
3830 # [[1, 2, 3],
3831 # [4, 5, 6]]
3832 a = tf.constant([1, 2, 3, 4, 5, 6], shape=[2, 3])
3834 # 1-D tensor `b`
3835 # [7, 9, 11]
3836 b = tf.constant([7, 9, 11], shape=[3])
3838 # `a` * `b`
3839 # [ 58, 139]
3840 c = tf.linalg.matvec(a, b)
3843 # 3-D tensor `a`
3844 # [[[ 1, 2, 3],
3845 # [ 4, 5, 6]],
3846 # [[ 7, 8, 9],
3847 # [10, 11, 12]]]
3848 a = tf.constant(np.arange(1, 13, dtype=np.int32),
3849 shape=[2, 2, 3])
3851 # 2-D tensor `b`
3852 # [[13, 14, 15],
3853 # [16, 17, 18]]
3854 b = tf.constant(np.arange(13, 19, dtype=np.int32),
3855 shape=[2, 3])
3857 # `a` * `b`
3858 # [[ 86, 212],
3859 # [410, 563]]
3860 c = tf.linalg.matvec(a, b)
3861 ```
3863 Args:
3864 a: `Tensor` of type `float16`, `float32`, `float64`, `int32`, `complex64`,
3865 `complex128` and rank > 1.
3866 b: `Tensor` with same type as `a` and compatible dimensions.
3867 transpose_a: If `True`, `a` is transposed before multiplication.
3868 adjoint_a: If `True`, `a` is conjugated and transposed before
3869 multiplication.
3870 a_is_sparse: If `True`, `a` is treated as a sparse matrix.
3871 b_is_sparse: If `True`, `b` is treated as a sparse matrix.
3872 name: Name for the operation (optional).
3874 Returns:
3875 A `Tensor` of the same type as `a` and `b` where each inner-most vector is
3876 the product of the corresponding matrices in `a` and vectors in `b`, e.g. if
3877 all transpose or adjoint attributes are `False`:
3879 `output`[..., i] = sum_k (`a`[..., i, k] * `b`[..., k]), for all indices i.
3881 Note: This is the matrix-vector product, not an element-wise product.
3884 Raises:
3885 ValueError: If transpose_a and adjoint_a are both set to True.
3886 """
3887 with ops.name_scope(name, "MatVec", [a, b]) as name:
3888 output = matmul(
3889 a,
3890 array_ops.expand_dims(b, axis=-1),
3891 transpose_a=transpose_a,
3892 adjoint_a=adjoint_a,
3893 a_is_sparse=a_is_sparse,
3894 b_is_sparse=b_is_sparse)
3895 return array_ops.squeeze(output, axis=-1)
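# The expand/squeeze pattern above amounts to a batched contraction over the
# last axis of `b`; an illustrative NumPy one-liner (hypothetical helper):
def _example_matvec(a, b):
  # output[..., i] = sum_k a[..., i, k] * b[..., k]
  return np.einsum("...ij,...j->...i", a, b)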
3898# TODO(b/178650720): Also support numpy-style type promotion in freestanding TF
3899# functions (e.g. tf.add).
3900def matmul_wrapper(a, b, name=None): # pylint: disable=missing-function-docstring
3901 if ops._numpy_style_type_promotion:
3902 return a._matmul(b)
3903 return matmul(a, b, name=name)
3904matmul_wrapper.__doc__ = matmul.__doc__
3905_OverrideBinaryOperatorHelper(matmul_wrapper, "matmul")
3907sparse_matmul = deprecation.deprecated(None, "Use `tf.linalg.matmul` instead")(
3908 gen_math_ops.sparse_mat_mul)
3909tf_export(v1=["sparse_matmul"])(sparse_matmul)
3910@dispatch.add_dispatch_support
3913def _as_indexed_slices(x, optimize=True):
3914 """Convert 'x' to IndexedSlices.
3916 Convert a dense Tensor to a block-sparse IndexedSlices.
3918 Args:
3919 x: Either a Tensor object, or an IndexedSlices object.
3920 optimize: if true, attempt to optimize the conversion of 'x'.
3922 Returns:
3923 An IndexedSlices object.
3925 Raises:
3926 TypeError: If 'x' is not a Tensor or an IndexedSlices object.
3927 """
3928 # TODO(touts): op_scope
3929 if not isinstance(x, (ops.Tensor, indexed_slices.IndexedSlices)):
3930 raise TypeError(f"Not a Tensor or IndexedSlices: {type(x)}.")
3931 if isinstance(x, indexed_slices.IndexedSlices):
3932 return x
3933 x_shape = array_ops.shape_internal(x, optimize=optimize)
3934 return indexed_slices.IndexedSlices(x, range(0, x_shape[0]), x_shape)
3937def _as_indexed_slices_list(inputs, optimize=True):
3938 """Convert all elements of 'inputs' to IndexedSlices.
3940 Additionally, homogenize the types of all the indices to
3941 either int32 or int64.
3943 Args:
3944 inputs: List containing either Tensor or IndexedSlices objects.
3945 optimize: if true, attempt to optimize the conversion of each input.
3947 Returns:
3948 A list of IndexedSlices objects.
3950 Raises:
3951 TypeError: If 'inputs' is not a list or a tuple.
3952 """
3953 if not isinstance(inputs, (list, tuple)):
3954 raise TypeError(f"Expected a list or tuple, not {type(inputs)}.")
3955 outputs = [_as_indexed_slices(i, optimize=optimize) for i in inputs]
3956 with_int32_index = [
3957 o.indices for o in outputs if o.indices.dtype == dtypes.int32
3958 ]
3959 if not with_int32_index or len(with_int32_index) == len(outputs):
3960 return outputs
3961 casted_outputs = []
3962 for o in outputs:
3963 if o.indices.dtype == dtypes.int32:
3964 casted_outputs.append(
3965 indexed_slices.IndexedSlices(o.values, cast(o.indices, dtypes.int64),
3966 o.dense_shape))
3967 else:
3968 casted_outputs.append(o)
3969 return casted_outputs
3972@tf_export("math.add", "add")
3973@dispatch.register_binary_elementwise_api
3974@dispatch.add_dispatch_support
3975def add(x, y, name=None):
3976 """Returns x + y element-wise.
3978 Example usages below.
3980 Add a scalar and a list:
3982 >>> x = [1, 2, 3, 4, 5]
3983 >>> y = 1
3984 >>> tf.add(x, y)
3985 <tf.Tensor: shape=(5,), dtype=int32, numpy=array([2, 3, 4, 5, 6],
3986 dtype=int32)>
3988 Note that the binary `+` operator can be used instead:
3990 >>> x = tf.convert_to_tensor([1, 2, 3, 4, 5])
3991 >>> y = tf.convert_to_tensor(1)
3992 >>> x + y
3993 <tf.Tensor: shape=(5,), dtype=int32, numpy=array([2, 3, 4, 5, 6],
3994 dtype=int32)>
3996 Add a tensor and a list of same shape:
3998 >>> x = [1, 2, 3, 4, 5]
3999 >>> y = tf.constant([1, 2, 3, 4, 5])
4000 >>> tf.add(x, y)
4001 <tf.Tensor: shape=(5,), dtype=int32,
4002 numpy=array([ 2, 4, 6, 8, 10], dtype=int32)>
4004 **Warning**: If one of the inputs (`x` or `y`) is a tensor and the other is a
4005 non-tensor, the non-tensor input will adopt (or be cast to) the data type
4006 of the tensor input. This can potentially cause unwanted overflow or
4007 underflow.
4009 For example,
4011 >>> x = tf.constant([1, 2], dtype=tf.int8)
4012 >>> y = [2**7 + 1, 2**7 + 2]
4013 >>> tf.add(x, y)
4014 <tf.Tensor: shape=(2,), dtype=int8, numpy=array([-126, -124], dtype=int8)>
4016 When adding two input values of different shapes, `Add` follows NumPy
4017 broadcasting rules. The two input array shapes are compared element-wise.
4018 Starting with the trailing dimensions, the two dimensions either have to be
4019 equal or one of them needs to be `1`.
4021 For example,
4023 >>> x = np.ones(6).reshape(1, 2, 1, 3)
4024 >>> y = np.ones(6).reshape(2, 1, 3, 1)
4025 >>> tf.add(x, y).shape.as_list()
4026 [2, 2, 3, 3]
4028 Another example with two arrays of different dimensions.
4030 >>> x = np.ones([1, 2, 1, 4])
4031 >>> y = np.ones([3, 4])
4032 >>> tf.add(x, y).shape.as_list()
4033 [1, 2, 3, 4]
4035 The reduction version of this elementwise operation is `tf.math.reduce_sum`.
4037 Args:
4038 x: A `tf.Tensor`. Must be one of the following types: bfloat16, half,
4039 float16, float32, float64, uint8, uint16, uint32, uint64, int8, int16,
4040 int32, int64, complex64, complex128, string.
4041 y: A `tf.Tensor`. Must have the same type as x.
4042 name: A name for the operation (optional).
4043 """
4044 with ops.name_scope(name, "Add", [x]) as name:
4045 x = ops.convert_to_tensor(x, name="x")
4046 y = ops.convert_to_tensor(y, dtype_hint=x.dtype.base_dtype, name="y")
4047 if x.dtype == dtypes.string:
4048 return gen_math_ops.add(x, y, name=name)
4049 else:
4050 return gen_math_ops.add_v2(x, y, name=name)
4053@tf_export("math.add_n", "add_n")
4054@dispatch.add_dispatch_support(iterable_parameters=["inputs"])
4055def add_n(inputs, name=None):
4056 """Returns the element-wise sum of a list of tensors.
4058 All inputs in the list must have the same shape. This op does not
4059 [broadcast](https://docs.scipy.org/doc/numpy-1.13.0/user/basics.broadcasting.html)
4060 its inputs. If you need broadcasting, use `tf.math.add` (or the `+` operator)
4061 instead.
4063 For example:
4065 >>> a = tf.constant([[3, 5], [4, 8]])
4066 >>> b = tf.constant([[1, 6], [2, 9]])
4067 >>> tf.math.add_n([a, b, a]).numpy()
4068 array([[ 7, 16],
4069 [10, 25]], dtype=int32)
4071 See Also:
4073 * `tf.reduce_sum(inputs, axis=0)` - This performs the same mathematical
4074 operation, but `tf.add_n` may be more efficient because it sums the
4075 tensors directly. `reduce_sum` on the other hand calls
4076 `tf.convert_to_tensor` on the list of tensors, unnecessarily stacking them
4077 into a single tensor before summing.
4079 Args:
4080 inputs: A list of `tf.Tensor` or `tf.IndexedSlices` objects, each with the
4081 same shape and type. `tf.IndexedSlices` objects will be converted into
4082 dense tensors prior to adding.
4083 name: A name for the operation (optional).
4085 Returns:
4086 A `tf.Tensor` of the same shape and type as the elements of `inputs`.
4088 Raises:
4089 ValueError: If `inputs` don't all have same shape and dtype or the shape
4090 cannot be inferred.
4091 """
4092 if not inputs or not isinstance(inputs, collections_abc.Iterable):
4093 raise ValueError("Inputs must be an iterable of at least one "
4094 "Tensor/IndexedSlices with the same dtype and shape.")
4095 inputs = indexed_slices.convert_n_to_tensor_or_indexed_slices(inputs)
4096 if not all(
4097 isinstance(x, (ops.Tensor, indexed_slices.IndexedSlices))
4098 for x in inputs):
4099 raise ValueError("Inputs must be an iterable of at least one "
4100 "Tensor/IndexedSlices with the same dtype and shape.")
4102 if len(inputs) == 1:
4103 if isinstance(inputs[0], indexed_slices.IndexedSlices):
4104 values = ops.convert_to_tensor(inputs[0])
4105 else:
4106 values = inputs[0]
4107 if name:
4108 return array_ops.identity(values, name=name)
4109 return values
4110 return gen_math_ops.add_n(inputs, name=name)
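# Illustrative restatement of the `add_n` contract (hypothetical helper):
# pairwise addition over the list gives the same result without first
# stacking the inputs into one large tensor, as reduce_sum would.
def _example_add_n(inputs):
  total = inputs[0]
  for t in inputs[1:]:
    total = total + t  # inputs must already share one shape; no broadcasting
  return total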
4113@tf_export("math.accumulate_n", v1=["math.accumulate_n", "accumulate_n"])
4114@dispatch.add_dispatch_support
4115@deprecation.deprecated(None, "Use `tf.math.add_n` Instead")
4116def accumulate_n(inputs, shape=None, tensor_dtype=None, name=None):
4117 """Returns the element-wise sum of a list of tensors.
4119 Optionally, pass `shape` and `tensor_dtype` for shape and type checking,
4120 otherwise, these are inferred.
4122 For example:
4124 >>> a = tf.constant([[1, 2], [3, 4]])
4125 >>> b = tf.constant([[5, 0], [0, 6]])
4126 >>> tf.math.accumulate_n([a, b, a]).numpy()
4127 array([[ 7, 4],
4128 [ 6, 14]], dtype=int32)
4130 >>> # Explicitly pass shape and type
4131 >>> tf.math.accumulate_n(
4132 ... [a, b, a], shape=[2, 2], tensor_dtype=tf.int32).numpy()
4133 array([[ 7, 4],
4134 [ 6, 14]], dtype=int32)
4136 Note: The input must be a list or tuple. This function does not handle
4137 `IndexedSlices`.
4139 See Also:
4141 * `tf.reduce_sum(inputs, axis=0)` - This performs the same mathematical
4142 operation, but `tf.add_n` may be more efficient because it sums the
4143 tensors directly. `reduce_sum` on the other hand calls
4144 `tf.convert_to_tensor` on the list of tensors, unnecessarily stacking them
4145 into a single tensor before summing.
4146 * `tf.add_n` - This is another Python wrapper for the same Op. It has
4147 nearly identical functionality.
4149 Args:
4150 inputs: A list of `Tensor` objects, each with same shape and type.
4151 shape: Expected shape of elements of `inputs` (optional). Also controls the
4152 output shape of this op, which may affect type inference in other ops. A
4153 value of `None` means "infer the input shape from the shapes in `inputs`".
4154 tensor_dtype: Expected data type of `inputs` (optional). A value of `None`
4155 means "infer the input dtype from `inputs[0]`".
4156 name: A name for the operation (optional).
4158 Returns:
4159 A `Tensor` of same shape and type as the elements of `inputs`.
4161 Raises:
4162 ValueError: If `inputs` don't all have same shape and dtype or the shape
4163 cannot be inferred.
4164 """
4166 def _input_error():
4167 return ValueError("inputs must be a list of at least one Tensor with the "
4168 "same dtype and shape")
4170 if not inputs or not isinstance(inputs, (list, tuple)):
4171 raise _input_error()
4172 inputs = indexed_slices.convert_n_to_tensor_or_indexed_slices(inputs)
4173 if not all(isinstance(x, ops.Tensor) for x in inputs):
4174 raise _input_error()
4175 if not all(x.dtype == inputs[0].dtype for x in inputs):
4176 raise _input_error()
4177 if shape is not None:
4178 shape = tensor_shape.as_shape(shape)
4179 else:
4180 shape = tensor_shape.unknown_shape()
4181 for input_tensor in inputs:
4182 if isinstance(input_tensor, ops.Tensor):
4183 shape = shape.merge_with(input_tensor.get_shape())
4185 # tensor_dtype is for safety only; operator's output type computed in C++
4186 if tensor_dtype is not None and tensor_dtype != inputs[0].dtype:
4187 raise TypeError(
4188 f"The `tensor_dtype` argument is {tensor_dtype}, but `input` is of "
4189 f"type {inputs[0].dtype}. These must be equal. Try casting the input "
4190 f"to the desired type.")
4192 if len(inputs) == 1 and name is None:
4193 return inputs[0]
4194 elif len(inputs) == 1 and name is not None:
4195 return array_ops.identity(inputs[0], name=name)
4196 return add_n(inputs, name=name)
4199@ops.RegisterGradient("AccumulateNV2")
4200def _accumulate_n_grad(op, grad):
4201 """Same as gradient for AddN. Copies the gradient to all inputs."""
4202 # Not broadcasting.
4203 return [grad] * len(op.inputs)
4206@tf_export("math.sigmoid", "nn.sigmoid", "sigmoid")
4207@dispatch.register_unary_elementwise_api
4208@dispatch.add_dispatch_support
4209def sigmoid(x, name=None):
4210 r"""Computes sigmoid of `x` element-wise.
4212 Formula for calculating $\mathrm{sigmoid}(x) = y = 1 / (1 + \exp(-x))$.
4214 For $x \in (-\infty, \infty)$, $\mathrm{sigmoid}(x) \in (0, 1)$.
4216 Example Usage:
4218 If a positive number is large, then its sigmoid will approach 1, since the
4219 formula will be `y = <large_num> / (1 + <large_num>)`
4221 >>> x = tf.constant([0.0, 1.0, 50.0, 100.0])
4222 >>> tf.math.sigmoid(x)
4223 <tf.Tensor: shape=(4,), dtype=float32,
4224 numpy=array([0.5, 0.7310586, 1.0, 1.0], dtype=float32)>
4226 If a negative number is large in magnitude, its sigmoid will approach 0,
4227 since the formula will be `y = 1 / (1 + <large_num>)`
4229 >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])
4230 >>> tf.math.sigmoid(x)
4231 <tf.Tensor: shape=(4,), dtype=float32, numpy=
4232 array([0.0000000e+00, 1.9287499e-22, 2.6894143e-01, 0.5],
4233 dtype=float32)>
4235 Args:
4236 x: A Tensor with type `float16`, `float32`, `float64`, `complex64`, or
4237 `complex128`.
4238 name: A name for the operation (optional).
4240 Returns:
4241 A Tensor with the same type as `x`.
4243 Usage Example:
4245 >>> x = tf.constant([-128.0, 0.0, 128.0], dtype=tf.float32)
4246 >>> tf.sigmoid(x)
4247 <tf.Tensor: shape=(3,), dtype=float32,
4248 numpy=array([0. , 0.5, 1. ], dtype=float32)>
4250 @compatibility(scipy)
4251 Equivalent to scipy.special.expit
4252 @end_compatibility
4253 """
4254 with ops.name_scope(name, "Sigmoid", [x]) as name:
4255 x = ops.convert_to_tensor(x, name="x")
4256 return gen_math_ops.sigmoid(x, name=name)
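# A numerically careful NumPy sketch of the sigmoid formula (illustrative
# only; the actual kernel may differ): splitting on the sign of `x` means
# exp() is only ever evaluated at non-positive values and cannot overflow.
def _example_sigmoid(x):
  x = np.asarray(x, dtype=np.float64)
  out = np.empty_like(x)
  pos = x >= 0
  out[pos] = 1.0 / (1.0 + np.exp(-x[pos]))
  ex = np.exp(x[~pos])
  out[~pos] = ex / (1.0 + ex)
  return out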
4259@tf_export("math.log_sigmoid", v1=["math.log_sigmoid", "log_sigmoid"])
4260@dispatch.register_unary_elementwise_api
4261@dispatch.add_dispatch_support
4262@deprecation.deprecated_endpoints("log_sigmoid")
4263def log_sigmoid(x, name=None):
4264 """Computes log sigmoid of `x` element-wise.
4266 Specifically, `y = log(1 / (1 + exp(-x)))`. For numerical stability,
4267 we use `y = -tf.nn.softplus(-x)`.
4269 Args:
4270 x: A Tensor with type `float32` or `float64`.
4271 name: A name for the operation (optional).
4273 Returns:
4274 A Tensor with the same type as `x`.
4276 Usage Example:
4278 If a positive number is large, then its log_sigmoid will approach 0, since
4279 the formula will be `y = log( <large_num> / (1 + <large_num>) )`, which
4280 approaches `log(1)`, which is 0.
4282 >>> x = tf.constant([0.0, 1.0, 50.0, 100.0])
4283 >>> tf.math.log_sigmoid(x)
4284 <tf.Tensor: shape=(4,), dtype=float32, numpy=
4285 array([-6.9314718e-01, -3.1326169e-01, -1.9287499e-22, -0.0000000e+00],
4286 dtype=float32)>
4288 If a negative number is large in magnitude, its log_sigmoid approaches the
4289 number itself, since the formula will be `y = log( 1 / (1 + <large_num>) )`,
4290 which is `log(1) - log(1 + <large_num>)`, and that approximates
4291 `-log(<large_num>)`, i.e. the number itself (here `<large_num>` is `exp(-x)`).
4293 >>> x = tf.constant([-100.0, -50.0, -1.0, 0.0])
4294 >>> tf.math.log_sigmoid(x)
4295 <tf.Tensor: shape=(4,), dtype=float32, numpy=
4296 array([-100. , -50. , -1.3132616, -0.6931472],
4297 dtype=float32)>
4298 """
4299 with ops.name_scope(name, "LogSigmoid", [x]) as name:
4300 x = ops.convert_to_tensor(x, name="x")
4301 return gen_math_ops.neg(gen_nn_ops.softplus(-x), name=name) # pylint: disable=invalid-unary-operand-type
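# Sketch of the identity used above (illustrative): since
# sigmoid(x) = 1 / (1 + exp(-x)), log(sigmoid(x)) = -log1p(exp(-x)),
# i.e. exactly -softplus(-x). The softplus kernel additionally guards
# against exp() overflow for large negative x; this naive form does not.
def _example_log_sigmoid(x):
  return -np.log1p(np.exp(-np.asarray(x, dtype=np.float64)))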
4304@tf_export("math.cumsum", "cumsum")
4305@dispatch.add_dispatch_support
4306def cumsum(x, axis=0, exclusive=False, reverse=False, name=None):
4307 """Compute the cumulative sum of the tensor `x` along `axis`.
4309 By default, this op performs an inclusive cumsum, which means that the first
4310 element of the input is identical to the first element of the output:
4311 For example:
4313 >>> # tf.cumsum([a, b, c]) # [a, a + b, a + b + c]
4314 >>> x = tf.constant([2, 4, 6, 8])
4315 >>> tf.cumsum(x)
4316 <tf.Tensor: shape=(4,), dtype=int32,
4317 numpy=array([ 2, 6, 12, 20], dtype=int32)>
4319 >>> # using varying `axis` values
4320 >>> y = tf.constant([[2, 4, 6, 8], [1,3,5,7]])
4321 >>> tf.cumsum(y, axis=0)
4322 <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
4323 array([[ 2, 4, 6, 8],
4324 [ 3, 7, 11, 15]], dtype=int32)>
4325 >>> tf.cumsum(y, axis=1)
4326 <tf.Tensor: shape=(2, 4), dtype=int32, numpy=
4327 array([[ 2, 6, 12, 20],
4328 [ 1, 4, 9, 16]], dtype=int32)>
4330 By setting the `exclusive` kwarg to `True`, an exclusive cumsum is performed
4331 instead:
4333 >>> # tf.cumsum([a, b, c], exclusive=True) => [0, a, a + b]
4334 >>> x = tf.constant([2, 4, 6, 8])
4335 >>> tf.cumsum(x, exclusive=True)
4336 <tf.Tensor: shape=(4,), dtype=int32,
4337 numpy=array([ 0, 2, 6, 12], dtype=int32)>
4339 By setting the `reverse` kwarg to `True`, the cumsum is performed in the
4340 opposite direction:
4342 >>> # tf.cumsum([a, b, c], reverse=True) # [a + b + c, b + c, c]
4343 >>> x = tf.constant([2, 4, 6, 8])
4344 >>> tf.cumsum(x, reverse=True)
4345 <tf.Tensor: shape=(4,), dtype=int32,
4346 numpy=array([20, 18, 14, 8], dtype=int32)>
4348 This is more efficient than using separate `tf.reverse` ops.
4349 The `reverse` and `exclusive` kwargs can also be combined:
4351 >>> # tf.cumsum([a, b, c], exclusive=True, reverse=True) # [b + c, c, 0]
4352 >>> x = tf.constant([2, 4, 6, 8])
4353 >>> tf.cumsum(x, exclusive=True, reverse=True)
4354 <tf.Tensor: shape=(4,), dtype=int32,
4355 numpy=array([18, 14, 8, 0], dtype=int32)>
4357 Args:
4358 x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
4359 `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
4360 `complex128`, `qint8`, `quint8`, `qint32`, `half`.
4361 axis: A `Tensor` of type `int32` (default: 0). Must be in the range
4362 `[-rank(x), rank(x))`.
4363 exclusive: If `True`, perform exclusive cumsum.
4364 reverse: A `bool` (default: False).
4365 name: A name for the operation (optional).
4367 Returns:
4368 A `Tensor`. Has the same type as `x`.
4369 """
4370 with ops.name_scope(name, "Cumsum", [x]) as name:
4371 x = ops.convert_to_tensor(x, name="x")
4372 return gen_math_ops.cumsum(
4373 x, axis, exclusive=exclusive, reverse=reverse, name=name)
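# A 1-D NumPy analogue of the `exclusive`/`reverse` flags (illustrative
# only; `_example_cumsum` is a hypothetical helper):
def _example_cumsum(x, exclusive=False, reverse=False):
  x = np.asarray(x)
  if reverse:
    x = x[::-1]
  out = np.cumsum(x)
  if exclusive:
    out = np.concatenate([[0], out[:-1]])  # shift right, seed with 0
  return out[::-1] if reverse else out
# e.g. _example_cumsum([2, 4, 6, 8], exclusive=True, reverse=True)
# -> array([18, 14, 8, 0]), matching the docstring example.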
4376@tf_export("math.cumprod", v1=["math.cumprod", "cumprod"])
4377@dispatch.add_dispatch_support
4378@deprecation.deprecated_endpoints("cumprod")
4379def cumprod(x, axis=0, exclusive=False, reverse=False, name=None):
4380 """Compute the cumulative product of the tensor `x` along `axis`.
4382 By default, this op performs an inclusive cumprod, which means that the
4383 first element of the input is identical to the first element of the output:
4385 ```python
4386 tf.math.cumprod([a, b, c]) # [a, a * b, a * b * c]
4387 ```
4389 By setting the `exclusive` kwarg to `True`, an exclusive cumprod is
4390 performed instead:
4393 ```python
4394 tf.math.cumprod([a, b, c], exclusive=True) # [1, a, a * b]
4395 ```
4397 By setting the `reverse` kwarg to `True`, the cumprod is performed in the
4398 opposite direction:
4400 ```python
4401 tf.math.cumprod([a, b, c], reverse=True) # [a * b * c, b * c, c]
4402 ```
4404 This is more efficient than using separate `tf.reverse` ops.
4405 The `reverse` and `exclusive` kwargs can also be combined:
4407 ```python
4408 tf.math.cumprod([a, b, c], exclusive=True, reverse=True) # [b * c, c, 1]
4409 ```
4411 Args:
4412 x: A `Tensor`. Must be one of the following types: `float32`, `float64`,
4413 `int64`, `int32`, `uint8`, `uint16`, `int16`, `int8`, `complex64`,
4414 `complex128`, `qint8`, `quint8`, `qint32`, `half`.
4415 axis: A `Tensor` of type `int32` (default: 0). Must be in the range
4416 `[-rank(x), rank(x))`.
4417 exclusive: If `True`, perform exclusive cumprod.
4418 reverse: A `bool` (default: False).
4419 name: A name for the operation (optional).
4421 Returns:
4422 A `Tensor`. Has the same type as `x`.
4423 """
4424 with ops.name_scope(name, "Cumprod", [x]) as name:
4425 x = ops.convert_to_tensor(x, name="x")
4426 return gen_math_ops.cumprod(
4427 x, axis, exclusive=exclusive, reverse=reverse, name=name)
4430@tf_export("math.cumulative_logsumexp", v1=["math.cumulative_logsumexp"])
4431@dispatch.add_dispatch_support
4432def cumulative_logsumexp(x, axis=0, exclusive=False, reverse=False, name=None):
4433 """Compute the cumulative log-sum-exp of the tensor `x` along `axis`.
4435 By default, this op performs an inclusive cumulative log-sum-exp, which means
4436 that the first element of the input is identical to the first element of
4437 the output.
4439 This operation is significantly more numerically stable than the equivalent
4440 tensorflow operation `tf.math.log(tf.math.cumsum(tf.math.exp(x)))`, although
4441 it computes the same result given infinite numerical precision. However, note
4442 that in some cases, it may be less stable than `tf.math.reduce_logsumexp`
4443 for a given element, as it applies the "log-sum-exp trick" in a different
4444 way.
4446 More precisely, where `tf.math.reduce_logsumexp` uses the following trick:
4448 ```
4449 log(sum(exp(x))) == log(sum(exp(x - max(x)))) + max(x)
4450 ```
4452 it cannot be directly used here as there is no fast way of applying it
4453 to each prefix `x[:i]`. Instead, this function implements a prefix
4454 scan using pairwise log-add-exp, which is a commutative and associative
4455 (up to floating point precision) operator:
4457 ```
4458 log_add_exp(x, y) = log(exp(x) + exp(y))
4459 = log(1 + exp(min(x, y) - max(x, y))) + max(x, y)
4460 ```
4462 However, reducing using the above operator leads to a different computation
4463 tree (logs are taken repeatedly instead of only at the end), and the maximum
4464 is only computed pairwise instead of over the entire prefix. In general, this
4465 leads to a different and slightly less precise computation.
4467 Args:
4468 x: A `Tensor`. Must be one of the following types: `float16`, `float32`,
4469 `float64`.
4470 axis: A `Tensor` of type `int32` or `int64` (default: 0). Must be in the
4471 range `[-rank(x), rank(x))`.
4472 exclusive: If `True`, perform exclusive cumulative log-sum-exp.
4473 reverse: If `True`, performs the cumulative log-sum-exp in the reverse
4474 direction.
4475 name: A name for the operation (optional).
4477 Returns:
4478 A `Tensor`. Has the same shape and type as `x`.
4479 """
4480 with ops.name_scope(name, "CumulativeLogsumexp", [x]) as name:
4481 x = ops.convert_to_tensor(x, name="x")
4482 return gen_math_ops.cumulative_logsumexp(
4483 x, axis, exclusive=exclusive, reverse=reverse, name=name)
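# Illustrative sketch of the pairwise log-add-exp scan described in the
# docstring (hypothetical helpers, 1-D and finite inputs only; the real op
# is a fused kernel):
def _example_log_add_exp(x, y):
  hi, lo = np.maximum(x, y), np.minimum(x, y)
  return hi + np.log1p(np.exp(lo - hi))  # log(exp(x) + exp(y)), stably
def _example_cumulative_logsumexp(x):
  x = np.asarray(x, dtype=np.float64)
  out = np.empty_like(x)
  acc = -np.inf  # identity element: exp(-inf) == 0
  for i, v in enumerate(x):
    acc = _example_log_add_exp(acc, v)
    out[i] = acc
  return out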
4486@tf_export("math.conj", v1=["math.conj", "conj"])
4487@dispatch.register_unary_elementwise_api
4488@dispatch.add_dispatch_support
4489@deprecation.deprecated_endpoints("conj")
4490def conj(x, name=None):
4491 r"""Returns the complex conjugate of a complex number.
4493 Given a tensor `x` of complex numbers, this operation returns a tensor of
4494 complex numbers that are the complex conjugate of each element in `x`. The
4495 complex numbers in `x` must be of the form \\(a + bj\\), where `a` is the
4496 real part and `b` is the imaginary part.
4498 The complex conjugate returned by this operation is of the form \\(a - bj\\).
4500 For example:
4502 >>> x = tf.constant([-2.25 + 4.75j, 3.25 + 5.75j])
4503 >>> tf.math.conj(x)
4504 <tf.Tensor: shape=(2,), dtype=complex128,
4505 numpy=array([-2.25-4.75j, 3.25-5.75j])>
4507 If `x` is real, it is returned unchanged.
4509 For example:
4511 >>> x = tf.constant([-2.25, 3.25])
4512 >>> tf.math.conj(x)
4513 <tf.Tensor: shape=(2,), dtype=float32,
4514 numpy=array([-2.25, 3.25], dtype=float32)>
4516 Args:
4517 x: `Tensor` to conjugate. Must have numeric or variant type.
4518 name: A name for the operation (optional).
4520 Returns:
4521 A `Tensor` that is the conjugate of `x` (with the same type).
4523 Raises:
4524 TypeError: If `x` is not a numeric tensor.
4526 @compatibility(numpy)
4527 Equivalent to numpy.conj.
4528 @end_compatibility
4529 """
4530 if isinstance(x, ops.Tensor):
4531 dt = x.dtype
4532 if dt.is_floating or dt.is_integer:
4533 return x
4534 with ops.name_scope(name, "Conj", [x]) as name:
4535 x = ops.convert_to_tensor(x, name="x")
4536 if x.dtype.is_complex or x.dtype == dtypes.variant:
4537 return gen_math_ops.conj(x, name=name)
4538 elif x.dtype.is_floating or x.dtype.is_integer:
4539 return x
4540 else:
4541 raise TypeError(
4542 f"Expected numeric or variant tensor, got dtype {x.dtype!r}.")
4545def reduced_shape(input_shape, axes):
4546 """Helper function for reduction ops.
4548 Args:
4549 input_shape: 1-D Tensor, the shape of the Tensor being reduced.
4550 axes: 1-D Tensor, the reduction axes.
4552 Returns:
4553 A 1-D Tensor, the output shape as if keepdims were set to True.
4554 """
4555 # TODO(allenl): Refactor `reduced_shape` to take the tensor corresponding to
4556 # `input_shape` rather than `tf.shape` of it. Then we can check if the shape
4557 # is fully defined here, which may be faster executing eagerly than running
4558 # `tf.shape` and then fetching its constant value.
4559 constant_input_shape = tensor_util.constant_value(input_shape)
4560 if constant_input_shape is not None:
4561 constant_axes = tensor_util.constant_value(axes)
4562 if constant_axes is not None:
4563 constant_axes = np.array(constant_axes, dtype=np.int32)
4564 constant_input_shape = np.array(constant_input_shape, dtype=np.int32)
4565 constant_input_shape[constant_axes] = 1
4566 return constant_input_shape
4568 # Example:
4569 # cast needed for SparseTensor reductions
4570 input_shape = cast(input_shape, dtypes.int32) # [2, 3, 5, 7]
4571 axes = cast(axes, dtypes.int32) # [1, 2]
4573 input_rank = array_ops.size(input_shape) # 4
4574 axes = (axes + input_rank) % input_rank
4575 axes_shape = array_ops.shape(axes) # [2]
4576 return gen_data_flow_ops.dynamic_stitch( # [2, 1, 1, 7]
4577 [
4578 range(input_rank), # [0, 1, 2, 3]
4579 axes
4580 ], # [1, 2]
4581 [
4582 input_shape, # [2, 3, 5, 7]
4583 array_ops.ones(axes_shape, dtype=dtypes.int32)
4584 ]) # [1, 1]
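# Constant-path sketch (illustrative; `_example_reduced_shape` is a
# hypothetical helper): with fully known shapes the result is just the
# input shape with the reduced axes pinned to 1, e.g.
# [2, 3, 5, 7] with axes [1, 2] -> [2, 1, 1, 7].
def _example_reduced_shape(input_shape, axes):
  shape = np.array(input_shape, dtype=np.int32)
  shape[np.asarray(axes) % shape.size] = 1  # normalize negative axes, then pin
  return shape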
4587def _unsorted_segment_N(data, segment_ids, num_segments):
4588 """ Helper function for unsorted_segment_mean/_sqrtN.
4590 Computes the number
4591 of segment entries with 0-entries set to 1 to allow division by N.
4592 """
4593 num_segments = ops.convert_to_tensor(num_segments)
4594 # bincount doesn't support negative indices so we use unsorted_segment_sum
4595 segment_ids_shape = array_ops.shape_internal(segment_ids)
4596 ones_tensor = array_ops.ones(segment_ids_shape, dtype=data.dtype)
4597 n = gen_math_ops.unsorted_segment_sum(ones_tensor, segment_ids, num_segments)
4598 # add dimensions for all non-reduced axes
4599 broadcastable_shape = array_ops.concat(
4600 [num_segments[array_ops.newaxis],
4601 array_ops.ones([array_ops.rank(data)
4602 - array_ops.rank(segment_ids)],
4603 dtype=num_segments.dtype)],
4604 axis=0)
4605 n = array_ops.reshape(n, broadcastable_shape)
4606 return gen_math_ops.maximum(n, 1)
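# NumPy analogue of the counting step (illustrative; unlike the op above,
# np.bincount rejects negative ids, which is why the real implementation
# uses unsorted_segment_sum of a ones tensor instead):
def _example_segment_counts(segment_ids, num_segments):
  n = np.bincount(np.asarray(segment_ids).ravel(), minlength=num_segments)
  return np.maximum(n, 1)  # clamp empty segments so dividing by N is safe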
4609@tf_export(
4610 "math.unsorted_segment_mean",
4611 v1=["math.unsorted_segment_mean", "unsorted_segment_mean"])
4612@dispatch.add_dispatch_support
4613@deprecation.deprecated_endpoints("unsorted_segment_mean")
4614def unsorted_segment_mean(data, segment_ids, num_segments, name=None):
4615 r"""Computes the mean along segments of a tensor.
4617 Read [the section on
4618 segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
4619 for an explanation of segments.
4621 This operator is similar to the `tf.math.unsorted_segment_sum` operator.
4622 Instead of computing the sum over segments, it computes the mean of all
4623 entries belonging to a segment such that:
4625 \\(output_i = 1/N_i \sum_{j...} data[j...]\\) where the sum is over tuples
4626 `j...` such that `segment_ids[j...] == i`, with \\(N_i\\) being the number of
4627 occurrences of id \\(i\\).
4629 If there is no entry for a given segment ID `i`, it outputs 0.
4631 If the given segment ID `i` is negative, the value is dropped and will not
4632 be added to the sum of the segment.
4634 Caution: On CPU, values in `segment_ids` are always validated to be less than
4635 `num_segments`, and an error is thrown for out-of-bound indices. On GPU, no
4636 error is thrown for out-of-bound indices; instead, out-of-bound indices
4637 result in safe but unspecified behavior, which may include ignoring
4638 out-of-bound indices or outputting a tensor with a 0 stored in the first
4639 dimension of its shape if `num_segments` is 0.
4641 Args:
4642 data: A `Tensor` with floating point or complex dtype.
4643 segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
4644 The values must be less than `num_segments`.
4645 The values are always validated to be in range on CPU,
4646 never validated on GPU.
4647 num_segments: An integer scalar `Tensor`. The number of distinct segment
4648 IDs.
4649 name: A name for the operation (optional).
4651 Returns:
4652 A `Tensor`. Has same shape as data, except for the first `segment_ids.rank`
4653 dimensions, which are replaced with a single dimension which has size
4654 `num_segments`.
4655 """
4656 with ops.name_scope(name, "UnsortedSegmentMean"):
4657 data = ops.convert_to_tensor(data)
4658 segment_ids = ops.convert_to_tensor(segment_ids)
4659 N = _unsorted_segment_N(data, segment_ids, num_segments)
4660 summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
4661 return summed / N
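# Worked NumPy sketch for rank-1, non-negative `segment_ids` (illustrative
# only; hypothetical helper): scatter-add the rows, count occurrences per
# id, and divide.
def _example_unsorted_segment_mean(data, segment_ids, num_segments):
  data = np.asarray(data, dtype=np.float64)
  ids = np.asarray(segment_ids)
  sums = np.zeros((num_segments,) + data.shape[1:])
  np.add.at(sums, ids, data)  # rows with the same id accumulate
  counts = np.maximum(np.bincount(ids, minlength=num_segments), 1)
  return sums / counts.reshape((num_segments,) + (1,) * (data.ndim - 1))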
4664@tf_export(
4665 "math.unsorted_segment_sqrt_n",
4666 v1=["math.unsorted_segment_sqrt_n", "unsorted_segment_sqrt_n"])
4667@dispatch.add_dispatch_support
4668@deprecation.deprecated_endpoints("unsorted_segment_sqrt_n")
4669def unsorted_segment_sqrt_n(data, segment_ids, num_segments, name=None):
4670 r"""Computes the sum along segments of a tensor divided by the sqrt(N).
4672 Read [the section on
4673 segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
4674 for an explanation of segments.
4676 This operator is similar to the `tf.math.unsorted_segment_sum` operator.
4677 In addition to computing the sum over segments, it divides the results by
4678 sqrt(N).
4680 \\(output_i = 1/\sqrt{N_i} \sum_{j...} data[j...]\\) where the sum is over
4681 tuples `j...` such that `segment_ids[j...] == i`, with \\(N_i\\) being the
4682 number of occurrences of id \\(i\\).
4684 If there is no entry for a given segment ID `i`, it outputs 0.
4686 Note that this op only supports floating point and complex dtypes,
4687 due to tf.sqrt only supporting these types.
4689 If the given segment ID `i` is negative, the value is dropped and will not
4690 be added to the sum of the segment.
4692 Caution: On CPU, values in `segment_ids` are always validated to be less than
4693 `num_segments`, and an error is thrown for out-of-bound indices. On GPU, no
4694 error is thrown for out-of-bound indices; instead, out-of-bound indices
4695 result in safe but unspecified behavior, which may include ignoring
4696 out-of-bound indices or outputting a tensor with a 0 stored in the first
4697 dimension of its shape if `num_segments` is 0.
4699 Args:
4700 data: A `Tensor` with floating point or complex dtype.
4701 segment_ids: An integer tensor whose shape is a prefix of `data.shape`.
4702 The values must be in the range `[0, num_segments)`.
4703 The values are always validated to be in range on CPU,
4704 never validated on GPU.
4705 num_segments: An integer scalar `Tensor`. The number of distinct segment
4706 IDs.
4707 name: A name for the operation (optional).
4709 Returns:
4710 A `Tensor`. Has same shape as data, except for the first `segment_ids.rank`
4711 dimensions, which are replaced with a single dimension which has size
4712 `num_segments`.
4713 """
4714 with ops.name_scope(name, "UnsortedSegmentSqrtN"):
4715 data = ops.convert_to_tensor(data)
4716 segment_ids = ops.convert_to_tensor(segment_ids)
4717 N = _unsorted_segment_N(data, segment_ids, num_segments)
4718 summed = gen_math_ops.unsorted_segment_sum(data, segment_ids, num_segments)
4719 return summed / gen_math_ops.sqrt(N)
4722@tf_export(v1=["sparse.segment_sum", "sparse_segment_sum"])
4723@deprecation.deprecated_endpoints("sparse_segment_sum")
4724def sparse_segment_sum(data,
4725 indices,
4726 segment_ids,
4727 name=None,
4728 num_segments=None):
4729 r"""Computes the sum along sparse segments of a tensor.
4731 Read [the section on
4732 segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
4733 for an explanation of segments.
4735 Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s
4736 first dimension, selecting a subset of dimension 0, specified by `indices`.
4737 `segment_ids` is allowed to have missing ids, in which case the output will
4738 be zeros at those indices. In those cases `num_segments` is used to determine
4739 the size of the output.
4741 For example:
4743 ```python
4744 c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
4746 # Select two rows, one segment.
4747 tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
4748 # => [[0 0 0 0]]
4750 # Select two rows, two segments.
4751 tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
4752 # => [[ 1 2 3 4]
4753 # [-1 -2 -3 -4]]
4755 # With missing segment ids.
4756 tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
4757 num_segments=4)
4758 # => [[ 1 2 3 4]
4759 # [ 0 0 0 0]
4760 # [-1 -2 -3 -4]
4761 # [ 0 0 0 0]]
4763 # Select all rows, two segments.
4764 tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
4765 # => [[0 0 0 0]
4766 # [5 6 7 8]]
4768 # Which is equivalent to:
4769 tf.math.segment_sum(c, tf.constant([0, 0, 1]))
4770 ```
4772 Args:
4773 data: A `Tensor` with data that will be assembled in the output.
4774 indices: A 1-D `Tensor` with indices into `data`. Has same rank as
4775 `segment_ids`.
4776 segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
4777 should be sorted and can be repeated.
4778 name: A name for the operation (optional).
4779 num_segments: An optional int32 scalar. Indicates the size of the output
4780 `Tensor`.
4782 Returns:
A `Tensor` with the same shape as `data`, except for dimension 0, which
has size `k`, the number of segments specified via `num_segments` or
inferred from the last element in `segment_ids`.
4786 """
4787 if num_segments is not None:
4788 return gen_math_ops.sparse_segment_sum_with_num_segments(
4789 data=data,
4790 indices=indices,
4791 segment_ids=segment_ids,
4792 num_segments=num_segments,
4793 name=name)
4794 else:
4795 return gen_math_ops.sparse_segment_sum(
4796 data=data, indices=indices, segment_ids=segment_ids, name=name)
4799@tf_export("sparse.segment_sum", v1=[])
4800def sparse_segment_sum_v2(data,
4801 indices,
4802 segment_ids,
4803 num_segments=None,
4804 name=None):
4805 r"""Computes the sum along sparse segments of a tensor.
4807 Read [the section on
4808 segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
4809 for an explanation of segments.
4811 Like `tf.math.segment_sum`, but `segment_ids` can have rank less than `data`'s
4812 first dimension, selecting a subset of dimension 0, specified by `indices`.
4813 `segment_ids` is allowed to have missing ids, in which case the output will
4814 be zeros at those indices. In those cases `num_segments` is used to determine
4815 the size of the output.
4817 For example:
4819 ```python
4820 c = tf.constant([[1,2,3,4], [-1,-2,-3,-4], [5,6,7,8]])
4822 # Select two rows, one segment.
4823 tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 0]))
4824 # => [[0 0 0 0]]
# Select two rows, two segments.
4827 tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 1]))
4828 # => [[ 1 2 3 4]
4829 # [-1 -2 -3 -4]]
4831 # With missing segment ids.
4832 tf.sparse.segment_sum(c, tf.constant([0, 1]), tf.constant([0, 2]),
4833 num_segments=4)
4834 # => [[ 1 2 3 4]
4835 # [ 0 0 0 0]
4836 # [-1 -2 -3 -4]
4837 # [ 0 0 0 0]]
4839 # Select all rows, two segments.
4840 tf.sparse.segment_sum(c, tf.constant([0, 1, 2]), tf.constant([0, 0, 1]))
4841 # => [[0 0 0 0]
4842 # [5 6 7 8]]
4844 # Which is equivalent to:
4845 tf.math.segment_sum(c, tf.constant([0, 0, 1]))
4846 ```
4848 Args:
4849 data: A `Tensor` with data that will be assembled in the output.
4850 indices: A 1-D `Tensor` with indices into `data`. Has same rank as
4851 `segment_ids`.
4852 segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
4853 should be sorted and can be repeated.
4854 num_segments: An optional int32 scalar. Indicates the size of the output
4855 `Tensor`.
4856 name: A name for the operation (optional).
4858 Returns:
A `Tensor` with the same shape as `data`, except for dimension 0, which
has size `k`, the number of segments specified via `num_segments` or
inferred from the last element in `segment_ids`.
4862 """
4863 return sparse_segment_sum(
4864 data, indices, segment_ids, name=name, num_segments=num_segments)
4867@tf_export(v1=["sparse.segment_mean", "sparse_segment_mean"])
4868@deprecation.deprecated_endpoints("sparse_segment_mean")
4869def sparse_segment_mean(data,
4870 indices,
4871 segment_ids,
4872 name=None,
4873 num_segments=None):
4874 r"""Computes the mean along sparse segments of a tensor.
4876 Read [the section on
4877 segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
4878 for an explanation of segments.
4880 Like `tf.math.segment_mean`, but `segment_ids` can have rank less than
4881 `data`'s first dimension, selecting a subset of dimension 0, specified by
4882 `indices`.
4883 `segment_ids` is allowed to have missing ids, in which case the output will
4884 be zeros at those indices. In those cases `num_segments` is used to determine
4885 the size of the output.
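For example (an illustrative sketch, mirroring the `sparse_segment_sum`
examples above; printed values rounded):

```python
c = tf.constant([[1.0, 2.0, 3.0, 4.0],
                 [-1.0, -2.0, -3.0, -4.0],
                 [5.0, 6.0, 7.0, 8.0]])
# Average rows 0 and 2 into a single segment.
tf.sparse.segment_mean(c, tf.constant([0, 2]), tf.constant([0, 0]))
# => [[3. 4. 5. 6.]]
```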
4887 Args:
4888 data: A `Tensor` with data that will be assembled in the output.
4889 indices: A 1-D `Tensor` with indices into `data`. Has same rank as
4890 `segment_ids`.
4891 segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
4892 should be sorted and can be repeated.
4893 name: A name for the operation (optional).
4894 num_segments: An optional int32 scalar. Indicates the size of the output
4895 `Tensor`.
4897 Returns:
A `Tensor` with the same shape as `data`, except for dimension 0, which
has size `k`, the number of segments specified via `num_segments` or
inferred from the last element in `segment_ids`.
4901 """
4902 if num_segments is not None:
4903 return gen_math_ops.sparse_segment_mean_with_num_segments(
4904 data=data,
4905 indices=indices,
4906 segment_ids=segment_ids,
4907 num_segments=num_segments,
4908 name=name)
4909 else:
4910 return gen_math_ops.sparse_segment_mean(
4911 data=data, indices=indices, segment_ids=segment_ids, name=name)
4914@tf_export("sparse.segment_mean", v1=[])
4915def sparse_segment_mean_v2(data,
4916 indices,
4917 segment_ids,
4918 num_segments=None,
4919 name=None):
4920 r"""Computes the mean along sparse segments of a tensor.
4922 Read [the section on
4923 segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
4924 for an explanation of segments.
4926 Like `tf.math.segment_mean`, but `segment_ids` can have rank less than
4927 `data`'s first dimension, selecting a subset of dimension 0, specified by
4928 `indices`.
4929 `segment_ids` is allowed to have missing ids, in which case the output will
4930 be zeros at those indices. In those cases `num_segments` is used to determine
4931 the size of the output.
4933 Args:
4934 data: A `Tensor` with data that will be assembled in the output.
4935 indices: A 1-D `Tensor` with indices into `data`. Has same rank as
4936 `segment_ids`.
4937 segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
4938 should be sorted and can be repeated.
4939 num_segments: An optional int32 scalar. Indicates the size of the output
4940 `Tensor`.
4941 name: A name for the operation (optional).
4943 Returns:
A `Tensor` with the same shape as `data`, except for dimension 0, which
has size `k`, the number of segments specified via `num_segments` or
inferred from the last element in `segment_ids`.
4947 """
4948 return sparse_segment_mean(
4949 data, indices, segment_ids, name=name, num_segments=num_segments)
4952@tf_export(v1=["sparse.segment_sqrt_n", "sparse_segment_sqrt_n"])
4953@deprecation.deprecated_endpoints("sparse_segment_sqrt_n")
4954def sparse_segment_sqrt_n(data,
4955 indices,
4956 segment_ids,
4957 name=None,
4958 num_segments=None):
4959 r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).
4961 `N` is the size of the segment being reduced.
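For example (an illustrative sketch; printed values rounded):

```python
c = tf.constant([[1.0, 2.0], [3.0, 4.0]])
# Both rows in segment 0: the sums [4., 6.] are divided by sqrt(2).
tf.sparse.segment_sqrt_n(c, tf.constant([0, 1]), tf.constant([0, 0]))
# => [[2.828427 4.2426405]]
```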
4963 Args:
4964 data: A `Tensor` with data that will be assembled in the output.
4965 indices: A 1-D `Tensor` with indices into `data`. Has same rank as
4966 `segment_ids`.
4967 segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
4968 should be sorted and can be repeated.
4969 name: A name for the operation (optional).
4970 num_segments: An optional int32 scalar. Indicates the size of the output
4971 `Tensor`.
4973 Returns:
A `Tensor` with the same shape as `data`, except for dimension 0, which
has size `k`, the number of segments specified via `num_segments` or
inferred from the last element in `segment_ids`.
4977 """
4978 if num_segments is not None:
4979 return gen_math_ops.sparse_segment_sqrt_n_with_num_segments(
4980 data=data,
4981 indices=indices,
4982 segment_ids=segment_ids,
4983 num_segments=num_segments,
4984 name=name)
4985 else:
4986 return gen_math_ops.sparse_segment_sqrt_n(
4987 data=data, indices=indices, segment_ids=segment_ids, name=name)
4990@tf_export("sparse.segment_sqrt_n", v1=[])
4991def sparse_segment_sqrt_n_v2(data,
4992 indices,
4993 segment_ids,
4994 num_segments=None,
4995 name=None):
4996 r"""Computes the sum along sparse segments of a tensor divided by the sqrt(N).
4998 Read [the section on
4999 segmentation](https://www.tensorflow.org/versions/r2.0/api_docs/python/tf/math#about_segmentation)
5000 for an explanation of segments.
Like `tf.sparse.segment_mean`, but instead of dividing by the size of the
segment, `N`, it divides by `sqrt(N)`.
5005 Args:
5006 data: A `Tensor` with data that will be assembled in the output.
5007 indices: A 1-D `Tensor` with indices into `data`. Has same rank as
5008 `segment_ids`.
5009 segment_ids: A 1-D `Tensor` with indices into the output `Tensor`. Values
5010 should be sorted and can be repeated.
5011 num_segments: An optional int32 scalar. Indicates the size of the output
5012 `Tensor`.
5013 name: A name for the operation (optional).
5015 Returns:
A `Tensor` with the same shape as `data`, except for dimension 0, which
has size `k`, the number of segments specified via `num_segments` or
inferred from the last element in `segment_ids`.
5019 """
5020 return sparse_segment_sqrt_n(
5021 data, indices, segment_ids, name=name, num_segments=num_segments)
5024@tf_export("tensordot", "linalg.tensordot")
5025@dispatch.add_dispatch_support
5026def tensordot(a, b, axes, name=None):
5027 r"""Tensor contraction of a and b along specified axes and outer product.
5029 Tensordot (also known as tensor contraction) sums the product of elements
5030 from `a` and `b` over the indices specified by `axes`.
5032 This operation corresponds to `numpy.tensordot(a, b, axes)`.
5034 Example 1: When `a` and `b` are matrices (order 2), the case `axes=1`
5035 is equivalent to matrix multiplication.
5037 Example 2: When `a` and `b` are matrices (order 2), the case
5038 `axes = [[1], [0]]` is equivalent to matrix multiplication.
5040 Example 3: When `a` and `b` are matrices (order 2), the case `axes=0` gives
5041 the outer product, a tensor of order 4.
5043 Example 4: Suppose that \\(a_{ijk}\\) and \\(b_{lmn}\\) represent two
tensors of order 3. Then, `tensordot(a, b, [[0], [2]])` is the order 4 tensor
5045 \\(c_{jklm}\\) whose entry
5046 corresponding to the indices \\((j,k,l,m)\\) is given by:
5048 \\( c_{jklm} = \sum_i a_{ijk} b_{lmi} \\).
5050 In general, `order(c) = order(a) + order(b) - 2*len(axes[0])`.
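For instance (a small sketch of Examples 1–3 above; only shapes are shown):

```python
a = tf.ones([2, 3])
b = tf.ones([3, 4])
tf.tensordot(a, b, axes=1).shape           # [2, 4] -- matrix multiplication
tf.tensordot(a, b, axes=[[1], [0]]).shape  # [2, 4] -- the same contraction
tf.tensordot(a, b, axes=0).shape           # [2, 3, 3, 4] -- outer product
```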
5052 Args:
5053 a: `Tensor` of type `float32` or `float64`.
5054 b: `Tensor` with the same type as `a`.
5055 axes: Either a scalar `N`, or a list or an `int32` `Tensor` of shape [2, k].
5056 If axes is a scalar, sum over the last N axes of a and the first N axes of
5057 b in order. If axes is a list or `Tensor` the first and second row contain
5058 the set of unique integers specifying axes along which the contraction is
5059 computed, for `a` and `b`, respectively. The number of axes for `a` and
5060 `b` must be equal. If `axes=0`, computes the outer product between `a` and
5061 `b`.
5062 name: A name for the operation (optional).
5064 Returns:
5065 A `Tensor` with the same type as `a`.
5067 Raises:
5068 ValueError: If the shapes of `a`, `b`, and `axes` are incompatible.
5069 IndexError: If the values in axes exceed the rank of the corresponding
5070 tensor.
5071 """
5073 def _tensordot_reshape(a, axes, flipped=False):
5074 """Helper method to perform transpose and reshape for contraction op.
5076 This method is helpful in reducing `math_ops.tensordot` to `math_ops.matmul`
5077 using `array_ops.transpose` and `array_ops.reshape`. The method takes a
5078 tensor and performs the correct transpose and reshape operation for a given
5079 set of indices. It returns the reshaped tensor as well as a list of indices
5080 necessary to reshape the tensor again after matrix multiplication.
5082 Args:
5083 a: `Tensor`.
5084 axes: List or `int32` `Tensor` of unique indices specifying valid axes of
5085 `a`.
5086 flipped: An optional `bool`. Defaults to `False`. If `True`, the method
5087 assumes that `a` is the second argument in the contraction operation.
5089 Returns:
5090 A tuple `(reshaped_a, free_dims, free_dims_static)` where `reshaped_a` is
5091 the tensor `a` reshaped to allow contraction via `matmul`, `free_dims` is
5092 either a list of integers or an `int32` `Tensor`, depending on whether
5093 the shape of a is fully specified, and free_dims_static is either a list
5094 of integers and None values, or None, representing the inferred
5095 static shape of the free dimensions
5096 """
5097 if a.get_shape().is_fully_defined() and isinstance(axes, (list, tuple)):
5098 shape_a = a.get_shape().as_list()
5099 axes = [i if i >= 0 else i + len(shape_a) for i in axes]
5100 free = [i for i in builtins.range(len(shape_a)) if i not in axes]
5101 free_dims = [shape_a[i] for i in free]
5102 prod_free = int(np.prod([shape_a[i] for i in free]))
5103 prod_axes = int(np.prod([shape_a[i] for i in axes]))
5104 perm = list(axes) + free if flipped else free + list(axes)
5105 new_shape = [prod_axes, prod_free] if flipped else [prod_free, prod_axes]
5106 if (perm != np.arange(len(shape_a))).any():
5107 a_trans = array_ops.transpose(a, perm)
5108 else:
5109 a_trans = a
5110 if a_trans.get_shape().as_list() != new_shape:
5111 reshaped_a = array_ops.reshape(a_trans, new_shape)
5112 else:
5113 reshaped_a = a_trans
5114 return reshaped_a, free_dims, free_dims
5115 else:
5116 if a.get_shape().ndims is not None and isinstance(axes, (list, tuple)):
5117 shape_a = a.get_shape().as_list()
5118 axes = [i if i >= 0 else i + len(shape_a) for i in axes]
5119 free = [i for i in builtins.range(len(shape_a)) if i not in axes]
5120 axes_dims = [shape_a[i] for i in axes]
5121 free_dims = [shape_a[i] for i in free]
5122 free_dims_static = free_dims
5123 axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
5124 free = ops.convert_to_tensor(free, dtype=dtypes.int32, name="free")
5125 shape_a = array_ops.shape(a)
5126 else:
5127 free_dims_static = None
5128 shape_a = array_ops.shape(a)
5129 rank_a = array_ops.rank(a)
5130 axes = ops.convert_to_tensor(axes, dtype=dtypes.int32, name="axes")
5131 axes = array_ops.where(axes >= 0, axes, axes + rank_a)
5132 free, _ = gen_array_ops.list_diff(range(rank_a), axes, dtypes.int32)
5133 free_dims = array_ops.gather(shape_a, free)
5134 axes_dims = array_ops.gather(shape_a, axes)
5135 prod_free_dims = reduce_prod(free_dims)
5136 prod_axes_dims = reduce_prod(axes_dims)
5137 if flipped:
5138 perm = array_ops.concat([axes, free], 0)
5139 new_shape = array_ops_stack.stack([prod_axes_dims, prod_free_dims])
5140 else:
5141 perm = array_ops.concat([free, axes], 0)
5142 new_shape = array_ops_stack.stack([prod_free_dims, prod_axes_dims])
5143 reshaped_a = array_ops.reshape(array_ops.transpose(a, perm), new_shape)
5144 return reshaped_a, free_dims, free_dims_static
5146 def _tensordot_axes(a, axes):
5147 """Generates two sets of contraction axes for the two tensor arguments."""
5148 a_shape = a.get_shape()
5149 if isinstance(axes, compat.integral_types):
5150 if axes < 0:
5151 raise ValueError(f"`axes` must be at least 0. Received: {axes}.")
5152 if a_shape.ndims is not None:
5153 if axes > a_shape.ndims:
5154 raise ValueError(f"`axes` must not be larger than the number of "
5155 f"dimensions of tensor {a}. Received {axes}, vs "
5156 f"tensor dimensions {a_shape.ndims}.")
5157 return (list(builtins.range(a_shape.ndims - axes,
5158 a_shape.ndims)), list(builtins.range(axes)))
5159 else:
5160 rank = array_ops.rank(a)
5161 return (range(rank - axes, rank,
5162 dtype=dtypes.int32), range(axes, dtype=dtypes.int32))
5163 elif isinstance(axes, (list, tuple)):
5164 if len(axes) != 2:
5165 raise ValueError(
5166 f"`axes` must be an integer or have length 2. Received {axes}.")
5167 a_axes = axes[0]
5168 b_axes = axes[1]
5169 if isinstance(a_axes, compat.integral_types) and \
5170 isinstance(b_axes, compat.integral_types):
5171 a_axes = [a_axes]
5172 b_axes = [b_axes]
5173 if len(a_axes) != len(b_axes):
5174 raise ValueError(f"Different number of contraction axes `a` and `b`, "
5175 f"{len(a_axes)} != {len(b_axes)}.")
5176 return a_axes, b_axes
5177 else:
5178 axes = ops.convert_to_tensor(axes, name="axes", dtype=dtypes.int32)
5179 return axes[0], axes[1]
5181 with ops.name_scope(name, "Tensordot", [a, b, axes]) as name:
5182 a = ops.convert_to_tensor(a, name="a")
5183 b = ops.convert_to_tensor(b, name="b")
5184 a_axes, b_axes = _tensordot_axes(a, axes)
5185 a_reshape, a_free_dims, a_free_dims_static = _tensordot_reshape(a, a_axes)
5186 b_reshape, b_free_dims, b_free_dims_static = _tensordot_reshape(
5187 b, b_axes, True)
5188 ab_matmul = matmul(a_reshape, b_reshape)
5189 if isinstance(a_free_dims, list) and isinstance(b_free_dims, list):
5190 if (ab_matmul.get_shape().is_fully_defined() and
5191 ab_matmul.get_shape().as_list() == a_free_dims + b_free_dims):
5192 return ab_matmul
5193 else:
5194 return array_ops.reshape(
5195 ab_matmul, a_free_dims + b_free_dims, name=name)
5196 else:
5197 a_free_dims = ops.convert_to_tensor(a_free_dims, dtype=dtypes.int32)
5198 b_free_dims = ops.convert_to_tensor(b_free_dims, dtype=dtypes.int32)
5199 product = array_ops.reshape(
5200 ab_matmul, array_ops.concat([a_free_dims, b_free_dims], 0), name=name)
5201 if a_free_dims_static is not None and b_free_dims_static is not None:
5202 product.set_shape(a_free_dims_static + b_free_dims_static)
5203 return product
5206@tf_export("math.polyval")
5207@dispatch.add_dispatch_support
5208def polyval(coeffs, x, name=None):
5209 r"""Computes the elementwise value of a polynomial.
If `x` is a tensor and `coeffs` is a list of n + 1 tensors,
this function returns the value of the n-th order polynomial

`p(x) = coeffs[n] + coeffs[n-1] * x + ... + coeffs[0] * x**n`

evaluated using Horner's method, i.e.

```python
p(x) = coeffs[n] + x * (coeffs[n-1] + ... + x * (coeffs[1] + x * coeffs[0]))
```
5222 Usage Example:
5224 >>> coefficients = [1.0, 2.5, -4.2]
5225 >>> x = 5.0
5226 >>> y = tf.math.polyval(coefficients, x)
5227 >>> y
5228 <tf.Tensor: shape=(), dtype=float32, numpy=33.3>
5230 Usage Example:
5232 >>> tf.math.polyval([2, 1, 0], 3) # evaluates 2 * (3**2) + 1 * (3**1) + 0 * (3**0)
5233 <tf.Tensor: shape=(), dtype=int32, numpy=21>
5235 `tf.math.polyval` can also be used in polynomial regression. Taking
5236 advantage of this function can facilitate writing a polynomial equation
5237 as compared to explicitly writing it out, especially for higher degree
5238 polynomials.
5240 >>> x = tf.constant(3)
5241 >>> theta1 = tf.Variable(2)
5242 >>> theta2 = tf.Variable(1)
5243 >>> theta3 = tf.Variable(0)
5244 >>> tf.math.polyval([theta1, theta2, theta3], x)
5245 <tf.Tensor: shape=(), dtype=int32, numpy=21>
5247 Args:
5248 coeffs: A list of `Tensor` representing the coefficients of the polynomial.
5249 x: A `Tensor` representing the variable of the polynomial.
5250 name: A name for the operation (optional).
5252 Returns:
A `Tensor` of the same shape as the expression p(x), with the usual
broadcasting rules for element-wise addition and multiplication applied.
5256 @compatibility(numpy)
5257 Equivalent to numpy.polyval.
5258 @end_compatibility
5259 """
5260 if not isinstance(coeffs, list):
5261 raise ValueError(
5262 f"Argument coeffs must be list type. Received type {type(coeffs)}.")
5264 with ops.name_scope(name, "polyval", nest.flatten(coeffs) + [x]) as name:
5265 x = ops.convert_to_tensor(x, name="x")
5266 if len(coeffs) < 1:
5267 return array_ops.zeros_like(x, name=name)
5268 coeffs = [
5269 ops.convert_to_tensor(coeff, name=("coeff_%d" % index))
5270 for index, coeff in enumerate(coeffs)
5271 ]
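# Horner's method: start from the highest-degree coefficient and fold in
# the remaining ones, multiplying by x at each step.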
5272 p = coeffs[0]
5273 for c in coeffs[1:]:
5274 p = c + p * x
5275 return p
5278@tf_export("math.reciprocal_no_nan")
5279@dispatch.register_unary_elementwise_api
5280@dispatch.add_dispatch_support
5281def reciprocal_no_nan(x, name=None):
5282 """Performs a safe reciprocal operation, element wise.
5284 If a particular element is zero, the reciprocal for that element is
5285 also set to zero.
5287 For example:
5288 ```python
5289 x = tf.constant([2.0, 0.5, 0, 1], dtype=tf.float32)
5290 tf.math.reciprocal_no_nan(x) # [ 0.5, 2, 0.0, 1.0 ]
5291 ```
5293 Args:
x: A `Tensor` of type `float16`, `float32`, `float64`, `complex64`, or
`complex128`.
5296 name: A name for the operation (optional).
5298 Returns:
5299 A `Tensor` of same shape and type as `x`.
5301 Raises:
5302 TypeError: x must be of a valid dtype.
5304 """
5306 with ops.name_scope(name, "reciprocal_no_nan", [x]) as scope:
5307 x = ops.convert_to_tensor(x, name="x")
5308 one = constant_op.constant(1, dtype=x.dtype.base_dtype, name="one")
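# div_no_nan returns 0 wherever x is 0, which yields the safe reciprocal.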
5309 return gen_math_ops.div_no_nan(one, x, name=scope)
5312@tf_export("math.xdivy")
5313@dispatch.register_binary_elementwise_api
5314@dispatch.add_dispatch_support
5315def xdivy(x, y, name=None):
5316 """Computes `x / y`.
5318 Given `x` and `y`, computes `x / y`. This function safely returns
5319 zero when `x = 0`, no matter what the value of `y` is.
5321 Example:
5323 >>> tf.math.xdivy(1., 2.)
5324 <tf.Tensor: shape=(), dtype=float32, numpy=0.5>
5325 >>> tf.math.xdivy(0., 1.)
5326 <tf.Tensor: shape=(), dtype=float32, numpy=0.0>
5327 >>> tf.math.xdivy(0., 0.)
5328 <tf.Tensor: shape=(), dtype=float32, numpy=0.0>
5329 >>> tf.math.xdivy(1., 0.)
5330 <tf.Tensor: shape=(), dtype=float32, numpy=inf>
5332 Args:
5333 x: A `tf.Tensor` of type `half`, `float32`, `float64`, `complex64`,
5334 `complex128`
5335 y: A `tf.Tensor` of type `half`, `float32`, `float64`, `complex64`,
5336 `complex128`
5337 name: A name for the operation (optional).
5339 Returns:
5340 `x / y`.
5341 """
5342 with ops.name_scope(name, "xdivy", [x]):
5343 return gen_math_ops.xdivy(x, y)
5346@tf_export("math.xlog1py")
5347@dispatch.register_binary_elementwise_api
5348@dispatch.add_dispatch_support
5349def xlog1py(x, y, name=None):
5350 r"""Compute x * log1p(y).
5352 Given `x` and `y`, compute `x * log1p(y)`. This function safely returns
5353 zero when `x = 0`, no matter what the value of `y` is.
5355 Example:
5357 >>> tf.math.xlog1py(0., 1.)
5358 <tf.Tensor: shape=(), dtype=float32, numpy=0.>
5359 >>> tf.math.xlog1py(1., 1.)
5360 <tf.Tensor: shape=(), dtype=float32, numpy=0.6931472>
5361 >>> tf.math.xlog1py(2., 2.)
5362 <tf.Tensor: shape=(), dtype=float32, numpy=2.1972246>
5363 >>> tf.math.xlog1py(0., -1.)
5364 <tf.Tensor: shape=(), dtype=float32, numpy=0.>
5366 Args:
5367 x: A `tf.Tensor` of type `half`, `float32`, `float64`, `complex64`,
5368 `complex128`
5369 y: A `tf.Tensor` of type `half`, `float32`, `float64`, `complex64`,
5370 `complex128`
5371 name: A name for the operation (optional).
5373 Returns:
5374 `x * log1p(y)`.
5376 @compatibility(scipy)
5377 Equivalent to scipy.special.xlog1py
5378 @end_compatibility
5379 """
5380 with ops.name_scope(name, "xlog1py", [x]):
5381 return gen_math_ops.xlog1py(x, y)
5384@tf_export("math.erfinv")
5385@dispatch.register_unary_elementwise_api
5386@dispatch.add_dispatch_support
5387def erfinv(x, name=None):
5388 """Compute inverse error function.
5390 Given `x`, compute the inverse error function of `x`. This function
5391 is the inverse of `tf.math.erf`.
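For example (an illustrative sketch; values rounded):

```python
tf.math.erfinv([0.0, 0.5, -0.5])
# ==> [0., 0.4769363, -0.4769363] (approximately)
```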
5393 Args:
5394 x: `Tensor` with type `float` or `double`.
5395 name: A name for the operation (optional).
5396 Returns:
5397 Inverse error function of `x`.
5398 """
5399 with ops.name_scope(name, "erfinv", [x]):
5400 return gen_math_ops.erfinv(x)
5403@tf_export("math.ndtri")
5404@dispatch.register_unary_elementwise_api
5405@dispatch.add_dispatch_support
5406def ndtri(x, name=None):
5407 """Compute quantile of Standard Normal.
5409 Args:
5410 x: `Tensor` with type `float` or `double`.
5411 name: A name for the operation (optional).
5412 Returns:
The standard normal quantile of `x`.
5414 """
5415 with ops.name_scope(name, "ndtri", [x]):
5416 return gen_math_ops.ndtri(x)
5419@tf_export("math.erfcinv")
5420@dispatch.register_unary_elementwise_api
5421@dispatch.add_dispatch_support
5422def erfcinv(x, name=None):
5423 """Computes the inverse of complementary error function.
5425 Given `x`, compute the inverse complementary error function of `x`.
5426 This function is the inverse of `tf.math.erfc`, and is defined on
5427 `[0, 2]`.
5429 >>> tf.math.erfcinv([0., 0.2, 1., 1.5, 2.])
5430 <tf.Tensor: shape=(5,), dtype=float32, numpy=
5431 array([ inf, 0.9061935, -0. , -0.4769363, -inf],
5432 dtype=float32)>
5434 Args:
5435 x: `Tensor` with type `float` or `double`.
5436 name: A name for the operation (optional).
5437 Returns:
5438 Inverse complementary error function of `x`.
@compatibility(scipy)
5441 Equivalent to scipy.special.erfcinv
5442 @end_compatibility
5443 """
5444 with ops.name_scope(name, "erfcinv", [x]):
5445 x = ops.convert_to_tensor(x, name="start")
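# Uses the identity erfcinv(x) = -ndtri(x / 2) / sqrt(2), relating the
# complementary error function to the standard normal quantile.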
5446 return -ndtri(0.5 * x) * np.sqrt(0.5)
5449@tf_export("math.ceil", v1=["math.ceil", "ceil"])
5450@dispatch.register_unary_elementwise_api
5451@dispatch.add_dispatch_support
5452@deprecation.deprecated_endpoints("ceil")
5453def ceil(x, name=None):
5454 """Return the ceiling of the input, element-wise.
5456 For example:
5458 >>> tf.math.ceil([-1.7, -1.5, -0.2, 0.2, 1.5, 1.7, 2.0])
5459 <tf.Tensor: shape=(7,), dtype=float32,
5460 numpy=array([-1., -1., -0., 1., 2., 2., 2.], dtype=float32)>
5462 Args:
x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
`float32`, `float64`.
5465 name: A name for the operation (optional).
5467 Returns:
5468 A `tf.Tensor`. Has the same type as `x`.
5470 @compatibility(numpy)
5471 Equivalent to np.ceil
5472 @end_compatibility
5473 """
5474 return gen_math_ops.ceil(x, name)
5477@tf_export("math.sqrt", "sqrt")
5478@dispatch.register_unary_elementwise_api
5479@dispatch.add_dispatch_support
5480def sqrt(x, name=None): # pylint: disable=redefined-builtin
5481 r"""Computes element-wise square root of the input tensor.
5483 Note: This operation does not support integer types.
5485 >>> x = tf.constant([[4.0], [16.0]])
5486 >>> tf.sqrt(x)
5487 <tf.Tensor: shape=(2, 1), dtype=float32, numpy=
5488 array([[2.],
5489 [4.]], dtype=float32)>
5490 >>> y = tf.constant([[-4.0], [16.0]])
5491 >>> tf.sqrt(y)
5492 <tf.Tensor: shape=(2, 1), dtype=float32, numpy=
5493 array([[nan],
5494 [ 4.]], dtype=float32)>
5495 >>> z = tf.constant([[-1.0], [16.0]], dtype=tf.complex128)
5496 >>> tf.sqrt(z)
5497 <tf.Tensor: shape=(2, 1), dtype=complex128, numpy=
5498 array([[0.0+1.j],
5499 [4.0+0.j]])>
5501 Note: In order to support complex type, please provide an input tensor
5502 of `complex64` or `complex128`.
5504 Args:
5505 x: A `tf.Tensor` of type `bfloat16`, `half`, `float32`, `float64`,
5506 `complex64`, `complex128`
5507 name: A name for the operation (optional).
5509 Returns:
5510 A `tf.Tensor` of same size, type and sparsity as `x`.
5511 """
5512 return gen_math_ops.sqrt(x, name)
5515# pylint: disable=g-docstring-has-escape
5516@tf_export("math.exp", "exp")
5517@dispatch.register_unary_elementwise_api
5518@dispatch.add_dispatch_support
5519def exp(x, name=None):
5520 r"""Computes exponential of x element-wise. \\(y = e^x\\).
5522 This function computes the exponential of the input tensor element-wise.
5523 i.e. `math.exp(x)` or \\(e^x\\), where `x` is the input tensor.
5524 \\(e\\) denotes Euler's number and is approximately equal to 2.718281.
5525 Output is positive for any real input.
5527 >>> x = tf.constant(2.0)
5528 >>> tf.math.exp(x)
5529 <tf.Tensor: shape=(), dtype=float32, numpy=7.389056>
5531 >>> x = tf.constant([2.0, 8.0])
5532 >>> tf.math.exp(x)
5533 <tf.Tensor: shape=(2,), dtype=float32,
5534 numpy=array([ 7.389056, 2980.958 ], dtype=float32)>
5536 For complex numbers, the exponential value is calculated as
5537 $$
5538 e^{x+iy} = {e^x} {e^{iy}} = {e^x} ({\cos (y) + i \sin (y)})
5539 $$
5541 For `1+1j` the value would be computed as:
5542 $$
5543 e^1 (\cos (1) + i \sin (1)) = 2.7182817 \times (0.5403023+0.84147096j)
5544 $$
5546 >>> x = tf.constant(1 + 1j)
5547 >>> tf.math.exp(x)
5548 <tf.Tensor: shape=(), dtype=complex128,
5549 numpy=(1.4686939399158851+2.2873552871788423j)>
5551 Args:
5552 x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
5553 `float32`, `float64`, `complex64`, `complex128`.
5554 name: A name for the operation (optional).
5556 Returns:
5557 A `tf.Tensor`. Has the same type as `x`.
5559 @compatibility(numpy)
5560 Equivalent to np.exp
5561 @end_compatibility
5562 """
5563 return gen_math_ops.exp(x, name)
5566# pylint: enable=g-docstring-has-escape
5569@tf_export("math.sobol_sample")
5570@dispatch.add_dispatch_support
5571def sobol_sample(dim, num_results, skip=0, dtype=dtypes.float32, name=None):
5572 """Generates points from the Sobol sequence.
5574 Creates a Sobol sequence with `num_results` samples. Each sample has dimension
5575 `dim`. Skips the first `skip` samples.
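For example (a small sketch; only the output shape is shown, since the
sample values depend on the Sobol direction numbers used):

```python
samples = tf.math.sobol_sample(dim=2, num_results=3)
samples.shape  # [3, 2]; every value lies in the unit interval
```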
5577 Args:
5578 dim: Positive scalar `Tensor` representing each sample's dimension.
5579 num_results: Positive scalar `Tensor` of dtype int32. The number of Sobol
5580 points to return in the output.
5581 skip: (Optional) Positive scalar `Tensor` of dtype int32. The number of
5582 initial points of the Sobol sequence to skip. Default value is 0.
5583 dtype: (Optional) The `tf.Dtype` of the sample. One of: `tf.float32` or
5584 `tf.float64`. Defaults to `tf.float32`.
5585 name: (Optional) Python `str` name prefixed to ops created by this function.
5587 Returns:
5588 `Tensor` of samples from Sobol sequence with `shape` [num_results, dim].
5589 """
5590 with ops.name_scope(name, "sobol", [dim, num_results, skip]):
5591 return gen_math_ops.sobol_sample(dim, num_results, skip, dtype=dtype)
5594@tf_export("math.rsqrt", v1=["math.rsqrt", "rsqrt"])
5595@dispatch.register_unary_elementwise_api
5596@dispatch.add_dispatch_support
5597@deprecation.deprecated_endpoints("rsqrt")
5598def rsqrt(x, name=None):
5599 """Computes reciprocal of square root of x element-wise.
5601 For example:
5603 >>> x = tf.constant([2., 0., -2.])
5604 >>> tf.math.rsqrt(x)
5605 <tf.Tensor: shape=(3,), dtype=float32,
5606 numpy=array([0.707, inf, nan], dtype=float32)>
5608 Args:
5609 x: A `tf.Tensor`. Must be one of the following types: `bfloat16`, `half`,
5610 `float32`, `float64`.
5611 name: A name for the operation (optional).
5613 Returns:
5614 A `tf.Tensor`. Has the same type as `x`.
5615 """
5616 return gen_math_ops.rsqrt(x, name)
5619@tf_export("math.acos", "acos")
5620@dispatch.register_unary_elementwise_api
5621@dispatch.add_dispatch_support
5622def acos(x, name=None):
5623 """Computes acos of x element-wise.
5625 Provided an input tensor, the `tf.math.acos` operation
5626 returns the inverse cosine of each element of the tensor.
5627 If `y = tf.math.cos(x)` then, `x = tf.math.acos(y)`.
5629 Input range is `[-1, 1]` and the output has a range of `[0, pi]`.
5631 For example:
5633 >>> x = tf.constant([1.0, -0.5, 3.4, 0.2, 0.0, -2], dtype = tf.float32)
5634 >>> tf.math.acos(x)
5635 <tf.Tensor: shape=(6,), dtype=float32,
5636 numpy= array([0. , 2.0943952, nan, 1.3694383, 1.5707964, nan],
5637 dtype=float32)>
5639 Args:
5640 x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`,
5641 `float32`, `float64`, `complex64`, `complex128`.
5642 name: A name for the operation (optional).
5644 Returns:
5645 A `Tensor`. Has the same type as x.
5646 """
5647 return gen_math_ops.acos(x, name)
5650@tf_export("math.floor", "floor")
5651@dispatch.register_unary_elementwise_api
5652@dispatch.add_dispatch_support
5653def floor(x, name=None):
5654 """Returns element-wise largest integer not greater than x.
The input range is `(-inf, inf)` and the output range consists of all
integer values.
5659 For example:
5661 >>> x = tf.constant([1.3324, -1.5, 5.555, -2.532, 0.99, float("inf")])
5662 >>> tf.floor(x).numpy()
5663 array([ 1., -2., 5., -3., 0., inf], dtype=float32)
5665 Args:
5666 x: A `Tensor`. Must be one of the following types: `bfloat16`, `half`,
5667 `float32`, `float64`.
5668 name: A name for the operation (optional).
5670 Returns:
5671 A `Tensor`. Has the same type as x.
5672 """
5673 return gen_math_ops.floor(x, name)
5676# Register elementwise ops that don't have Python wrappers.
5677# Binary elementwise ops.
5678dispatch.register_binary_elementwise_api(gen_bitwise_ops.bitwise_and)
5679dispatch.register_binary_elementwise_api(gen_bitwise_ops.bitwise_or)
5680dispatch.register_binary_elementwise_api(gen_bitwise_ops.bitwise_xor)
5681dispatch.register_binary_elementwise_api(gen_bitwise_ops.left_shift)
5682dispatch.register_binary_elementwise_api(gen_bitwise_ops.right_shift)
5683dispatch.register_unary_elementwise_api(gen_bitwise_ops.invert)
5684dispatch.register_binary_elementwise_api(gen_math_ops.atan2)
5685dispatch.register_binary_elementwise_api(gen_math_ops.floor_div)
5686dispatch.register_binary_elementwise_api(gen_math_ops.floor_mod)
5687dispatch.register_binary_elementwise_api(gen_math_ops.greater)
5688dispatch.register_binary_elementwise_api(gen_math_ops.greater_equal)
5689dispatch.register_binary_elementwise_api(gen_math_ops.less)
5690dispatch.register_binary_elementwise_api(gen_math_ops.less_equal)
5691dispatch.register_binary_elementwise_api(gen_math_ops.logical_and)
5692dispatch.register_binary_elementwise_api(gen_math_ops.logical_or)
5693dispatch.register_binary_elementwise_api(gen_math_ops.maximum)
5694dispatch.register_binary_elementwise_api(gen_math_ops.minimum)
5695dispatch.register_binary_elementwise_api(gen_math_ops.real_div)
5696dispatch.register_binary_elementwise_api(gen_math_ops.squared_difference)
5697dispatch.register_binary_elementwise_api(gen_math_ops.truncate_div)
5698dispatch.register_binary_elementwise_api(gen_math_ops.truncate_mod)
5699dispatch.register_binary_elementwise_api(gen_math_ops.xlogy)
5700dispatch.register_binary_elementwise_api(gen_math_ops.zeta)
5702# Unary elementwise ops.
5703dispatch.register_unary_elementwise_api(gen_math_ops.acosh)
5704dispatch.register_unary_elementwise_api(gen_math_ops.asin)
5705dispatch.register_unary_elementwise_api(gen_math_ops.asinh)
5706dispatch.register_unary_elementwise_api(gen_math_ops.atan)
5707dispatch.register_unary_elementwise_api(gen_math_ops.atanh)
5708dispatch.register_unary_elementwise_api(gen_math_ops.cos)
5709dispatch.register_unary_elementwise_api(gen_math_ops.cosh)
5710dispatch.register_unary_elementwise_api(gen_math_ops.digamma)
5711dispatch.register_unary_elementwise_api(gen_math_ops.erf)
5712dispatch.register_unary_elementwise_api(gen_math_ops.erfc)
5713dispatch.register_unary_elementwise_api(gen_math_ops.expm1)
5714dispatch.register_unary_elementwise_api(gen_math_ops.is_finite)
5715dispatch.register_unary_elementwise_api(gen_math_ops.is_inf)
5716dispatch.register_unary_elementwise_api(gen_math_ops.is_nan)
5717dispatch.register_unary_elementwise_api(gen_math_ops.lgamma)
5718dispatch.register_unary_elementwise_api(gen_math_ops.log)
5719dispatch.register_unary_elementwise_api(gen_math_ops.log1p)
5720dispatch.register_unary_elementwise_api(gen_math_ops.logical_not)
5721dispatch.register_unary_elementwise_api(gen_math_ops.neg)
dispatch.register_binary_elementwise_api(gen_math_ops.next_after)  # binary: next_after(x1, x2)
5723dispatch.register_unary_elementwise_api(gen_math_ops.reciprocal)
5724dispatch.register_unary_elementwise_api(gen_math_ops.rint)
5725dispatch.register_unary_elementwise_api(gen_math_ops.sin)
5726dispatch.register_unary_elementwise_api(gen_math_ops.sinh)
5727dispatch.register_unary_elementwise_api(gen_math_ops.square)
5728dispatch.register_unary_elementwise_api(gen_math_ops.tan)
5729dispatch.register_unary_elementwise_api(gen_math_ops.tanh)