Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/ragged/ragged_map_ops.py: 50%
14 statements
« prev ^ index » next coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
1# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ==============================================================================
15"""Functional operations for RaggedTensors."""
17from tensorflow.python.ops.ragged import ragged_tensor
18from tensorflow.python.util import nest
19from tensorflow.python.util.lazy_loader import LazyLoader
# Load `tensorflow.python.ops.map_fn` lazily rather than at module import
# time — presumably to break a circular import between map_fn and the ragged
# ops; TODO(review): confirm against the package's import graph.
map_fn_lib = LazyLoader(
    "map_fn_lib", globals(),
    "tensorflow.python.ops.map_fn")
def map_fn(fn,
           elems,
           dtype=None,
           parallel_iterations=None,
           back_prop=True,
           swap_memory=False,
           infer_shape=True,
           name=None):
  """Maps `fn` over the elements unpacked from `elems` along dimension 0.

  Applies the callable `fn` to each element sliced from `elems` along the
  first dimension, from first to last, and stacks the results. `elems` may be
  a single (potentially ragged) tensor or a (possibly nested) structure of
  tensors, in which case every tensor in the structure must share the same
  first (unpack) dimension and `fn` receives a matching structure of slices.

  `fn` may return a different structure than it receives. In that case the
  `dtype` argument is mandatory and must mirror the structure of `fn`'s
  output. Use `RaggedTensorType` inside `dtype` to declare that an output is
  a `RaggedTensor`.

  To transform only the nonzero values of a `SparseTensor`, prefer building a
  new `SparseTensor` directly:

  ```python
  result = SparseTensor(input.indices, fn(input.values), input.dense_shape)
  ```

  or, when `fn` is not expressible as TensorFlow ops:

  ```python
  result = SparseTensor(
      input.indices, map_fn(fn, input.values), input.dense_shape)
  ```

  When executing eagerly, `map_fn` does not run iterations in parallel even
  if `parallel_iterations > 1`. To regain parallelism, wrap the `map_fn`
  call in a `tf.contrib.eager.defun`-decorated function:

  ```python
  # Assume the function being used in map_fn is fn.
  # To ensure map_fn calls fn in parallel, use the defun decorator.
  @tf.contrib.eager.defun
  def func(tensor):
    return tf.map_fn(fn, tensor)
  ```

  Note that inside `defun`, non-TensorFlow Python side effects are not
  executed; debug without `defun`, then enable it for performance.

  Args:
    fn: The callable to be performed. It accepts one argument, which will
      have the same (possibly nested) structure as `elems`. Its output must
      have the same structure as `dtype` if one is provided, otherwise it
      must have the same structure as `elems`.
    elems: A tensor or (possibly nested) sequence of tensors, each of which
      will be unpacked along their first dimension. The nested sequence of
      the resulting slices will be applied to `fn`.
    dtype: (optional) The output type(s) of `fn`. If `fn` returns a structure
      of Tensors differing from the structure of `elems`, then `dtype` is not
      optional and must have the same structure as the output of `fn`. Use
      `RaggedTensorType` to declare an output of type `RaggedTensor`.
    parallel_iterations: (optional) The number of iterations allowed to run
      in parallel. When graph building, the default value is 10. While
      executing eagerly, the default value is set to 1.
    back_prop: (optional) True enables support for back propagation.
    swap_memory: (optional) True enables GPU-CPU memory swapping.
    infer_shape: (optional) False disables tests for consistent output
      shapes.
    name: (optional) Name prefix for the returned tensors.

  Returns:
    A possibly nested sequence of potentially ragged tensors. Each tensor
    packs the results of applying `fn` to tensors unpacked from `elems`
    along the first dimension, from first to last.

  Raises:
    TypeError: if `fn` is not callable or the structure of the output of
      `fn` and `dtype` do not match, or if elems is a SparseTensor.
    ValueError: if the lengths of the output of `fn` and `dtype` do not
      match.

  #### Examples:

  ```python
  elems = np.array([1, 2, 3, 4, 5, 6])
  squares = map_fn(lambda x: x * x, elems)
  # squares == [1, 4, 9, 16, 25, 36]
  ```

  ```python
  elems = (np.array([1, 2, 3]), np.array([-1, 1, -1]))
  alternate = map_fn(lambda x: x[0] * x[1], elems, dtype=tf.int64)
  # alternate == [-1, 2, -3]
  ```

  ```python
  elems = np.array([1, 2, 3])
  alternates = map_fn(lambda x: (x, -x), elems, dtype=(tf.int64, tf.int64))
  # alternates[0] == [1, 2, 3]
  # alternates[1] == [-1, -2, -3]
  ```

  ```python
  elems=ragged.constant([[1, 2, 3], [4, 5], [6, 7]])
  mean = map_fn(tf.reduce_mean, elems)
  # mean == [2, 4, 6]
  ```

  ```python
  elems=ragged.constant([[1, 2, 3], [4, 5], [6, 7]], dtype=tf.int64)
  out = map_fn(fn=lambda x: x+1, elems,
    dtype=ragged.RaggedTensorType(type=tf.int64, ragged_rank=0))
  # out = tf.ragged.constant([[2, 3, 4], [5, 6], [7, 8]])
  ```
  """
  # Default the output signature to the element dtypes when none was given.
  output_signature = dtype
  if output_signature is None:
    output_signature = nest.map_structure(lambda e: e.dtype, elems)
  # Normalize any RaggedTensorType entries into RaggedTensorSpec objects,
  # which is the form the underlying map_fn implementation expects.
  output_signature = nest.map_structure(_ragged_type_to_spec,
                                        output_signature)
  return map_fn_lib.map_fn(fn,
                           elems,
                           output_signature,
                           parallel_iterations,
                           back_prop,
                           swap_memory,
                           infer_shape,
                           name)
def _ragged_type_to_spec(t):
  """Converts a RaggedTensorType to a RaggedTensorSpec; passes others through.

  Args:
    t: A dtype-like value; only `ragged_tensor.RaggedTensorType` instances
      are transformed.

  Returns:
    A `ragged_tensor.RaggedTensorSpec` when `t` is a `RaggedTensorType`;
    otherwise `t` unchanged.
  """
  if not isinstance(t, ragged_tensor.RaggedTensorType):
    return t
  # Note: need to adjust ragged_rank by 1, since RaggedTensorSpec gives the
  # type for the mapped `fn` output, but RaggedTensorType gives the type for
  # the result of stacking the mapped `fn` outputs.
  return ragged_tensor.RaggedTensorSpec(
      None, t.dtype, t.ragged_rank - 1, t.row_splits_dtype)