1"""Python wrappers around TensorFlow ops.
3This file is MACHINE GENERATED! Do not edit.
4"""
6import collections
8from tensorflow.python import pywrap_tfe as pywrap_tfe
9from tensorflow.python.eager import context as _context
10from tensorflow.python.eager import core as _core
11from tensorflow.python.eager import execute as _execute
12from tensorflow.python.framework import dtypes as _dtypes
13from tensorflow.security.fuzzing.py import annotation_types as _atypes
15from tensorflow.python.framework import op_def_registry as _op_def_registry
16from tensorflow.python.framework import ops as _ops
17from tensorflow.python.framework import op_def_library as _op_def_library
18from tensorflow.python.util.deprecation import deprecated_endpoints
19from tensorflow.python.util import dispatch as _dispatch
20from tensorflow.python.util.tf_export import tf_export
22from typing import TypeVar
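
# Each wrapper below follows the standard dispatch pattern for generated op
# bindings: in eager mode it first attempts the C++ fast path
# (pywrap_tfe.TFE_Py_FastPathExecute); on _core._FallbackException it retries
# through the slower *_eager_fallback helper, which coerces attributes in
# Python; and in graph mode (or on _core._SymbolicException) it adds a node
# to the graph via _op_def_library._apply_op_helper, recording gradient
# information when required.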

def tpu_partitioned_input(inputs, partition_dim=0, name=None):
  r"""An op that groups a list of partitioned inputs together.

  Args:
    inputs: A list of at least 1 `Tensor` objects with the same type.
      A list of partitioned inputs which must have the same shape.
    partition_dim: An optional `int`. Defaults to `0`.
      An integer describing which dimension is partitioned. -1 means
      the inputs are replicated.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `inputs`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TPUPartitionedInput", name, inputs, "partition_dim",
        partition_dim)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tpu_partitioned_input_eager_fallback(
          inputs, partition_dim=partition_dim, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(inputs, (list, tuple)):
    raise TypeError(
        "Expected list for 'inputs' argument to "
        "'tpu_partitioned_input' Op, not %r." % inputs)
  _attr_N = len(inputs)
  if partition_dim is None:
    partition_dim = 0
  partition_dim = _execute.make_int(partition_dim, "partition_dim")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TPUPartitionedInput", inputs=inputs, partition_dim=partition_dim,
        name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"),
              "partition_dim", _op._get_attr_int("partition_dim"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TPUPartitionedInput", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

TPUPartitionedInput = tf_export("raw_ops.TPUPartitionedInput")(_ops.to_raw_op(tpu_partitioned_input))
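
# A minimal usage sketch for the exported raw op (an assumption for
# illustration only: the op is meant to run under TPU/XLA partitioning, and
# `shard0`/`shard1` are hypothetical per-core tensors):
#
#   import tensorflow as tf
#
#   shard0 = tf.constant([[1.0, 2.0]])
#   shard1 = tf.constant([[3.0, 4.0]])
#   # Group the two shards along dimension 0 into one logical input.
#   full = tf.raw_ops.TPUPartitionedInput(inputs=[shard0, shard1],
#                                         partition_dim=0)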

def tpu_partitioned_input_eager_fallback(inputs, partition_dim, name, ctx):
  if not isinstance(inputs, (list, tuple)):
    raise TypeError(
        "Expected list for 'inputs' argument to "
        "'tpu_partitioned_input' Op, not %r." % inputs)
  _attr_N = len(inputs)
  if partition_dim is None:
    partition_dim = 0
  partition_dim = _execute.make_int(partition_dim, "partition_dim")
  _attr_T, inputs = _execute.args_to_matching_eager(list(inputs), ctx, [])
  _inputs_flat = list(inputs)
  _attrs = ("N", _attr_N, "T", _attr_T, "partition_dim", partition_dim)
  _result = _execute.execute(b"TPUPartitionedInput", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TPUPartitionedInput", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

def tpu_partitioned_input_v2(inputs, partition_dims, is_packed=False, name=None):
  r"""An op that groups a list of partitioned inputs together. Supports ND sharding.

  Args:
    inputs: A list of at least 1 `Tensor` objects with the same type.
      A list of partitioned inputs which must have the same shape.
    partition_dims: A list of `ints`.
      A list of integers describing how each dimension is partitioned. An
      empty list indicates that the inputs are replicated.
    is_packed: An optional `bool`. Defaults to `False`.
      Indicates whether the input is a packed resource.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `inputs`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TPUPartitionedInputV2", name, inputs, "partition_dims",
        partition_dims, "is_packed", is_packed)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tpu_partitioned_input_v2_eager_fallback(
          inputs, partition_dims=partition_dims, is_packed=is_packed,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(inputs, (list, tuple)):
    raise TypeError(
        "Expected list for 'inputs' argument to "
        "'tpu_partitioned_input_v2' Op, not %r." % inputs)
  _attr_N = len(inputs)
  if not isinstance(partition_dims, (list, tuple)):
    raise TypeError(
        "Expected list for 'partition_dims' argument to "
        "'tpu_partitioned_input_v2' Op, not %r." % partition_dims)
  partition_dims = [_execute.make_int(_i, "partition_dims") for _i in partition_dims]
  if is_packed is None:
    is_packed = False
  is_packed = _execute.make_bool(is_packed, "is_packed")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TPUPartitionedInputV2", inputs=inputs, partition_dims=partition_dims,
        is_packed=is_packed, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("N", _op._get_attr_int("N"), "T", _op._get_attr_type("T"),
              "partition_dims", _op.get_attr("partition_dims"), "is_packed",
              _op._get_attr_bool("is_packed"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TPUPartitionedInputV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

TPUPartitionedInputV2 = tf_export("raw_ops.TPUPartitionedInputV2")(_ops.to_raw_op(tpu_partitioned_input_v2))
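
# A minimal usage sketch (an assumption for illustration only; requires a
# TPU/XLA partitioning context, and `shard0`/`shard1` are hypothetical
# per-core tensors):
#
#   import tensorflow as tf
#
#   shard0 = tf.constant([[1.0, 2.0]])
#   shard1 = tf.constant([[3.0, 4.0]])
#   # Two-way split along dimension 0, no split along dimension 1.
#   full = tf.raw_ops.TPUPartitionedInputV2(inputs=[shard0, shard1],
#                                           partition_dims=[2, 1],
#                                           is_packed=False)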

def tpu_partitioned_input_v2_eager_fallback(inputs, partition_dims, is_packed, name, ctx):
  if not isinstance(inputs, (list, tuple)):
    raise TypeError(
        "Expected list for 'inputs' argument to "
        "'tpu_partitioned_input_v2' Op, not %r." % inputs)
  _attr_N = len(inputs)
  if not isinstance(partition_dims, (list, tuple)):
    raise TypeError(
        "Expected list for 'partition_dims' argument to "
        "'tpu_partitioned_input_v2' Op, not %r." % partition_dims)
  partition_dims = [_execute.make_int(_i, "partition_dims") for _i in partition_dims]
  if is_packed is None:
    is_packed = False
  is_packed = _execute.make_bool(is_packed, "is_packed")
  _attr_T, inputs = _execute.args_to_matching_eager(list(inputs), ctx, [])
  _inputs_flat = list(inputs)
  _attrs = ("N", _attr_N, "T", _attr_T, "partition_dims", partition_dims,
            "is_packed", is_packed)
  _result = _execute.execute(b"TPUPartitionedInputV2", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TPUPartitionedInputV2", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

def tpu_partitioned_output(inputs, num_splits, partition_dim=0, name=None):
  r"""An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned

  outputs outside the XLA computation.

  Args:
    inputs: A `Tensor`.
      A tensor which represents the full shape of partitioned tensors.
    num_splits: An `int` that is `>= 1`.
    partition_dim: An optional `int`. Defaults to `0`.
      An integer describing which dimension is partitioned.
    name: A name for the operation (optional).

  Returns:
    A list of `num_splits` `Tensor` objects with the same type as `inputs`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TPUPartitionedOutput", name, inputs, "num_splits", num_splits,
        "partition_dim", partition_dim)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tpu_partitioned_output_eager_fallback(
          inputs, num_splits=num_splits, partition_dim=partition_dim,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  num_splits = _execute.make_int(num_splits, "num_splits")
  if partition_dim is None:
    partition_dim = 0
  partition_dim = _execute.make_int(partition_dim, "partition_dim")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TPUPartitionedOutput", inputs=inputs, num_splits=num_splits,
        partition_dim=partition_dim, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "num_splits",
              _op._get_attr_int("num_splits"), "partition_dim",
              _op._get_attr_int("partition_dim"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TPUPartitionedOutput", _inputs_flat, _attrs, _result)
  return _result

TPUPartitionedOutput = tf_export("raw_ops.TPUPartitionedOutput")(_ops.to_raw_op(tpu_partitioned_output))
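
# A minimal usage sketch (an assumption for illustration only; requires a
# TPU/XLA partitioning context, and `full` is a hypothetical full-shape
# tensor):
#
#   import tensorflow as tf
#
#   full = tf.constant([[1.0, 2.0], [3.0, 4.0]])
#   # Demultiplex the full tensor into two shards along dimension 0.
#   shards = tf.raw_ops.TPUPartitionedOutput(inputs=full, num_splits=2,
#                                            partition_dim=0)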

def tpu_partitioned_output_eager_fallback(inputs, num_splits, partition_dim, name, ctx):
  num_splits = _execute.make_int(num_splits, "num_splits")
  if partition_dim is None:
    partition_dim = 0
  partition_dim = _execute.make_int(partition_dim, "partition_dim")
  _attr_T, (inputs,) = _execute.args_to_matching_eager([inputs], ctx, [])
  _inputs_flat = [inputs]
  _attrs = ("T", _attr_T, "num_splits", num_splits, "partition_dim",
            partition_dim)
  _result = _execute.execute(b"TPUPartitionedOutput", num_splits,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TPUPartitionedOutput", _inputs_flat, _attrs, _result)
  return _result

def tpu_partitioned_output_v2(inputs, num_splits, partition_dims, name=None):
  r"""An op that demultiplexes a tensor to be sharded by XLA to a list of partitioned

  outputs outside the XLA computation. Supports ND sharding.

  Args:
    inputs: A `Tensor`.
      A tensor which represents the full shape of partitioned tensors.
    num_splits: An `int` that is `>= 1`.
    partition_dims: A list of `ints`.
      A list of integers describing how each dimension is partitioned. An
      empty list indicates that the inputs are replicated.
    name: A name for the operation (optional).

  Returns:
    A list of `num_splits` `Tensor` objects with the same type as `inputs`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TPUPartitionedOutputV2", name, inputs, "num_splits",
        num_splits, "partition_dims", partition_dims)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tpu_partitioned_output_v2_eager_fallback(
          inputs, num_splits=num_splits, partition_dims=partition_dims,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  num_splits = _execute.make_int(num_splits, "num_splits")
  if not isinstance(partition_dims, (list, tuple)):
    raise TypeError(
        "Expected list for 'partition_dims' argument to "
        "'tpu_partitioned_output_v2' Op, not %r." % partition_dims)
  partition_dims = [_execute.make_int(_i, "partition_dims") for _i in partition_dims]
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TPUPartitionedOutputV2", inputs=inputs, num_splits=num_splits,
        partition_dims=partition_dims, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "num_splits",
              _op._get_attr_int("num_splits"), "partition_dims",
              _op.get_attr("partition_dims"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TPUPartitionedOutputV2", _inputs_flat, _attrs, _result)
  return _result

TPUPartitionedOutputV2 = tf_export("raw_ops.TPUPartitionedOutputV2")(_ops.to_raw_op(tpu_partitioned_output_v2))
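
# A minimal usage sketch (an assumption for illustration only; requires a
# TPU/XLA partitioning context, `full` is a hypothetical full-shape tensor,
# and num_splits is assumed to equal the product of partition_dims):
#
#   import tensorflow as tf
#
#   full = tf.constant([[1.0, 2.0], [3.0, 4.0]])
#   # Two-way split along dimension 0, no split along dimension 1.
#   shards = tf.raw_ops.TPUPartitionedOutputV2(inputs=full, num_splits=2,
#                                              partition_dims=[2, 1])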

def tpu_partitioned_output_v2_eager_fallback(inputs, num_splits, partition_dims, name, ctx):
  num_splits = _execute.make_int(num_splits, "num_splits")
  if not isinstance(partition_dims, (list, tuple)):
    raise TypeError(
        "Expected list for 'partition_dims' argument to "
        "'tpu_partitioned_output_v2' Op, not %r." % partition_dims)
  partition_dims = [_execute.make_int(_i, "partition_dims") for _i in partition_dims]
  _attr_T, (inputs,) = _execute.args_to_matching_eager([inputs], ctx, [])
  _inputs_flat = [inputs]
  _attrs = ("T", _attr_T, "num_splits", num_splits, "partition_dims",
            partition_dims)
  _result = _execute.execute(b"TPUPartitionedOutputV2", num_splits,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TPUPartitionedOutputV2", _inputs_flat, _attrs, _result)
  return _result