Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/dtensor/python/gen_dtensor_ops.py: 15%
474 statements
« prev ^ index » next coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
1"""Python wrappers around TensorFlow ops.
3This file is MACHINE GENERATED! Do not edit.
4"""
6import collections
8from tensorflow.python import pywrap_tfe as pywrap_tfe
9from tensorflow.python.eager import context as _context
10from tensorflow.python.eager import core as _core
11from tensorflow.python.eager import execute as _execute
12from tensorflow.python.framework import dtypes as _dtypes
13from tensorflow.security.fuzzing.py import annotation_types as _atypes
15from tensorflow.python.framework import op_def_registry as _op_def_registry
16from tensorflow.python.framework import ops as _ops
17from tensorflow.python.framework import op_def_library as _op_def_library
18from tensorflow.python.util.deprecation import deprecated_endpoints
19from tensorflow.python.util import dispatch as _dispatch
20from tensorflow.python.util.tf_export import tf_export
22from typing import TypeVar
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('configure_and_initialize_global_tpu')
def configure_and_initialize_global_tpu(use_tfrt_host_runtime=True, name=None):
  r"""Machine-generated wrapper for the `ConfigureAndInitializeGlobalTPU` op.

  (Upstream op definition carries no documentation yet — "TODO: add doc".)

  Args:
    use_tfrt_host_runtime: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int32`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ConfigureAndInitializeGlobalTPU", name,
        "use_tfrt_host_runtime", use_tfrt_host_runtime)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      # Type-based API dispatch first; if unhandled, the slower eager
      # fallback path below.
      _result = _dispatcher_for_configure_and_initialize_global_tpu(
          (use_tfrt_host_runtime, name,), None)
      if _result is not NotImplemented:
        return _result
      return configure_and_initialize_global_tpu_eager_fallback(
          use_tfrt_host_runtime=use_tfrt_host_runtime, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Fallback dispatch list; re-raise when no dispatcher handles it.
      _result = _dispatch.dispatch(
            configure_and_initialize_global_tpu, (), dict(use_tfrt_host_runtime=use_tfrt_host_runtime,
                                                          name=name)
        )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_configure_and_initialize_global_tpu(
        (use_tfrt_host_runtime, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  if use_tfrt_host_runtime is None:
    use_tfrt_host_runtime = True
  use_tfrt_host_runtime = _execute.make_bool(use_tfrt_host_runtime, "use_tfrt_host_runtime")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ConfigureAndInitializeGlobalTPU", use_tfrt_host_runtime=use_tfrt_host_runtime,
                                           name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          configure_and_initialize_global_tpu, (), dict(use_tfrt_host_runtime=use_tfrt_host_runtime,
                                                        name=name)
      )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("use_tfrt_host_runtime",
              _op._get_attr_bool("use_tfrt_host_runtime"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ConfigureAndInitializeGlobalTPU", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op export and the type-based dispatcher hook for this wrapper.
ConfigureAndInitializeGlobalTPU = tf_export("raw_ops.ConfigureAndInitializeGlobalTPU")(_ops.to_raw_op(configure_and_initialize_global_tpu))
_dispatcher_for_configure_and_initialize_global_tpu = configure_and_initialize_global_tpu._tf_type_based_dispatcher.Dispatch
def configure_and_initialize_global_tpu_eager_fallback(use_tfrt_host_runtime, name, ctx):
  """Slow-path eager executor for the ConfigureAndInitializeGlobalTPU op."""
  if use_tfrt_host_runtime is None:
    use_tfrt_host_runtime = True
  runtime_flag = _execute.make_bool(use_tfrt_host_runtime, "use_tfrt_host_runtime")
  flat_inputs = []
  attr_tuple = ("use_tfrt_host_runtime", runtime_flag)
  outputs = _execute.execute(b"ConfigureAndInitializeGlobalTPU", 1,
                             inputs=flat_inputs, attrs=attr_tuple, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("ConfigureAndInitializeGlobalTPU", flat_inputs,
                             attr_tuple, outputs)
  result, = outputs
  return result
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('copy_to_mesh')
def copy_to_mesh(input, layout, name=None):
  r"""Machine-generated wrapper for the `CopyToMesh` op.

  (Upstream op definition carries no documentation yet — "TODO: add doc".)

  Args:
    input: A `Tensor`.
    layout: A `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CopyToMesh", name, input, "layout", layout)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      # Type-based API dispatch first; otherwise the eager fallback path.
      _result = _dispatcher_for_copy_to_mesh(
          (input, layout, name,), None)
      if _result is not NotImplemented:
        return _result
      return copy_to_mesh_eager_fallback(
          input, layout=layout, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Fallback dispatch list; re-raise when no dispatcher handles it.
      _result = _dispatch.dispatch(
            copy_to_mesh, (), dict(input=input, layout=layout, name=name)
        )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_copy_to_mesh(
        (input, layout, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  layout = _execute.make_str(layout, "layout")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CopyToMesh", input=input, layout=layout, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          copy_to_mesh, (), dict(input=input, layout=layout, name=name)
      )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("layout", _op.get_attr("layout"), "T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CopyToMesh", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op export and the type-based dispatcher hook for this wrapper.
CopyToMesh = tf_export("raw_ops.CopyToMesh")(_ops.to_raw_op(copy_to_mesh))
_dispatcher_for_copy_to_mesh = copy_to_mesh._tf_type_based_dispatcher.Dispatch
def copy_to_mesh_eager_fallback(input, layout, name, ctx):
  """Slow-path eager executor for the CopyToMesh op."""
  layout_attr = _execute.make_str(layout, "layout")
  dtype_attr, (input,) = _execute.args_to_matching_eager([input], ctx, [])
  flat_inputs = [input]
  attr_tuple = ("layout", layout_attr, "T", dtype_attr)
  outputs = _execute.execute(b"CopyToMesh", 1, inputs=flat_inputs,
                             attrs=attr_tuple, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("CopyToMesh", flat_inputs, attr_tuple, outputs)
  result, = outputs
  return result
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('copy_to_mesh_grad')
def copy_to_mesh_grad(input, forward_input, reference_layout="", name=None):
  r"""Machine-generated wrapper for the `CopyToMeshGrad` op.

  (Upstream op definition carries no documentation yet — "TODO: add doc".)

  Args:
    input: A `Tensor`.
    forward_input: A `Tensor`. Must have the same type as `input`.
    reference_layout: An optional `string`. Defaults to `""`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CopyToMeshGrad", name, input, forward_input,
        "reference_layout", reference_layout)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      # Type-based API dispatch first; otherwise the eager fallback path.
      _result = _dispatcher_for_copy_to_mesh_grad(
          (input, forward_input, reference_layout, name,), None)
      if _result is not NotImplemented:
        return _result
      return copy_to_mesh_grad_eager_fallback(
          input, forward_input, reference_layout=reference_layout, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Fallback dispatch list; re-raise when no dispatcher handles it.
      _result = _dispatch.dispatch(
            copy_to_mesh_grad, (), dict(input=input,
                                        forward_input=forward_input,
                                        reference_layout=reference_layout,
                                        name=name)
        )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_copy_to_mesh_grad(
        (input, forward_input, reference_layout, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  if reference_layout is None:
    reference_layout = ""
  reference_layout = _execute.make_str(reference_layout, "reference_layout")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CopyToMeshGrad", input=input, forward_input=forward_input,
                          reference_layout=reference_layout, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          copy_to_mesh_grad, (), dict(input=input,
                                      forward_input=forward_input,
                                      reference_layout=reference_layout,
                                      name=name)
      )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("reference_layout", _op.get_attr("reference_layout"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CopyToMeshGrad", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op export and the type-based dispatcher hook for this wrapper.
CopyToMeshGrad = tf_export("raw_ops.CopyToMeshGrad")(_ops.to_raw_op(copy_to_mesh_grad))
_dispatcher_for_copy_to_mesh_grad = copy_to_mesh_grad._tf_type_based_dispatcher.Dispatch
def copy_to_mesh_grad_eager_fallback(input, forward_input, reference_layout, name, ctx):
  """Slow-path eager executor for the CopyToMeshGrad op."""
  layout_attr = _execute.make_str(
      "" if reference_layout is None else reference_layout, "reference_layout")
  dtype_attr, (input, forward_input) = _execute.args_to_matching_eager(
      [input, forward_input], ctx, [])
  flat_inputs = [input, forward_input]
  attr_tuple = ("reference_layout", layout_attr, "T", dtype_attr)
  outputs = _execute.execute(b"CopyToMeshGrad", 1, inputs=flat_inputs,
                             attrs=attr_tuple, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("CopyToMeshGrad", flat_inputs, attr_tuple,
                             outputs)
  result, = outputs
  return result
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('d_tensor_restore_v2')
def d_tensor_restore_v2(prefix, tensor_names, shape_and_slices, input_shapes, input_layouts, dtypes, name=None):
  r"""Machine-generated wrapper for the `DTensorRestoreV2` op.

  (Upstream op definition carries no documentation yet — "TODO: add doc".)

  Args:
    prefix: A `Tensor` of type `string`.
    tensor_names: A `Tensor` of type `string`.
    shape_and_slices: A `Tensor` of type `string`.
    input_shapes: A list of shapes (each a `tf.TensorShape` or list of `ints`).
    input_layouts: A list of `strings`.
    dtypes: A list of `tf.DTypes` that has length `>= 1`.
    name: A name for the operation (optional).

  Returns:
    A list of `Tensor` objects of type `dtypes`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "DTensorRestoreV2", name, prefix, tensor_names,
        shape_and_slices, "input_shapes", input_shapes, "input_layouts",
        input_layouts, "dtypes", dtypes)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      # Type-based API dispatch first; otherwise the eager fallback path.
      _result = _dispatcher_for_d_tensor_restore_v2(
          (prefix, tensor_names, shape_and_slices, input_shapes,
          input_layouts, dtypes, name,), None)
      if _result is not NotImplemented:
        return _result
      return d_tensor_restore_v2_eager_fallback(
          prefix, tensor_names, shape_and_slices, input_shapes=input_shapes,
          input_layouts=input_layouts, dtypes=dtypes, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Fallback dispatch list; re-raise when no dispatcher handles it.
      _result = _dispatch.dispatch(
            d_tensor_restore_v2, (), dict(prefix=prefix,
                                          tensor_names=tensor_names,
                                          shape_and_slices=shape_and_slices,
                                          input_shapes=input_shapes,
                                          input_layouts=input_layouts,
                                          dtypes=dtypes, name=name)
        )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_d_tensor_restore_v2(
        (prefix, tensor_names, shape_and_slices, input_shapes, input_layouts,
        dtypes, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  # Validate and canonicalize the three list-valued attrs before building
  # the graph node.
  if not isinstance(input_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'input_shapes' argument to "
        "'d_tensor_restore_v2' Op, not %r." % input_shapes)
  input_shapes = [_execute.make_shape(_s, "input_shapes") for _s in input_shapes]
  if not isinstance(input_layouts, (list, tuple)):
    raise TypeError(
        "Expected list for 'input_layouts' argument to "
        "'d_tensor_restore_v2' Op, not %r." % input_layouts)
  input_layouts = [_execute.make_str(_s, "input_layouts") for _s in input_layouts]
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'d_tensor_restore_v2' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(_t, "dtypes") for _t in dtypes]
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "DTensorRestoreV2", prefix=prefix, tensor_names=tensor_names,
                            shape_and_slices=shape_and_slices,
                            input_shapes=input_shapes,
                            input_layouts=input_layouts, dtypes=dtypes,
                            name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          d_tensor_restore_v2, (), dict(prefix=prefix,
                                        tensor_names=tensor_names,
                                        shape_and_slices=shape_and_slices,
                                        input_shapes=input_shapes,
                                        input_layouts=input_layouts,
                                        dtypes=dtypes, name=name)
      )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if not _result:
    # Zero outputs (empty `dtypes`): return the Operation itself.
    return _op
  if _execute.must_record_gradient():
    _attrs = ("input_shapes", _op.get_attr("input_shapes"), "input_layouts",
              _op.get_attr("input_layouts"), "dtypes", _op.get_attr("dtypes"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "DTensorRestoreV2", _inputs_flat, _attrs, _result)
  return _result
# Raw-op export and the type-based dispatcher hook for this wrapper.
DTensorRestoreV2 = tf_export("raw_ops.DTensorRestoreV2")(_ops.to_raw_op(d_tensor_restore_v2))
_dispatcher_for_d_tensor_restore_v2 = d_tensor_restore_v2._tf_type_based_dispatcher.Dispatch
def d_tensor_restore_v2_eager_fallback(prefix, tensor_names, shape_and_slices, input_shapes, input_layouts, dtypes, name, ctx):
  """Slow-path eager executor for the DTensorRestoreV2 op."""
  # Validate and canonicalize the three list-valued attrs.
  if not isinstance(input_shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'input_shapes' argument to "
        "'d_tensor_restore_v2' Op, not %r." % input_shapes)
  input_shapes = [_execute.make_shape(shp, "input_shapes")
                  for shp in input_shapes]
  if not isinstance(input_layouts, (list, tuple)):
    raise TypeError(
        "Expected list for 'input_layouts' argument to "
        "'d_tensor_restore_v2' Op, not %r." % input_layouts)
  input_layouts = [_execute.make_str(layout, "input_layouts")
                   for layout in input_layouts]
  if not isinstance(dtypes, (list, tuple)):
    raise TypeError(
        "Expected list for 'dtypes' argument to "
        "'d_tensor_restore_v2' Op, not %r." % dtypes)
  dtypes = [_execute.make_type(dt, "dtypes") for dt in dtypes]
  # All three op inputs are string tensors.
  flat_inputs = [
      _ops.convert_to_tensor(prefix, _dtypes.string),
      _ops.convert_to_tensor(tensor_names, _dtypes.string),
      _ops.convert_to_tensor(shape_and_slices, _dtypes.string),
  ]
  attr_tuple = ("input_shapes", input_shapes, "input_layouts", input_layouts,
                "dtypes", dtypes)
  outputs = _execute.execute(b"DTensorRestoreV2", len(dtypes),
                             inputs=flat_inputs, attrs=attr_tuple, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("DTensorRestoreV2", flat_inputs, attr_tuple,
                             outputs)
  return outputs
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('d_tensor_set_global_tpu_array')
def d_tensor_set_global_tpu_array(topology, name=None):
  r"""Machine-generated wrapper for the `DTensorSetGlobalTPUArray` op.

  (Upstream op definition carries no documentation yet — "TODO: add doc".)

  Args:
    topology: A `Tensor` of type `string`.
    name: A name for the operation (optional).

  Returns:
    The created Operation.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "DTensorSetGlobalTPUArray", name, topology)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      # Type-based API dispatch first; otherwise the eager fallback path.
      _result = _dispatcher_for_d_tensor_set_global_tpu_array(
          (topology, name,), None)
      if _result is not NotImplemented:
        return _result
      return d_tensor_set_global_tpu_array_eager_fallback(
          topology, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Fallback dispatch list; re-raise when no dispatcher handles it.
      _result = _dispatch.dispatch(
            d_tensor_set_global_tpu_array, (), dict(topology=topology,
                                                    name=name)
        )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_d_tensor_set_global_tpu_array(
        (topology, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "DTensorSetGlobalTPUArray", topology=topology, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          d_tensor_set_global_tpu_array, (), dict(topology=topology,
                                                  name=name)
      )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  # Stateful op with no outputs: return the Operation itself.
  return _op
# Raw-op export and the type-based dispatcher hook for this wrapper.
DTensorSetGlobalTPUArray = tf_export("raw_ops.DTensorSetGlobalTPUArray")(_ops.to_raw_op(d_tensor_set_global_tpu_array))
_dispatcher_for_d_tensor_set_global_tpu_array = d_tensor_set_global_tpu_array._tf_type_based_dispatcher.Dispatch
def d_tensor_set_global_tpu_array_eager_fallback(topology, name, ctx):
  """Slow-path eager executor for DTensorSetGlobalTPUArray (no outputs)."""
  topology_tensor = _ops.convert_to_tensor(topology, _dtypes.string)
  _execute.execute(b"DTensorSetGlobalTPUArray", 0, inputs=[topology_tensor],
                   attrs=None, ctx=ctx, name=name)
  return None
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('relayout')
def relayout(input, layout, name=None):
  r"""Machine-generated wrapper for the `Relayout` op.

  (Upstream op definition carries no documentation yet — "TODO: add doc".)

  Args:
    input: A `Tensor`.
    layout: A `string`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "Relayout", name, input, "layout", layout)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      # Type-based API dispatch first; otherwise the eager fallback path.
      _result = _dispatcher_for_relayout(
          (input, layout, name,), None)
      if _result is not NotImplemented:
        return _result
      return relayout_eager_fallback(
          input, layout=layout, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Fallback dispatch list; re-raise when no dispatcher handles it.
      _result = _dispatch.dispatch(
            relayout, (), dict(input=input, layout=layout, name=name)
        )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_relayout(
        (input, layout, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  layout = _execute.make_str(layout, "layout")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Relayout", input=input, layout=layout, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          relayout, (), dict(input=input, layout=layout, name=name)
      )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("layout", _op.get_attr("layout"), "T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Relayout", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op export and the type-based dispatcher hook for this wrapper.
Relayout = tf_export("raw_ops.Relayout")(_ops.to_raw_op(relayout))
_dispatcher_for_relayout = relayout._tf_type_based_dispatcher.Dispatch
def relayout_eager_fallback(input, layout, name, ctx):
  """Slow-path eager executor for the Relayout op."""
  layout_attr = _execute.make_str(layout, "layout")
  dtype_attr, (input,) = _execute.args_to_matching_eager([input], ctx, [])
  flat_inputs = [input]
  attr_tuple = ("layout", layout_attr, "T", dtype_attr)
  outputs = _execute.execute(b"Relayout", 1, inputs=flat_inputs,
                             attrs=attr_tuple, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("Relayout", flat_inputs, attr_tuple, outputs)
  result, = outputs
  return result
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('relayout_grad')
def relayout_grad(input, forward_input, name=None):
  r"""Machine-generated wrapper for the `RelayoutGrad` op.

  (Upstream op definition carries no documentation yet — "TODO: add doc".)

  Args:
    input: A `Tensor`.
    forward_input: A `Tensor`. Must have the same type as `input`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "RelayoutGrad", name, input, forward_input)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      # Type-based API dispatch first; otherwise the eager fallback path.
      _result = _dispatcher_for_relayout_grad(
          (input, forward_input, name,), None)
      if _result is not NotImplemented:
        return _result
      return relayout_grad_eager_fallback(
          input, forward_input, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Fallback dispatch list; re-raise when no dispatcher handles it.
      _result = _dispatch.dispatch(
            relayout_grad, (), dict(input=input, forward_input=forward_input,
                                    name=name)
        )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_relayout_grad(
        (input, forward_input, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "RelayoutGrad", input=input, forward_input=forward_input, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          relayout_grad, (), dict(input=input, forward_input=forward_input,
                                  name=name)
      )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "RelayoutGrad", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op export and the type-based dispatcher hook for this wrapper.
RelayoutGrad = tf_export("raw_ops.RelayoutGrad")(_ops.to_raw_op(relayout_grad))
_dispatcher_for_relayout_grad = relayout_grad._tf_type_based_dispatcher.Dispatch
def relayout_grad_eager_fallback(input, forward_input, name, ctx):
  """Slow-path eager executor for the RelayoutGrad op."""
  dtype_attr, (input, forward_input) = _execute.args_to_matching_eager(
      [input, forward_input], ctx, [])
  flat_inputs = [input, forward_input]
  attr_tuple = ("T", dtype_attr)
  outputs = _execute.execute(b"RelayoutGrad", 1, inputs=flat_inputs,
                             attrs=attr_tuple, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("RelayoutGrad", flat_inputs, attr_tuple, outputs)
  result, = outputs
  return result
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('shutdown_tpu_system')
def shutdown_tpu_system(name=None):
  r"""Machine-generated wrapper for the `ShutdownTPUSystem` op.

  (Upstream op definition carries no documentation yet — "TODO: add doc".)

  Args:
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `bool`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute the op directly through the C API.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "ShutdownTPUSystem", name)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      # Type-based API dispatch first; otherwise the eager fallback path.
      _result = _dispatcher_for_shutdown_tpu_system(
          (name,), None)
      if _result is not NotImplemented:
        return _result
      return shutdown_tpu_system_eager_fallback(
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Fallback dispatch list; re-raise when no dispatcher handles it.
      _result = _dispatch.dispatch(
            shutdown_tpu_system, (), dict(name=name)
        )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_shutdown_tpu_system(
        (name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "ShutdownTPUSystem", name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          shutdown_tpu_system, (), dict(name=name)
      )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "ShutdownTPUSystem", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op export and the type-based dispatcher hook for this wrapper.
ShutdownTPUSystem = tf_export("raw_ops.ShutdownTPUSystem")(_ops.to_raw_op(shutdown_tpu_system))
_dispatcher_for_shutdown_tpu_system = shutdown_tpu_system._tf_type_based_dispatcher.Dispatch
def shutdown_tpu_system_eager_fallback(name, ctx):
  """Slow-path eager executor for the ShutdownTPUSystem op."""
  flat_inputs = []
  outputs = _execute.execute(b"ShutdownTPUSystem", 1, inputs=flat_inputs,
                             attrs=None, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("ShutdownTPUSystem", flat_inputs, None, outputs)
  success, = outputs
  return success