Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/gen_linalg_ops.py: 11%
1366 statements
« prev ^ index » next coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
« prev ^ index » next coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
1"""Python wrappers around TensorFlow ops.
3This file is MACHINE GENERATED! Do not edit.
4"""
6import collections
8from tensorflow.python import pywrap_tfe as pywrap_tfe
9from tensorflow.python.eager import context as _context
10from tensorflow.python.eager import core as _core
11from tensorflow.python.eager import execute as _execute
12from tensorflow.python.framework import dtypes as _dtypes
13from tensorflow.security.fuzzing.py import annotation_types as _atypes
15from tensorflow.python.framework import op_def_registry as _op_def_registry
16from tensorflow.python.framework import ops as _ops
17from tensorflow.python.framework import op_def_library as _op_def_library
18from tensorflow.python.util.deprecation import deprecated_endpoints
19from tensorflow.python.util import dispatch as _dispatch
20from tensorflow.python.util.tf_export import tf_export
22from typing import TypeVar
def banded_triangular_solve(matrix, rhs, lower=True, adjoint=False, name=None):
  r"""Dispatches the `BandedTriangularSolve` TensorFlow op.

  The math is implemented by the registered C++ kernel; presumably it solves
  banded triangular linear systems -- confirm against the op registry.

  Args:
    matrix: A `Tensor`. Must be one of the following types: `float64`, `float32`, `half`, `complex64`, `complex128`.
    rhs: A `Tensor`. Must have the same type as `matrix`.
    lower: An optional `bool`. Defaults to `True`.
    adjoint: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `matrix`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "BandedTriangularSolve", name, matrix, rhs, "lower", lower,
        "adjoint", adjoint)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected these arguments; use the slower eager path below.
      pass
    try:
      return banded_triangular_solve_eager_fallback(
          matrix, rhs, lower=lower, adjoint=adjoint, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if lower is None:
    lower = True
  lower = _execute.make_bool(lower, "lower")
  if adjoint is None:
    adjoint = False
  adjoint = _execute.make_bool(adjoint, "adjoint")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BandedTriangularSolve", matrix=matrix, rhs=rhs, lower=lower,
                                 adjoint=adjoint, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Attrs are re-read from the created op so recorded values match the graph.
    _attrs = ("lower", _op._get_attr_bool("lower"), "adjoint",
              _op._get_attr_bool("adjoint"), "T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BandedTriangularSolve", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Exported as tf.raw_ops.BandedTriangularSolve.
BandedTriangularSolve = tf_export("raw_ops.BandedTriangularSolve")(_ops.to_raw_op(banded_triangular_solve))
def banded_triangular_solve_eager_fallback(matrix, rhs, lower, adjoint, name, ctx):
  """Slow-path eager execution of `BandedTriangularSolve`.

  Normalizes the boolean attributes, matches `matrix`/`rhs` to a common
  dtype, executes the op, and records the gradient when taping is active.
  """
  lower = _execute.make_bool(True if lower is None else lower, "lower")
  adjoint = _execute.make_bool(False if adjoint is None else adjoint, "adjoint")
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [matrix, rhs], ctx,
      [_dtypes.float64, _dtypes.float32, _dtypes.half, _dtypes.complex64, _dtypes.complex128, ])
  matrix, rhs = _inputs_T
  flat_inputs = [matrix, rhs]
  op_attrs = ("lower", lower, "adjoint", adjoint, "T", _attr_T)
  outputs = _execute.execute(b"BandedTriangularSolve", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("BandedTriangularSolve", flat_inputs, op_attrs,
                             outputs)
  result, = outputs
  return result
def batch_cholesky(input, name=None):
  r"""Dispatches the `BatchCholesky` TensorFlow op.

  Semantics live in the registered C++ kernel; presumably a batched
  Cholesky factorization (cf. `cholesky` below) -- confirm in the op registry.

  Args:
    input: A `Tensor`. Must be one of the following types: `float64`, `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "BatchCholesky", name, input)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected these arguments; use the slower eager path below.
      pass
    try:
      return batch_cholesky_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchCholesky", input=input, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchCholesky", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Exported as tf.raw_ops.BatchCholesky.
BatchCholesky = tf_export("raw_ops.BatchCholesky")(_ops.to_raw_op(batch_cholesky))
def batch_cholesky_eager_fallback(input, name, ctx):
  """Slow-path eager execution of `BatchCholesky`."""
  _attr_T, (input,) = _execute.args_to_matching_eager(
      [input], ctx, [_dtypes.float64, _dtypes.float32, ])
  flat_inputs = [input]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"BatchCholesky", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("BatchCholesky", flat_inputs, op_attrs, outputs)
  result, = outputs
  return result
def batch_cholesky_grad(l, grad, name=None):
  r"""Dispatches the `BatchCholeskyGrad` TensorFlow op.

  Semantics live in the registered C++ kernel; presumably the gradient of
  the batched Cholesky factorization (cf. `cholesky_grad` below).

  Args:
    l: A `Tensor`. Must be one of the following types: `float32`, `float64`.
    grad: A `Tensor`. Must have the same type as `l`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `l`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "BatchCholeskyGrad", name, l, grad)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected these arguments; use the slower eager path below.
      pass
    try:
      return batch_cholesky_grad_eager_fallback(
          l, grad, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchCholeskyGrad", l=l, grad=grad, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchCholeskyGrad", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Exported as tf.raw_ops.BatchCholeskyGrad.
BatchCholeskyGrad = tf_export("raw_ops.BatchCholeskyGrad")(_ops.to_raw_op(batch_cholesky_grad))
def batch_cholesky_grad_eager_fallback(l, grad, name, ctx):
  """Slow-path eager execution of `BatchCholeskyGrad`."""
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [l, grad], ctx, [_dtypes.float32, _dtypes.float64, ])
  l, grad = _inputs_T
  flat_inputs = [l, grad]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"BatchCholeskyGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("BatchCholeskyGrad", flat_inputs, op_attrs,
                             outputs)
  result, = outputs
  return result
def batch_matrix_determinant(input, name=None):
  r"""Dispatches the `BatchMatrixDeterminant` TensorFlow op.

  Semantics live in the registered C++ kernel; presumably batched matrix
  determinants -- confirm in the op registry.

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "BatchMatrixDeterminant", name, input)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected these arguments; use the slower eager path below.
      pass
    try:
      return batch_matrix_determinant_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchMatrixDeterminant", input=input, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchMatrixDeterminant", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Exported as tf.raw_ops.BatchMatrixDeterminant.
BatchMatrixDeterminant = tf_export("raw_ops.BatchMatrixDeterminant")(_ops.to_raw_op(batch_matrix_determinant))
def batch_matrix_determinant_eager_fallback(input, name, ctx):
  """Slow-path eager execution of `BatchMatrixDeterminant`."""
  _attr_T, (input,) = _execute.args_to_matching_eager(
      [input], ctx,
      [_dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ])
  flat_inputs = [input]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"BatchMatrixDeterminant", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("BatchMatrixDeterminant", flat_inputs, op_attrs,
                             outputs)
  result, = outputs
  return result
def batch_matrix_inverse(input, adjoint=False, name=None):
  r"""Dispatches the `BatchMatrixInverse` TensorFlow op.

  Semantics live in the registered C++ kernel; presumably batched matrix
  inversion (optionally of the adjoint) -- confirm in the op registry.

  Args:
    input: A `Tensor`. Must be one of the following types: `float64`, `float32`.
    adjoint: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "BatchMatrixInverse", name, input, "adjoint", adjoint)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected these arguments; use the slower eager path below.
      pass
    try:
      return batch_matrix_inverse_eager_fallback(
          input, adjoint=adjoint, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if adjoint is None:
    adjoint = False
  adjoint = _execute.make_bool(adjoint, "adjoint")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchMatrixInverse", input=input, adjoint=adjoint, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("adjoint", _op._get_attr_bool("adjoint"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchMatrixInverse", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Exported as tf.raw_ops.BatchMatrixInverse.
BatchMatrixInverse = tf_export("raw_ops.BatchMatrixInverse")(_ops.to_raw_op(batch_matrix_inverse))
def batch_matrix_inverse_eager_fallback(input, adjoint, name, ctx):
  """Slow-path eager execution of `BatchMatrixInverse`."""
  adjoint = _execute.make_bool(False if adjoint is None else adjoint, "adjoint")
  _attr_T, (input,) = _execute.args_to_matching_eager(
      [input], ctx, [_dtypes.float64, _dtypes.float32, ])
  flat_inputs = [input]
  op_attrs = ("adjoint", adjoint, "T", _attr_T)
  outputs = _execute.execute(b"BatchMatrixInverse", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("BatchMatrixInverse", flat_inputs, op_attrs,
                             outputs)
  result, = outputs
  return result
def batch_matrix_solve(matrix, rhs, adjoint=False, name=None):
  r"""Dispatches the `BatchMatrixSolve` TensorFlow op.

  Semantics live in the registered C++ kernel; presumably batched linear
  system solves -- confirm in the op registry.

  Args:
    matrix: A `Tensor`. Must be one of the following types: `float64`, `float32`.
    rhs: A `Tensor`. Must have the same type as `matrix`.
    adjoint: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `matrix`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "BatchMatrixSolve", name, matrix, rhs, "adjoint", adjoint)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected these arguments; use the slower eager path below.
      pass
    try:
      return batch_matrix_solve_eager_fallback(
          matrix, rhs, adjoint=adjoint, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if adjoint is None:
    adjoint = False
  adjoint = _execute.make_bool(adjoint, "adjoint")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchMatrixSolve", matrix=matrix, rhs=rhs, adjoint=adjoint,
                            name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("adjoint", _op._get_attr_bool("adjoint"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchMatrixSolve", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Exported as tf.raw_ops.BatchMatrixSolve.
BatchMatrixSolve = tf_export("raw_ops.BatchMatrixSolve")(_ops.to_raw_op(batch_matrix_solve))
def batch_matrix_solve_eager_fallback(matrix, rhs, adjoint, name, ctx):
  """Slow-path eager execution of `BatchMatrixSolve`."""
  adjoint = _execute.make_bool(False if adjoint is None else adjoint, "adjoint")
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [matrix, rhs], ctx, [_dtypes.float64, _dtypes.float32, ])
  matrix, rhs = _inputs_T
  flat_inputs = [matrix, rhs]
  op_attrs = ("adjoint", adjoint, "T", _attr_T)
  outputs = _execute.execute(b"BatchMatrixSolve", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("BatchMatrixSolve", flat_inputs, op_attrs,
                             outputs)
  result, = outputs
  return result
def batch_matrix_solve_ls(matrix, rhs, l2_regularizer, fast=True, name=None):
  r"""Dispatches the `BatchMatrixSolveLs` TensorFlow op.

  Semantics live in the registered C++ kernel; presumably batched
  least-squares solves with L2 regularization -- confirm in the op registry.

  Args:
    matrix: A `Tensor`. Must be one of the following types: `float64`, `float32`.
    rhs: A `Tensor`. Must have the same type as `matrix`.
    l2_regularizer: A `Tensor` of type `float64`.
    fast: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `matrix`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "BatchMatrixSolveLs", name, matrix, rhs, l2_regularizer, "fast",
        fast)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected these arguments; use the slower eager path below.
      pass
    try:
      return batch_matrix_solve_ls_eager_fallback(
          matrix, rhs, l2_regularizer, fast=fast, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if fast is None:
    fast = True
  fast = _execute.make_bool(fast, "fast")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchMatrixSolveLs", matrix=matrix, rhs=rhs,
                              l2_regularizer=l2_regularizer, fast=fast,
                              name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "fast",
              _op._get_attr_bool("fast"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchMatrixSolveLs", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Exported as tf.raw_ops.BatchMatrixSolveLs.
BatchMatrixSolveLs = tf_export("raw_ops.BatchMatrixSolveLs")(_ops.to_raw_op(batch_matrix_solve_ls))
def batch_matrix_solve_ls_eager_fallback(matrix, rhs, l2_regularizer, fast, name, ctx):
  """Slow-path eager execution of `BatchMatrixSolveLs`."""
  fast = _execute.make_bool(True if fast is None else fast, "fast")
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [matrix, rhs], ctx, [_dtypes.float64, _dtypes.float32, ])
  matrix, rhs = _inputs_T
  # The regularizer is always float64, independent of T.
  l2_regularizer = _ops.convert_to_tensor(l2_regularizer, _dtypes.float64)
  flat_inputs = [matrix, rhs, l2_regularizer]
  op_attrs = ("T", _attr_T, "fast", fast)
  outputs = _execute.execute(b"BatchMatrixSolveLs", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("BatchMatrixSolveLs", flat_inputs, op_attrs,
                             outputs)
  result, = outputs
  return result
def batch_matrix_triangular_solve(matrix, rhs, lower=True, adjoint=False, name=None):
  r"""Dispatches the `BatchMatrixTriangularSolve` TensorFlow op.

  Semantics live in the registered C++ kernel; presumably batched triangular
  linear system solves -- confirm in the op registry.

  Args:
    matrix: A `Tensor`. Must be one of the following types: `float64`, `float32`.
    rhs: A `Tensor`. Must have the same type as `matrix`.
    lower: An optional `bool`. Defaults to `True`.
    adjoint: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `matrix`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "BatchMatrixTriangularSolve", name, matrix, rhs, "lower", lower,
        "adjoint", adjoint)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected these arguments; use the slower eager path below.
      pass
    try:
      return batch_matrix_triangular_solve_eager_fallback(
          matrix, rhs, lower=lower, adjoint=adjoint, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if lower is None:
    lower = True
  lower = _execute.make_bool(lower, "lower")
  if adjoint is None:
    adjoint = False
  adjoint = _execute.make_bool(adjoint, "adjoint")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchMatrixTriangularSolve", matrix=matrix, rhs=rhs, lower=lower,
                                      adjoint=adjoint, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("lower", _op._get_attr_bool("lower"), "adjoint",
              _op._get_attr_bool("adjoint"), "T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchMatrixTriangularSolve", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Exported as tf.raw_ops.BatchMatrixTriangularSolve.
BatchMatrixTriangularSolve = tf_export("raw_ops.BatchMatrixTriangularSolve")(_ops.to_raw_op(batch_matrix_triangular_solve))
def batch_matrix_triangular_solve_eager_fallback(matrix, rhs, lower, adjoint, name, ctx):
  """Slow-path eager execution of `BatchMatrixTriangularSolve`."""
  lower = _execute.make_bool(True if lower is None else lower, "lower")
  adjoint = _execute.make_bool(False if adjoint is None else adjoint, "adjoint")
  _attr_T, _inputs_T = _execute.args_to_matching_eager(
      [matrix, rhs], ctx, [_dtypes.float64, _dtypes.float32, ])
  matrix, rhs = _inputs_T
  flat_inputs = [matrix, rhs]
  op_attrs = ("lower", lower, "adjoint", adjoint, "T", _attr_T)
  outputs = _execute.execute(b"BatchMatrixTriangularSolve", 1,
                             inputs=flat_inputs, attrs=op_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("BatchMatrixTriangularSolve", flat_inputs,
                             op_attrs, outputs)
  result, = outputs
  return result
def batch_self_adjoint_eig(input, name=None):
  r"""Dispatches the `BatchSelfAdjointEig` TensorFlow op.

  Semantics live in the registered C++ kernel; presumably batched
  self-adjoint eigendecomposition (cf. `BatchSelfAdjointEigV2` below).

  Args:
    input: A `Tensor`. Must be one of the following types: `float64`, `float32`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "BatchSelfAdjointEig", name, input)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected these arguments; use the slower eager path below.
      pass
    try:
      return batch_self_adjoint_eig_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchSelfAdjointEig", input=input, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchSelfAdjointEig", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Exported as tf.raw_ops.BatchSelfAdjointEig.
BatchSelfAdjointEig = tf_export("raw_ops.BatchSelfAdjointEig")(_ops.to_raw_op(batch_self_adjoint_eig))
def batch_self_adjoint_eig_eager_fallback(input, name, ctx):
  """Slow-path eager execution of `BatchSelfAdjointEig`."""
  _attr_T, (input,) = _execute.args_to_matching_eager(
      [input], ctx, [_dtypes.float64, _dtypes.float32, ])
  flat_inputs = [input]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"BatchSelfAdjointEig", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("BatchSelfAdjointEig", flat_inputs, op_attrs,
                             outputs)
  result, = outputs
  return result
# Result structure for BatchSelfAdjointEigV2: two tensors under the named
# fields `e` and `v` (per the wrapper's docstring, eigenvalues/eigenvectors).
_BatchSelfAdjointEigV2Output = collections.namedtuple(
    "BatchSelfAdjointEigV2",
    ["e", "v"])
def batch_self_adjoint_eig_v2(input, compute_v=True, name=None):
  r"""Dispatches the `BatchSelfAdjointEigV2` TensorFlow op.

  Semantics live in the registered C++ kernel; presumably batched
  self-adjoint eigendecomposition with optional eigenvectors.

  Args:
    input: A `Tensor`. Must be one of the following types: `float64`, `float32`.
    compute_v: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (e, v).

    e: A `Tensor`. Has the same type as `input`.
    v: A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "BatchSelfAdjointEigV2", name, input, "compute_v", compute_v)
      _result = _BatchSelfAdjointEigV2Output._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected these arguments; use the slower eager path below.
      pass
    try:
      return batch_self_adjoint_eig_v2_eager_fallback(
          input, compute_v=compute_v, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if compute_v is None:
    compute_v = True
  compute_v = _execute.make_bool(compute_v, "compute_v")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchSelfAdjointEigV2", input=input, compute_v=compute_v, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("compute_v", _op._get_attr_bool("compute_v"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchSelfAdjointEigV2", _inputs_flat, _attrs, _result)
  _result = _BatchSelfAdjointEigV2Output._make(_result)
  return _result

# Exported as tf.raw_ops.BatchSelfAdjointEigV2.
BatchSelfAdjointEigV2 = tf_export("raw_ops.BatchSelfAdjointEigV2")(_ops.to_raw_op(batch_self_adjoint_eig_v2))
def batch_self_adjoint_eig_v2_eager_fallback(input, compute_v, name, ctx):
  """Slow-path eager execution of `BatchSelfAdjointEigV2`."""
  compute_v = _execute.make_bool(True if compute_v is None else compute_v,
                                 "compute_v")
  _attr_T, (input,) = _execute.args_to_matching_eager(
      [input], ctx, [_dtypes.float64, _dtypes.float32, ])
  flat_inputs = [input]
  op_attrs = ("compute_v", compute_v, "T", _attr_T)
  outputs = _execute.execute(b"BatchSelfAdjointEigV2", 2, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("BatchSelfAdjointEigV2", flat_inputs, op_attrs,
                             outputs)
  return _BatchSelfAdjointEigV2Output._make(outputs)
# Result structure for BatchSvd: three tensors under the named fields
# `s`, `u`, `v` (per the wrapper's docstring below).
_BatchSvdOutput = collections.namedtuple(
    "BatchSvd",
    ["s", "u", "v"])
def batch_svd(input, compute_uv=True, full_matrices=False, name=None):
  r"""Dispatches the `BatchSvd` TensorFlow op.

  Semantics live in the registered C++ kernel; presumably batched singular
  value decomposition -- confirm in the op registry.

  Args:
    input: A `Tensor`. Must be one of the following types: `float64`, `float32`, `complex64`, `complex128`.
    compute_uv: An optional `bool`. Defaults to `True`.
    full_matrices: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (s, u, v).

    s: A `Tensor`. Has the same type as `input`.
    u: A `Tensor`. Has the same type as `input`.
    v: A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "BatchSvd", name, input, "compute_uv", compute_uv,
        "full_matrices", full_matrices)
      _result = _BatchSvdOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected these arguments; use the slower eager path below.
      pass
    try:
      return batch_svd_eager_fallback(
          input, compute_uv=compute_uv, full_matrices=full_matrices,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if compute_uv is None:
    compute_uv = True
  compute_uv = _execute.make_bool(compute_uv, "compute_uv")
  if full_matrices is None:
    full_matrices = False
  full_matrices = _execute.make_bool(full_matrices, "full_matrices")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "BatchSvd", input=input, compute_uv=compute_uv,
                    full_matrices=full_matrices, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("compute_uv", _op._get_attr_bool("compute_uv"), "full_matrices",
              _op._get_attr_bool("full_matrices"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "BatchSvd", _inputs_flat, _attrs, _result)
  _result = _BatchSvdOutput._make(_result)
  return _result

# Exported as tf.raw_ops.BatchSvd.
BatchSvd = tf_export("raw_ops.BatchSvd")(_ops.to_raw_op(batch_svd))
def batch_svd_eager_fallback(input, compute_uv, full_matrices, name, ctx):
  """Slow-path eager execution of `BatchSvd`."""
  compute_uv = _execute.make_bool(True if compute_uv is None else compute_uv,
                                  "compute_uv")
  full_matrices = _execute.make_bool(
      False if full_matrices is None else full_matrices, "full_matrices")
  _attr_T, (input,) = _execute.args_to_matching_eager(
      [input], ctx,
      [_dtypes.float64, _dtypes.float32, _dtypes.complex64, _dtypes.complex128, ])
  flat_inputs = [input]
  op_attrs = ("compute_uv", compute_uv, "full_matrices", full_matrices, "T",
              _attr_T)
  outputs = _execute.execute(b"BatchSvd", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("BatchSvd", flat_inputs, op_attrs, outputs)
  return _BatchSvdOutput._make(outputs)
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('linalg.cholesky', v1=['linalg.cholesky', 'cholesky'])
@deprecated_endpoints('cholesky')
def cholesky(input, name=None):
  r"""Computes the Cholesky decomposition of one or more square matrices.

  The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
  form square matrices.

  The input has to be symmetric and positive definite. Only the lower-triangular
  part of the input will be used for this operation. The upper-triangular part
  will not be read.

  The output is a tensor of the same shape as the input
  containing the Cholesky decompositions for all input submatrices `[..., :, :]`.

  **Note**: The gradient computation on GPU is faster for large matrices but
  not for large batch dimensions when the submatrices are small. In this
  case it might be faster to use the CPU.

  Args:
    input: A `Tensor`. Must be one of the following types: `float64`, `float32`, `half`, `complex64`, `complex128`.
      Shape is `[..., M, M]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "Cholesky", name, input)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      # Give the type-based API dispatcher first crack before the fallback.
      _result = _dispatcher_for_cholesky(
          (input, name,), None)
      if _result is not NotImplemented:
        return _result
      return cholesky_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Legacy fallback-dispatch list gets a chance before re-raising.
      _result = _dispatch.dispatch(
            cholesky, (), dict(input=input, name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_cholesky(
        (input, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
          "Cholesky", input=input, name=name)
  except (TypeError, ValueError):
    # Legacy fallback-dispatch list gets a chance before re-raising.
    _result = _dispatch.dispatch(
          cholesky, (), dict(input=input, name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Cholesky", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Exported as tf.raw_ops.Cholesky.
Cholesky = tf_export("raw_ops.Cholesky")(_ops.to_raw_op(cholesky))
# Bound after the definition so the calls above resolve at run time.
_dispatcher_for_cholesky = cholesky._tf_type_based_dispatcher.Dispatch
def cholesky_eager_fallback(input, name, ctx):
  """Slow-path eager execution of `Cholesky`."""
  _attr_T, (input,) = _execute.args_to_matching_eager(
      [input], ctx,
      [_dtypes.float64, _dtypes.float32, _dtypes.half, _dtypes.complex64, _dtypes.complex128, ])
  flat_inputs = [input]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"Cholesky", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("Cholesky", flat_inputs, op_attrs, outputs)
  result, = outputs
  return result
def cholesky_grad(l, grad, name=None):
  r"""Computes the reverse mode backpropagated gradient of the Cholesky algorithm.

  For an explanation see "Differentiation of the Cholesky algorithm" by
  Iain Murray http://arxiv.org/abs/1602.07527.

  Args:
    l: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`.
      Output of batch Cholesky algorithm l = cholesky(A). Shape is `[..., M, M]`.
      Algorithm depends only on lower triangular part of the innermost matrices of
      this tensor.
    grad: A `Tensor`. Must have the same type as `l`.
      df/dl where f is some scalar function. Shape is `[..., M, M]`.
      Algorithm depends only on lower triangular part of the innermost matrices of
      this tensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `l`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: run the op directly through the C extension.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "CholeskyGrad", name, l, grad)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected these arguments; use the slower eager path below.
      pass
    try:
      return cholesky_grad_eager_fallback(
          l, grad, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "CholeskyGrad", l=l, grad=grad, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "CholeskyGrad", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result

# Exported as tf.raw_ops.CholeskyGrad.
CholeskyGrad = tf_export("raw_ops.CholeskyGrad")(_ops.to_raw_op(cholesky_grad))
def cholesky_grad_eager_fallback(l, grad, name, ctx):
  """Slow-path eager execution of the CholeskyGrad op."""
  accepted = [_dtypes.half, _dtypes.float32, _dtypes.float64, ]
  dtype_attr, (l, grad) = _execute.args_to_matching_eager([l, grad], ctx, accepted)
  flat_inputs = [l, grad]
  op_attrs = ("T", dtype_attr)
  outputs = _execute.execute(b"CholeskyGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("CholeskyGrad", flat_inputs, op_attrs, outputs)
  # Single-output op: unwrap the one-element result list.
  result, = outputs
  return result
# Structured result of the Eig op: eigenvalues `e` and (optionally computed)
# eigenvectors `v`.
_EigOutput = collections.namedtuple(
    "Eig",
    ["e", "v"])
def eig(input, Tout, compute_v=True, name=None):
  r"""Computes the eigen decomposition of one or more square matrices.

  Computes the eigenvalues and (optionally) right eigenvectors of each inner matrix in
  `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues
  are sorted in non-decreasing order.

  ```python
  # a is a tensor.
  # e is a tensor of eigenvalues.
  # v is a tensor of eigenvectors.
  e, v = eig(a)
  e = eig(a, compute_v=False)
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float32`, `float64`, `complex64`, `complex128`.
      `Tensor` input of shape `[N, N]`.
    Tout: A `tf.DType` from: `tf.complex64, tf.complex128`.
    compute_v: An optional `bool`. Defaults to `True`.
      If `True` then eigenvectors will be computed and returned in `v`.
      Otherwise, only the eigenvalues will be computed.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (e, v).

    e: A `Tensor` of type `Tout`.
    v: A `Tensor` of type `Tout`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "Eig", name, input, "compute_v", compute_v, "Tout", Tout)
      _result = _EigOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path unavailable; fall through to the Python eager fallback.
      pass
    try:
      return eig_eager_fallback(
          input, compute_v=compute_v, Tout=Tout, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: canonicalize attrs and add an Eig node to the graph.
  Tout = _execute.make_type(Tout, "Tout")
  if compute_v is None:
    compute_v = True
  compute_v = _execute.make_bool(compute_v, "compute_v")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Eig", input=input, Tout=Tout, compute_v=compute_v, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("compute_v", _op._get_attr_bool("compute_v"), "T",
              _op._get_attr_type("T"), "Tout", _op._get_attr_type("Tout"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Eig", _inputs_flat, _attrs, _result)
  _result = _EigOutput._make(_result)
  return _result

# Raw-op export: `tf.raw_ops.Eig`.
Eig = tf_export("raw_ops.Eig")(_ops.to_raw_op(eig))
def eig_eager_fallback(input, Tout, compute_v, name, ctx):
  """Slow-path eager execution of the Eig op."""
  # Canonicalize attribute values exactly as the graph path does.
  Tout = _execute.make_type(Tout, "Tout")
  if compute_v is None:
    compute_v = True
  compute_v = _execute.make_bool(compute_v, "compute_v")
  accepted = [_dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]
  dtype_attr, (input,) = _execute.args_to_matching_eager([input], ctx, accepted)
  flat_inputs = [input]
  op_attrs = ("compute_v", compute_v, "T", dtype_attr, "Tout", Tout)
  outputs = _execute.execute(b"Eig", 2, inputs=flat_inputs, attrs=op_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("Eig", flat_inputs, op_attrs, outputs)
  return _EigOutput._make(outputs)
def einsum(inputs, equation, name=None):
  r"""Tensor contraction according to Einstein summation convention.

  Implements generalized Tensor contraction and reduction. Each input Tensor must
  have a corresponding input subscript appearing in the comma-separated left-hand
  side of the equation. The right-hand side of the equation consists of the
  output subscript. The input subscripts and the output subscript should consist
  of zero or more named axis labels and at most one ellipsis (`...`).

  The named axis labels may be any single character other than those having
  special meaning, namely `,.->`. The behavior of this Op is undefined if it
  receives an ill-formatted equation; since the validation is done at
  graph-building time, we omit format validation checks at runtime.

  Note: This Op is *not* intended to be called by the user; instead users should
  call `tf.einsum` directly. It is a hidden Op used by `tf.einsum`.

  Operations are applied to the input(s) according to the following rules:

  (a) Generalized Diagonals: For input dimensions corresponding to axis labels
      appearing more than once in the same input subscript, we take the
      generalized (`k`-dimensional) diagonal.
      For example, in the equation `iii->i` with input shape `[3, 3, 3]`, the
      generalized diagonal would consist of `3` elements at indices `(0, 0, 0)`,
      `(1, 1, 1)` and `(2, 2, 2)` to create a Tensor of shape `[3]`.

  (b) Reduction: Axes corresponding to labels appearing only in one input
      subscript but not in the output subscript are summed over prior to Tensor
      contraction.
      For example, in the equation `ab,bc->b`, the axis labels `a` and `c` are
      the reduction axis labels.

  (c) Batch Dimensions: Axes corresponding to labels appearing in each of the
      input subscripts and also in the output subscript make up the batch
      dimensions in Tensor contraction. Unnamed axis labels corresponding to
      ellipsis (`...`) also correspond to batch dimensions.
      For example, for the equation denoting batch matrix multiplication,
      `bij,bjk->bik`, the axis label `b` corresponds to a batch dimension.

  (d) Contraction: In case of binary einsum, axes corresponding to labels
      appearing in two different inputs (and not in the output) are contracted
      against each other.
      Considering the batch matrix multiplication equation again
      (`bij,bjk->bik`), the contracted axis label is `j`.

  (e) Expand Diagonal: If the output subscripts contain repeated (explicit) axis
      labels, the opposite operation of (a) is applied. For example, in the
      equation `i->iii`, and input shape `[3]`, the output of shape `[3, 3, 3]`
      are all zeros, except for the (generalized) diagonal which is populated
      with values from the input.
      Note: This operation is not supported by `np.einsum` or `tf.einsum`; it is
      provided to enable computing the symbolic gradient of `tf.einsum`.

  The output subscripts must contain only labels appearing in at least one of the
  input subscripts. Furthermore, all dimensions mapping to the same axis label
  must be equal.

  Any of the input and output subscripts may contain at most a single ellipsis
  (`...`). These ellipsis are mapped against dimensions not corresponding to any
  named axis label. If two inputs contain ellipsis, then they are broadcasted
  according to standard NumPy broadcasting
  [rules](http://docs.scipy.org/doc/numpy/user/basics.broadcasting.html).

  The broadcasted dimensions are placed in the corresponding location of the
  ellipsis in the output subscript. If the broadcasted dimensions are non-empty
  and the output subscripts do not contain ellipsis, then an InvalidArgument error
  is raised.

  @compatibility(numpy)
  Similar to [`numpy.einsum`](https://docs.scipy.org/doc/numpy/reference/generated/numpy.einsum.html).

  Comparison with `numpy.einsum`:

  * This Op only supports unary and binary forms of `numpy.einsum`.
  * This Op does not support implicit form. (i.e. equations without `->`).
  * This Op also supports repeated indices in the output subscript, which is not
    supported by `numpy.einsum`.
  @end_compatibility

  Args:
    inputs: A list of at least 1 `Tensor` objects with the same type.
      List of 1 or 2 Tensors.
    equation: A `string`.
      String describing the Einstein Summation operation; in the format of np.einsum.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `inputs`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "Einsum", name, inputs, "equation", equation)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path unavailable; fall through to the Python eager fallback.
      pass
    try:
      return einsum_eager_fallback(
          inputs, equation=equation, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: validate inputs, canonicalize attrs, and add an Einsum node.
  if not isinstance(inputs, (list, tuple)):
    raise TypeError(
        "Expected list for 'inputs' argument to "
        "'einsum' Op, not %r." % inputs)
  _attr_N = len(inputs)
  equation = _execute.make_str(equation, "equation")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Einsum", inputs=inputs, equation=equation, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("equation", _op.get_attr("equation"), "N",
              _op._get_attr_int("N"), "T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Einsum", _inputs_flat, _attrs, _result)
  _result, = _result  # single-output op
  return _result

# Raw-op export: `tf.raw_ops.Einsum`.
Einsum = tf_export("raw_ops.Einsum")(_ops.to_raw_op(einsum))
def einsum_eager_fallback(inputs, equation, name, ctx):
  """Slow-path eager execution of the Einsum op."""
  if not isinstance(inputs, (list, tuple)):
    raise TypeError(
        "Expected list for 'inputs' argument to "
        "'einsum' Op, not %r." % inputs)
  num_inputs = len(inputs)
  equation = _execute.make_str(equation, "equation")
  # No dtype allow-list here: any matching dtype across the inputs is accepted.
  dtype_attr, inputs = _execute.args_to_matching_eager(list(inputs), ctx, [])
  flat_inputs = list(inputs)
  op_attrs = ("equation", equation, "N", num_inputs, "T", dtype_attr)
  outputs = _execute.execute(b"Einsum", 1, inputs=flat_inputs, attrs=op_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("Einsum", flat_inputs, op_attrs, outputs)
  result, = outputs
  return result
# Structured result of the LogMatrixDeterminant op: per-matrix determinant
# `sign` and `log_abs_determinant`.
_LogMatrixDeterminantOutput = collections.namedtuple(
    "LogMatrixDeterminant",
    ["sign", "log_abs_determinant"])
def log_matrix_determinant(input, name=None):
  r"""Computes the sign and the log of the absolute value of the determinant of

  one or more square matrices.

  The input is a tensor of shape `[N, M, M]` whose inner-most 2 dimensions
  form square matrices. The outputs are two tensors containing the signs and
  absolute values of the log determinants for all N input submatrices
  `[..., :, :]` such that `determinant = sign*exp(log_abs_determinant)`.
  The `log_abs_determinant` is computed as `det(P)*sum(log(diag(LU)))` where `LU`
  is the `LU` decomposition of the input and `P` is the corresponding
  permutation matrix.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`, `complex64`, `complex128`.
      Shape is `[N, M, M]`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sign, log_abs_determinant).

    sign: A `Tensor`. Has the same type as `input`.
    log_abs_determinant: A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "LogMatrixDeterminant", name, input)
      _result = _LogMatrixDeterminantOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path unavailable; fall through to the Python eager fallback.
      pass
    try:
      return log_matrix_determinant_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: add a LogMatrixDeterminant node to the current graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "LogMatrixDeterminant", input=input, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "LogMatrixDeterminant", _inputs_flat, _attrs, _result)
  _result = _LogMatrixDeterminantOutput._make(_result)
  return _result

# Raw-op export: `tf.raw_ops.LogMatrixDeterminant`.
LogMatrixDeterminant = tf_export("raw_ops.LogMatrixDeterminant")(_ops.to_raw_op(log_matrix_determinant))
def log_matrix_determinant_eager_fallback(input, name, ctx):
  """Slow-path eager execution of the LogMatrixDeterminant op."""
  accepted = [_dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]
  dtype_attr, (input,) = _execute.args_to_matching_eager([input], ctx, accepted)
  flat_inputs = [input]
  op_attrs = ("T", dtype_attr)
  outputs = _execute.execute(b"LogMatrixDeterminant", 2, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("LogMatrixDeterminant", flat_inputs, op_attrs,
                             outputs)
  return _LogMatrixDeterminantOutput._make(outputs)
# Structured result of the Lu op: packed `lu` factors and permutation
# indices `p`.
_LuOutput = collections.namedtuple(
    "Lu",
    ["lu", "p"])
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('linalg.lu')
def lu(input, output_idx_type=_dtypes.int32, name=None):
  r"""Computes the LU decomposition of one or more square matrices.

  The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
  form square matrices.

  The input has to be invertible.

  The output consists of two tensors LU and P containing the LU decomposition
  of all input submatrices `[..., :, :]`. LU encodes the lower triangular and
  upper triangular factors.

  For each input submatrix of shape `[M, M]`, L is a lower triangular matrix of
  shape `[M, M]` with unit diagonal whose entries correspond to the strictly lower
  triangular part of LU. U is a upper triangular matrix of shape `[M, M]` whose
  entries correspond to the upper triangular part, including the diagonal, of LU.

  P represents a permutation matrix encoded as a list of indices each between `0`
  and `M-1`, inclusive. If P_mat denotes the permutation matrix corresponding to
  P, then the L, U and P satisfies P_mat * input = L * U.

  Args:
    input: A `Tensor`. Must be one of the following types: `float64`, `float32`, `half`, `complex64`, `complex128`.
      A tensor of shape `[..., M, M]` whose inner-most 2 dimensions form matrices of
      size `[M, M]`.
    output_idx_type: An optional `tf.DType` from: `tf.int32, tf.int64`. Defaults to `tf.int32`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (lu, p).

    lu: A `Tensor`. Has the same type as `input`.
    p: A `Tensor` of type `output_idx_type`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "Lu", name, input, "output_idx_type", output_idx_type)
      _result = _LuOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path unavailable; consult the type-based dispatcher, then
      # fall through to the Python eager fallback.
      pass
    try:
      _result = _dispatcher_for_lu(
          (input, output_idx_type, name,), None)
      if _result is not NotImplemented:
        return _result
      return lu_eager_fallback(
          input, output_idx_type=output_idx_type, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Give fallback dispatchers (e.g. for dispatchable extension types)
      # a chance to handle the call before re-raising.
      _result = _dispatch.dispatch(
            lu, (), dict(input=input, output_idx_type=output_idx_type,
                         name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    # Graph mode still consults the type-based dispatcher first.
    _result = _dispatcher_for_lu(
        (input, output_idx_type, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  if output_idx_type is None:
    output_idx_type = _dtypes.int32
  output_idx_type = _execute.make_type(output_idx_type, "output_idx_type")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
          "Lu", input=input, output_idx_type=output_idx_type, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          lu, (), dict(input=input, output_idx_type=output_idx_type,
                       name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "output_idx_type",
              _op._get_attr_type("output_idx_type"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Lu", _inputs_flat, _attrs, _result)
  _result = _LuOutput._make(_result)
  return _result

# Raw-op export and dispatcher hook for `lu`.
Lu = tf_export("raw_ops.Lu")(_ops.to_raw_op(lu))
_dispatcher_for_lu = lu._tf_type_based_dispatcher.Dispatch
def lu_eager_fallback(input, output_idx_type, name, ctx):
  """Slow-path eager execution of the Lu op."""
  # Canonicalize the index-dtype attribute exactly as the graph path does.
  if output_idx_type is None:
    output_idx_type = _dtypes.int32
  output_idx_type = _execute.make_type(output_idx_type, "output_idx_type")
  accepted = [_dtypes.float64, _dtypes.float32, _dtypes.half, _dtypes.complex64, _dtypes.complex128, ]
  dtype_attr, (input,) = _execute.args_to_matching_eager([input], ctx, accepted)
  flat_inputs = [input]
  op_attrs = ("T", dtype_attr, "output_idx_type", output_idx_type)
  outputs = _execute.execute(b"Lu", 2, inputs=flat_inputs, attrs=op_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("Lu", flat_inputs, op_attrs, outputs)
  return _LuOutput._make(outputs)
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('linalg.det', v1=['linalg.det', 'matrix_determinant'])
@deprecated_endpoints('matrix_determinant')
def matrix_determinant(input, name=None):
  r"""Computes the determinant of one or more square matrices.

  The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
  form square matrices. The output is a tensor containing the determinants
  for all input submatrices `[..., :, :]`.

  Args:
    input: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`, `complex64`, `complex128`.
      Shape is `[..., M, M]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "MatrixDeterminant", name, input)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path unavailable; consult the type-based dispatcher, then
      # fall through to the Python eager fallback.
      pass
    try:
      _result = _dispatcher_for_matrix_determinant(
          (input, name,), None)
      if _result is not NotImplemented:
        return _result
      return matrix_determinant_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Give fallback dispatchers a chance before re-raising.
      _result = _dispatch.dispatch(
            matrix_determinant, (), dict(input=input, name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    # Graph mode still consults the type-based dispatcher first.
    _result = _dispatcher_for_matrix_determinant(
        (input, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
          "MatrixDeterminant", input=input, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          matrix_determinant, (), dict(input=input, name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MatrixDeterminant", _inputs_flat, _attrs, _result)
  _result, = _result  # single-output op
  return _result

# Raw-op export and dispatcher hook for `matrix_determinant`.
MatrixDeterminant = tf_export("raw_ops.MatrixDeterminant")(_ops.to_raw_op(matrix_determinant))
_dispatcher_for_matrix_determinant = matrix_determinant._tf_type_based_dispatcher.Dispatch
def matrix_determinant_eager_fallback(input, name, ctx):
  """Slow-path eager execution of the MatrixDeterminant op."""
  accepted = [_dtypes.half, _dtypes.float32, _dtypes.float64, _dtypes.complex64, _dtypes.complex128, ]
  dtype_attr, (input,) = _execute.args_to_matching_eager([input], ctx, accepted)
  flat_inputs = [input]
  op_attrs = ("T", dtype_attr)
  outputs = _execute.execute(b"MatrixDeterminant", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("MatrixDeterminant", flat_inputs, op_attrs,
                             outputs)
  result, = outputs
  return result
def matrix_exponential(input, name=None):
  r"""Deprecated, use python implementation tf.linalg.matrix_exponential.

  Args:
    input: A `Tensor`. Must be one of the following types: `float64`, `float32`, `half`, `complex64`, `complex128`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "MatrixExponential", name, input)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path unavailable; fall through to the Python eager fallback.
      pass
    try:
      return matrix_exponential_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: add a MatrixExponential node to the current graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MatrixExponential", input=input, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MatrixExponential", _inputs_flat, _attrs, _result)
  _result, = _result  # single-output op
  return _result

# Raw-op export: `tf.raw_ops.MatrixExponential`.
MatrixExponential = tf_export("raw_ops.MatrixExponential")(_ops.to_raw_op(matrix_exponential))
def matrix_exponential_eager_fallback(input, name, ctx):
  """Slow-path eager execution of the MatrixExponential op."""
  accepted = [_dtypes.float64, _dtypes.float32, _dtypes.half, _dtypes.complex64, _dtypes.complex128, ]
  dtype_attr, (input,) = _execute.args_to_matching_eager([input], ctx, accepted)
  flat_inputs = [input]
  op_attrs = ("T", dtype_attr)
  outputs = _execute.execute(b"MatrixExponential", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("MatrixExponential", flat_inputs, op_attrs,
                             outputs)
  result, = outputs
  return result
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('linalg.inv', v1=['linalg.inv', 'matrix_inverse'])
@deprecated_endpoints('matrix_inverse')
def matrix_inverse(input, adjoint=False, name=None):
  r"""Computes the inverse of one or more square invertible matrices or their adjoints (conjugate transposes).

  The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
  form square matrices. The output is a tensor of the same shape as the input
  containing the inverse for all input submatrices `[..., :, :]`.

  The op uses LU decomposition with partial pivoting to compute the inverses.

  If a matrix is not invertible there is no guarantee what the op does. It
  may detect the condition and raise an exception or it may simply return a
  garbage result.

  Args:
    input: A `Tensor`. Must be one of the following types: `float64`, `float32`, `half`, `complex64`, `complex128`.
      Shape is `[..., M, M]`.
    adjoint: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "MatrixInverse", name, input, "adjoint", adjoint)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path unavailable; consult the type-based dispatcher, then
      # fall through to the Python eager fallback.
      pass
    try:
      _result = _dispatcher_for_matrix_inverse(
          (input, adjoint, name,), None)
      if _result is not NotImplemented:
        return _result
      return matrix_inverse_eager_fallback(
          input, adjoint=adjoint, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Give fallback dispatchers a chance before re-raising.
      _result = _dispatch.dispatch(
            matrix_inverse, (), dict(input=input, adjoint=adjoint, name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    # Graph mode still consults the type-based dispatcher first.
    _result = _dispatcher_for_matrix_inverse(
        (input, adjoint, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  if adjoint is None:
    adjoint = False
  adjoint = _execute.make_bool(adjoint, "adjoint")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
          "MatrixInverse", input=input, adjoint=adjoint, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          matrix_inverse, (), dict(input=input, adjoint=adjoint, name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("adjoint", _op._get_attr_bool("adjoint"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MatrixInverse", _inputs_flat, _attrs, _result)
  _result, = _result  # single-output op
  return _result

# Raw-op export and dispatcher hook for `matrix_inverse`.
MatrixInverse = tf_export("raw_ops.MatrixInverse")(_ops.to_raw_op(matrix_inverse))
_dispatcher_for_matrix_inverse = matrix_inverse._tf_type_based_dispatcher.Dispatch
def matrix_inverse_eager_fallback(input, adjoint, name, ctx):
  """Slow-path eager execution of the MatrixInverse op."""
  # Canonicalize the adjoint attribute exactly as the graph path does.
  if adjoint is None:
    adjoint = False
  adjoint = _execute.make_bool(adjoint, "adjoint")
  accepted = [_dtypes.float64, _dtypes.float32, _dtypes.half, _dtypes.complex64, _dtypes.complex128, ]
  dtype_attr, (input,) = _execute.args_to_matching_eager([input], ctx, accepted)
  flat_inputs = [input]
  op_attrs = ("adjoint", adjoint, "T", dtype_attr)
  outputs = _execute.execute(b"MatrixInverse", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("MatrixInverse", flat_inputs, op_attrs, outputs)
  result, = outputs
  return result
def matrix_logarithm(input, name=None):
  r"""Computes the matrix logarithm of one or more square matrices:

  \\(log(exp(A)) = A\\)

  This op is only defined for complex matrices. If A is positive-definite and
  real, then casting to a complex matrix, taking the logarithm and casting back
  to a real matrix will give the correct result.

  This function computes the matrix logarithm using the Schur-Parlett algorithm.
  Details of the algorithm can be found in Section 11.6.2 of:
  Nicholas J. Higham, Functions of Matrices: Theory and Computation, SIAM 2008.
  ISBN 978-0-898716-46-7.

  The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
  form square matrices. The output is a tensor of the same shape as the input
  containing the exponential for all input submatrices `[..., :, :]`.

  Args:
    input: A `Tensor`. Must be one of the following types: `complex64`, `complex128`.
      Shape is `[..., M, M]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: try the C++ fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "MatrixLogarithm", name, input)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path unavailable; fall through to the Python eager fallback.
      pass
    try:
      return matrix_logarithm_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: add a MatrixLogarithm node to the current graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MatrixLogarithm", input=input, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MatrixLogarithm", _inputs_flat, _attrs, _result)
  _result, = _result  # single-output op
  return _result

# Raw-op export: `tf.raw_ops.MatrixLogarithm`.
MatrixLogarithm = tf_export("raw_ops.MatrixLogarithm")(_ops.to_raw_op(matrix_logarithm))
def matrix_logarithm_eager_fallback(input, name, ctx):
  """Slow-path eager execution of the MatrixLogarithm op."""
  accepted = [_dtypes.complex64, _dtypes.complex128, ]
  dtype_attr, (input,) = _execute.args_to_matching_eager([input], ctx, accepted)
  flat_inputs = [input]
  op_attrs = ("T", dtype_attr)
  outputs = _execute.execute(b"MatrixLogarithm", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("MatrixLogarithm", flat_inputs, op_attrs,
                             outputs)
  result, = outputs
  return result
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('linalg.solve', v1=['linalg.solve', 'matrix_solve'])
@deprecated_endpoints('matrix_solve')
def matrix_solve(matrix, rhs, adjoint=False, name=None):
  r"""Solves systems of linear equations.

  `Matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
  form square matrices. `Rhs` is a tensor of shape `[..., M, K]`. The `output` is
  a tensor shape `[..., M, K]`. If `adjoint` is `False` then each output matrix
  satisfies `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
  If `adjoint` is `True` then each output matrix satisfies
  `adjoint(matrix[..., :, :]) * output[..., :, :] = rhs[..., :, :]`.

  Args:
    matrix: A `Tensor`. Must be one of the following types: `float64`, `float32`, `half`, `complex64`, `complex128`.
      Shape is `[..., M, M]`.
    rhs: A `Tensor`. Must have the same type as `matrix`.
      Shape is `[..., M, K]`.
    adjoint: An optional `bool`. Defaults to `False`.
      Boolean indicating whether to solve with `matrix` or its (block-wise)
      adjoint.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `matrix`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: a single C call into the runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "MatrixSolve", name, matrix, rhs, "adjoint", adjoint)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path declined the inputs; retry through the Python paths below.
      pass
    try:
      # Type-based dispatch hook installed by @add_type_based_api_dispatcher;
      # NotImplemented means no dispatcher claimed this call.
      _result = _dispatcher_for_matrix_solve(
          (matrix, rhs, adjoint, name,), None)
      if _result is not NotImplemented:
        return _result
      return matrix_solve_eager_fallback(
          matrix, rhs, adjoint=adjoint, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Legacy fallback dispatch list; NOT_SUPPORTED re-raises the original.
      _result = _dispatch.dispatch(
            matrix_solve, (), dict(matrix=matrix, rhs=rhs, adjoint=adjoint,
                                   name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_matrix_solve(
        (matrix, rhs, adjoint, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  if adjoint is None:
    adjoint = False
  adjoint = _execute.make_bool(adjoint, "adjoint")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MatrixSolve", matrix=matrix, rhs=rhs, adjoint=adjoint, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          matrix_solve, (), dict(matrix=matrix, rhs=rhs, adjoint=adjoint,
                                 name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so the gradient function can be replayed.
    _attrs = ("adjoint", _op._get_attr_bool("adjoint"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MatrixSolve", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op alias (tf.raw_ops.MatrixSolve) and the type-based dispatcher hook
# installed on matrix_solve by its @add_type_based_api_dispatcher decorator.
MatrixSolve = tf_export("raw_ops.MatrixSolve")(_ops.to_raw_op(matrix_solve))
_dispatcher_for_matrix_solve = matrix_solve._tf_type_based_dispatcher.Dispatch
def matrix_solve_eager_fallback(matrix, rhs, adjoint, name, ctx):
  """Eager-mode fallback for the MatrixSolve op.

  Normalizes `adjoint` (None -> False), resolves the shared dtype attr `T`
  for `matrix` and `rhs`, executes the op, and records a gradient when a
  tape is active.
  """
  adjoint = _execute.make_bool(False if adjoint is None else adjoint,
                               "adjoint")
  _attr_T, (matrix, rhs) = _execute.args_to_matching_eager(
      [matrix, rhs], ctx,
      [_dtypes.float64, _dtypes.float32, _dtypes.half, _dtypes.complex64,
       _dtypes.complex128, ])
  flat_inputs = [matrix, rhs]
  op_attrs = ("adjoint", adjoint, "T", _attr_T)
  outputs = _execute.execute(b"MatrixSolve", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "MatrixSolve", flat_inputs, op_attrs, outputs)
  result, = outputs
  return result
def matrix_solve_ls(matrix, rhs, l2_regularizer, fast=True, name=None):
  r"""Solves one or more linear least-squares problems.

  `matrix` is a tensor of shape `[..., M, N]` whose inner-most 2 dimensions
  form real or complex matrices of size `[M, N]`. `Rhs` is a tensor of the same
  type as `matrix` and shape `[..., M, K]`.
  The output is a tensor shape `[..., N, K]` where each output matrix solves
  each of the equations
  `matrix[..., :, :]` * `output[..., :, :]` = `rhs[..., :, :]`
  in the least squares sense.

  We use the following notation for (complex) matrix and right-hand sides
  in the batch:

  `matrix`=\\(A \in \mathbb{C}^{m \times n}\\),
  `rhs`=\\(B \in \mathbb{C}^{m \times k}\\),
  `output`=\\(X \in \mathbb{C}^{n \times k}\\),
  `l2_regularizer`=\\(\lambda \in \mathbb{R}\\).

  If `fast` is `True`, then the solution is computed by solving the normal
  equations using Cholesky decomposition. Specifically, if \\(m \ge n\\) then
  \\(X = (A^H A + \lambda I)^{-1} A^H B\\), which solves the least-squares
  problem \\(X = \mathrm{argmin}_{Z \in \Re^{n \times k} } ||A Z - B||_F^2 + \lambda ||Z||_F^2\\).
  If \\(m \lt n\\) then `output` is computed as
  \\(X = A^H (A A^H + \lambda I)^{-1} B\\), which (for \\(\lambda = 0\\)) is the
  minimum-norm solution to the under-determined linear system, i.e.
  \\(X = \mathrm{argmin}_{Z \in \mathbb{C}^{n \times k} } ||Z||_F^2 \\),
  subject to \\(A Z = B\\). Notice that the fast path is only numerically stable
  when \\(A\\) is numerically full rank and has a condition number
  \\(\mathrm{cond}(A) \lt \frac{1}{\sqrt{\epsilon_{mach} } }\\) or \\(\lambda\\) is
  sufficiently large.

  If `fast` is `False` an algorithm based on the numerically robust complete
  orthogonal decomposition is used. This computes the minimum-norm
  least-squares solution, even when \\(A\\) is rank deficient. This path is
  typically 6-7 times slower than the fast path. If `fast` is `False` then
  `l2_regularizer` is ignored.

  Args:
    matrix: A `Tensor`. Must be one of the following types: `float64`, `float32`, `half`, `complex64`, `complex128`.
      Shape is `[..., M, N]`.
    rhs: A `Tensor`. Must have the same type as `matrix`.
      Shape is `[..., M, K]`.
    l2_regularizer: A `Tensor` of type `float64`. Scalar tensor.

      @compatibility(numpy)
      Equivalent to np.linalg.lstsq
      @end_compatibility
    fast: An optional `bool`. Defaults to `True`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `matrix`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: a single C call into the runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "MatrixSolveLs", name, matrix, rhs, l2_regularizer, "fast",
        fast)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path declined the inputs; retry via the Python eager fallback.
      pass
    try:
      return matrix_solve_ls_eager_fallback(
          matrix, rhs, l2_regularizer, fast=fast, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if fast is None:
    fast = True
  fast = _execute.make_bool(fast, "fast")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MatrixSolveLs", matrix=matrix, rhs=rhs,
                         l2_regularizer=l2_regularizer, fast=fast, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so the gradient function can be replayed.
    _attrs = ("T", _op._get_attr_type("T"), "fast",
              _op._get_attr_bool("fast"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MatrixSolveLs", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op alias: exposes the op as tf.raw_ops.MatrixSolveLs.
MatrixSolveLs = tf_export("raw_ops.MatrixSolveLs")(_ops.to_raw_op(matrix_solve_ls))
def matrix_solve_ls_eager_fallback(matrix, rhs, l2_regularizer, fast, name, ctx):
  """Eager-mode fallback for the MatrixSolveLs op.

  Normalizes `fast` (None -> True), resolves the shared dtype attr `T` for
  `matrix` and `rhs`, coerces `l2_regularizer` to a float64 tensor, executes
  the op, and records a gradient when a tape is active.
  """
  fast = _execute.make_bool(True if fast is None else fast, "fast")
  _attr_T, (matrix, rhs) = _execute.args_to_matching_eager(
      [matrix, rhs], ctx,
      [_dtypes.float64, _dtypes.float32, _dtypes.half, _dtypes.complex64,
       _dtypes.complex128, ])
  l2_regularizer = _ops.convert_to_tensor(l2_regularizer, _dtypes.float64)
  flat_inputs = [matrix, rhs, l2_regularizer]
  op_attrs = ("T", _attr_T, "fast", fast)
  outputs = _execute.execute(b"MatrixSolveLs", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "MatrixSolveLs", flat_inputs, op_attrs, outputs)
  result, = outputs
  return result
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('linalg.sqrtm', 'matrix_square_root')
def matrix_square_root(input, name=None):
  r"""Computes the matrix square root of one or more square matrices:

  matmul(sqrtm(A), sqrtm(A)) = A

  The input matrix should be invertible. If the input matrix is real, it should
  have no eigenvalues which are real and negative (pairs of complex conjugate
  eigenvalues are allowed).

  The matrix square root is computed by first reducing the matrix to
  quasi-triangular form with the real Schur decomposition. The square root
  of the quasi-triangular matrix is then computed directly. Details of
  the algorithm can be found in: Nicholas J. Higham, "Computing real
  square roots of a real matrix", Linear Algebra Appl., 1987.

  The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
  form square matrices. The output is a tensor of the same shape as the input
  containing the matrix square root for all input submatrices `[..., :, :]`.

  Args:
    input: A `Tensor`. Must be one of the following types: `float64`, `float32`, `half`, `complex64`, `complex128`.
      Shape is `[..., M, M]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: a single C call into the runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "MatrixSquareRoot", name, input)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path declined the inputs; retry through the Python paths below.
      pass
    try:
      # Type-based dispatch hook installed by @add_type_based_api_dispatcher;
      # NotImplemented means no dispatcher claimed this call.
      _result = _dispatcher_for_matrix_square_root(
          (input, name,), None)
      if _result is not NotImplemented:
        return _result
      return matrix_square_root_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Legacy fallback dispatch list; NOT_SUPPORTED re-raises the original.
      _result = _dispatch.dispatch(
            matrix_square_root, (), dict(input=input, name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_matrix_square_root(
        (input, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MatrixSquareRoot", input=input, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          matrix_square_root, (), dict(input=input, name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so the gradient function can be replayed.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MatrixSquareRoot", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op alias (tf.raw_ops.MatrixSquareRoot) and the type-based dispatcher
# hook installed on matrix_square_root by its decorator.
MatrixSquareRoot = tf_export("raw_ops.MatrixSquareRoot")(_ops.to_raw_op(matrix_square_root))
_dispatcher_for_matrix_square_root = matrix_square_root._tf_type_based_dispatcher.Dispatch
def matrix_square_root_eager_fallback(input, name, ctx):
  """Eager-mode fallback for the MatrixSquareRoot op.

  Resolves the dtype attr `T` from `input`, executes the op, and records a
  gradient when a tape is active. Returns the single output tensor.
  """
  _attr_T, (input,) = _execute.args_to_matching_eager(
      [input], ctx,
      [_dtypes.float64, _dtypes.float32, _dtypes.half, _dtypes.complex64,
       _dtypes.complex128, ])
  flat_inputs = [input]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"MatrixSquareRoot", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "MatrixSquareRoot", flat_inputs, op_attrs, outputs)
  result, = outputs
  return result
def matrix_triangular_solve(matrix, rhs, lower=True, adjoint=False, name=None):
  r"""Solves systems of linear equations with upper or lower triangular matrices by backsubstitution.

  `matrix` is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions form
  square matrices. If `lower` is `True` then the strictly upper triangular part
  of each inner-most matrix is assumed to be zero and not accessed.
  If `lower` is False then the strictly lower triangular part of each inner-most
  matrix is assumed to be zero and not accessed.
  `rhs` is a tensor of shape `[..., M, N]`.

  The output is a tensor of shape `[..., M, N]`. If `adjoint` is
  `False` then the innermost matrices in `output` satisfy matrix equations
  `matrix[..., :, :] * output[..., :, :] = rhs[..., :, :]`.
  If `adjoint` is `True` then the innermost matrices in `output` satisfy
  matrix equations
  `adjoint(matrix[..., i, k]) * output[..., k, j] = rhs[..., i, j]`.

  Note, the batch shapes for the inputs only need to broadcast.

  Example:
  ```python

  a = tf.constant([[3,  0,  0,  0],
                   [2,  1,  0,  0],
                   [1,  0,  1,  0],
                   [1,  1,  1,  1]], dtype=tf.float32)

  b = tf.constant([[4],
                   [2],
                   [4],
                   [2]], dtype=tf.float32)

  x = tf.linalg.triangular_solve(a, b, lower=True)
  x
  # <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
  # array([[ 1.3333334 ],
  #        [-0.66666675],
  #        [ 2.6666665 ],
  #        [-1.3333331 ]], dtype=float32)>

  # in python3 one can use `a@x`
  tf.matmul(a, x)
  # <tf.Tensor: shape=(4, 1), dtype=float32, numpy=
  # array([[4.       ],
  #        [2.       ],
  #        [4.       ],
  #        [1.9999999]], dtype=float32)>
  ```

  Args:
    matrix: A `Tensor`. Must be one of the following types: `bfloat16`, `float64`, `float32`, `half`, `complex64`, `complex128`.
      Shape is `[..., M, M]`.
    rhs: A `Tensor`. Must have the same type as `matrix`.
      Shape is `[..., M, K]`.
    lower: An optional `bool`. Defaults to `True`.
      Boolean indicating whether the innermost matrices in `matrix` are
      lower or upper triangular.
    adjoint: An optional `bool`. Defaults to `False`.
      Boolean indicating whether to solve with `matrix` or its (block-wise)
      adjoint.

      @compatibility(numpy)
      Equivalent to scipy.linalg.solve_triangular
      @end_compatibility
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `matrix`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: a single C call into the runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "MatrixTriangularSolve", name, matrix, rhs, "lower", lower,
        "adjoint", adjoint)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path declined the inputs; retry via the Python eager fallback.
      pass
    try:
      return matrix_triangular_solve_eager_fallback(
          matrix, rhs, lower=lower, adjoint=adjoint, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if lower is None:
    lower = True
  lower = _execute.make_bool(lower, "lower")
  if adjoint is None:
    adjoint = False
  adjoint = _execute.make_bool(adjoint, "adjoint")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "MatrixTriangularSolve", matrix=matrix, rhs=rhs, lower=lower,
                                 adjoint=adjoint, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so the gradient function can be replayed.
    _attrs = ("lower", _op._get_attr_bool("lower"), "adjoint",
              _op._get_attr_bool("adjoint"), "T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "MatrixTriangularSolve", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op alias: exposes the op as tf.raw_ops.MatrixTriangularSolve.
MatrixTriangularSolve = tf_export("raw_ops.MatrixTriangularSolve")(_ops.to_raw_op(matrix_triangular_solve))
def matrix_triangular_solve_eager_fallback(matrix, rhs, lower, adjoint, name, ctx):
  """Eager-mode fallback for the MatrixTriangularSolve op.

  Normalizes `lower` (None -> True) and `adjoint` (None -> False), resolves
  the shared dtype attr `T` for `matrix` and `rhs`, executes the op, and
  records a gradient when a tape is active.
  """
  lower = _execute.make_bool(True if lower is None else lower, "lower")
  adjoint = _execute.make_bool(False if adjoint is None else adjoint,
                               "adjoint")
  _attr_T, (matrix, rhs) = _execute.args_to_matching_eager(
      [matrix, rhs], ctx,
      [_dtypes.bfloat16, _dtypes.float64, _dtypes.float32, _dtypes.half,
       _dtypes.complex64, _dtypes.complex128, ])
  flat_inputs = [matrix, rhs]
  op_attrs = ("lower", lower, "adjoint", adjoint, "T", _attr_T)
  outputs = _execute.execute(b"MatrixTriangularSolve", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "MatrixTriangularSolve", flat_inputs, op_attrs, outputs)
  result, = outputs
  return result
# Structured result for the Qr op: q (orthonormal factor), r (upper
# triangular factor).
_QrOutput = collections.namedtuple(
    "Qr",
    ["q", "r"])
@_dispatch.add_fallback_dispatch_list
@_dispatch.add_type_based_api_dispatcher
@tf_export('linalg.qr', v1=['linalg.qr', 'qr'])
@deprecated_endpoints('qr')
def qr(input, full_matrices=False, name=None):
  r"""Computes the QR decompositions of one or more matrices.

  Computes the QR decomposition of each inner matrix in `tensor` such that
  `tensor[..., :, :] = q[..., :, :] * r[..., :,:])`

  Currently, the gradient for the QR decomposition is well-defined only when
  the first `P` columns of the inner matrix are linearly independent, where
  `P` is the minimum of `M` and `N`, the 2 inner-most dimensions of `tensor`.

  ```python
  # a is a tensor.
  # q is a tensor of orthonormal matrices.
  # r is a tensor of upper triangular matrices.
  q, r = qr(a)
  q_full, r_full = qr(a, full_matrices=True)
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float64`, `float32`, `half`, `complex64`, `complex128`.
      A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
      form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
    full_matrices: An optional `bool`. Defaults to `False`.
      If true, compute full-sized `q` and `r`. If false
      (the default), compute only the leading `P` columns of `q`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (q, r).

    q: A `Tensor`. Has the same type as `input`.
    r: A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: a single C call into the runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "Qr", name, input, "full_matrices", full_matrices)
      _result = _QrOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path declined the inputs; retry through the Python paths below.
      pass
    try:
      # Type-based dispatch hook installed by @add_type_based_api_dispatcher;
      # NotImplemented means no dispatcher claimed this call.
      _result = _dispatcher_for_qr(
          (input, full_matrices, name,), None)
      if _result is not NotImplemented:
        return _result
      return qr_eager_fallback(
          input, full_matrices=full_matrices, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
    except (TypeError, ValueError):
      # Legacy fallback dispatch list; NOT_SUPPORTED re-raises the original.
      _result = _dispatch.dispatch(
            qr, (), dict(input=input, full_matrices=full_matrices, name=name)
          )
      if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
        return _result
      raise
  else:
    _result = _dispatcher_for_qr(
        (input, full_matrices, name,), None)
    if _result is not NotImplemented:
      return _result
  # Add nodes to the TensorFlow graph.
  if full_matrices is None:
    full_matrices = False
  full_matrices = _execute.make_bool(full_matrices, "full_matrices")
  try:
    _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Qr", input=input, full_matrices=full_matrices, name=name)
  except (TypeError, ValueError):
    _result = _dispatch.dispatch(
          qr, (), dict(input=input, full_matrices=full_matrices, name=name)
        )
    if _result is not _dispatch.OpDispatcher.NOT_SUPPORTED:
      return _result
    raise
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so the gradient function can be replayed.
    _attrs = ("full_matrices", _op._get_attr_bool("full_matrices"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Qr", _inputs_flat, _attrs, _result)
  _result = _QrOutput._make(_result)
  return _result
# Raw-op alias (tf.raw_ops.Qr) and the type-based dispatcher hook installed
# on qr by its @add_type_based_api_dispatcher decorator.
Qr = tf_export("raw_ops.Qr")(_ops.to_raw_op(qr))
_dispatcher_for_qr = qr._tf_type_based_dispatcher.Dispatch
def qr_eager_fallback(input, full_matrices, name, ctx):
  """Eager-mode fallback for the Qr op.

  Normalizes `full_matrices` (None -> False), resolves the dtype attr `T`,
  executes the op (two outputs), records a gradient when a tape is active,
  and returns a `_QrOutput` namedtuple (q, r).
  """
  full_matrices = _execute.make_bool(
      False if full_matrices is None else full_matrices, "full_matrices")
  _attr_T, (input,) = _execute.args_to_matching_eager(
      [input], ctx,
      [_dtypes.float64, _dtypes.float32, _dtypes.half, _dtypes.complex64,
       _dtypes.complex128, ])
  flat_inputs = [input]
  op_attrs = ("full_matrices", full_matrices, "T", _attr_T)
  outputs = _execute.execute(b"Qr", 2, inputs=flat_inputs, attrs=op_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Qr", flat_inputs, op_attrs, outputs)
  return _QrOutput._make(outputs)
def self_adjoint_eig(input, name=None):
  r"""Computes the Eigen Decomposition of a batch of square self-adjoint matrices.

  The input is a tensor of shape `[..., M, M]` whose inner-most 2 dimensions
  form square matrices, with the same constraints as the single matrix
  SelfAdjointEig.

  The result is a [..., M+1, M] matrix with [..., 0,:] containing the
  eigenvalues, and subsequent [...,1:, :] containing the eigenvectors. The eigenvalues
  are sorted in non-decreasing order.

  Args:
    input: A `Tensor`. Must be one of the following types: `float64`, `float32`, `half`.
      Shape is `[..., M, M]`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: a single C call into the runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SelfAdjointEig", name, input)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path declined the inputs; retry via the Python eager fallback.
      pass
    try:
      return self_adjoint_eig_eager_fallback(
          input, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SelfAdjointEig", input=input, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so the gradient function can be replayed.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SelfAdjointEig", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Raw-op alias: exposes the op as tf.raw_ops.SelfAdjointEig.
SelfAdjointEig = tf_export("raw_ops.SelfAdjointEig")(_ops.to_raw_op(self_adjoint_eig))
def self_adjoint_eig_eager_fallback(input, name, ctx):
  """Eager-mode fallback for the SelfAdjointEig op.

  Resolves the dtype attr `T` from `input` (float64/float32/half), executes
  the op, and records a gradient when a tape is active. Returns the single
  output tensor.
  """
  _attr_T, (input,) = _execute.args_to_matching_eager(
      [input], ctx, [_dtypes.float64, _dtypes.float32, _dtypes.half, ])
  flat_inputs = [input]
  op_attrs = ("T", _attr_T)
  outputs = _execute.execute(b"SelfAdjointEig", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SelfAdjointEig", flat_inputs, op_attrs, outputs)
  result, = outputs
  return result
# Structured result for the SelfAdjointEigV2 op: e (eigenvalues),
# v (eigenvectors; only populated when compute_v is True).
_SelfAdjointEigV2Output = collections.namedtuple(
    "SelfAdjointEigV2",
    ["e", "v"])
def self_adjoint_eig_v2(input, compute_v=True, name=None):
  r"""Computes the eigen decomposition of one or more square self-adjoint matrices.

  Computes the eigenvalues and (optionally) eigenvectors of each inner matrix in
  `input` such that `input[..., :, :] = v[..., :, :] * diag(e[..., :])`. The eigenvalues
  are sorted in non-decreasing order.

  ```python
  # a is a tensor.
  # e is a tensor of eigenvalues.
  # v is a tensor of eigenvectors.
  e, v = self_adjoint_eig(a)
  e = self_adjoint_eig(a, compute_v=False)
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float64`, `float32`, `half`, `complex64`, `complex128`.
      `Tensor` input of shape `[N, N]`.
    compute_v: An optional `bool`. Defaults to `True`.
      If `True` then eigenvectors will be computed and returned in `v`.
      Otherwise, only the eigenvalues will be computed.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (e, v).

    e: A `Tensor`. Has the same type as `input`.
    v: A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: a single C call into the runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SelfAdjointEigV2", name, input, "compute_v", compute_v)
      _result = _SelfAdjointEigV2Output._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path declined the inputs; retry via the Python eager fallback.
      pass
    try:
      return self_adjoint_eig_v2_eager_fallback(
          input, compute_v=compute_v, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if compute_v is None:
    compute_v = True
  compute_v = _execute.make_bool(compute_v, "compute_v")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SelfAdjointEigV2", input=input, compute_v=compute_v, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so the gradient function can be replayed.
    _attrs = ("compute_v", _op._get_attr_bool("compute_v"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SelfAdjointEigV2", _inputs_flat, _attrs, _result)
  _result = _SelfAdjointEigV2Output._make(_result)
  return _result
# Raw-op alias: exposes the op as tf.raw_ops.SelfAdjointEigV2.
SelfAdjointEigV2 = tf_export("raw_ops.SelfAdjointEigV2")(_ops.to_raw_op(self_adjoint_eig_v2))
def self_adjoint_eig_v2_eager_fallback(input, compute_v, name, ctx):
  """Eager-mode fallback for the SelfAdjointEigV2 op.

  Normalizes `compute_v` (None -> True), resolves the dtype attr `T`,
  executes the op (two outputs), records a gradient when a tape is active,
  and returns a `_SelfAdjointEigV2Output` namedtuple (e, v).
  """
  compute_v = _execute.make_bool(
      True if compute_v is None else compute_v, "compute_v")
  _attr_T, (input,) = _execute.args_to_matching_eager(
      [input], ctx,
      [_dtypes.float64, _dtypes.float32, _dtypes.half, _dtypes.complex64,
       _dtypes.complex128, ])
  flat_inputs = [input]
  op_attrs = ("compute_v", compute_v, "T", _attr_T)
  outputs = _execute.execute(b"SelfAdjointEigV2", 2, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SelfAdjointEigV2", flat_inputs, op_attrs, outputs)
  return _SelfAdjointEigV2Output._make(outputs)
# Structured result for the Svd op: s (singular values), u (left singular
# vectors), v (right singular vectors; u/v only populated when compute_uv).
_SvdOutput = collections.namedtuple(
    "Svd",
    ["s", "u", "v"])
def svd(input, compute_uv=True, full_matrices=False, name=None):
  r"""Computes the singular value decompositions of one or more matrices.

  Computes the SVD of each inner matrix in `input` such that
  `input[..., :, :] = u[..., :, :] * diag(s[..., :, :]) * transpose(v[..., :, :])`

  ```python
  # a is a tensor containing a batch of matrices.
  # s is a tensor of singular values for each matrix.
  # u is the tensor containing the left singular vectors for each matrix.
  # v is the tensor containing the right singular vectors for each matrix.
  s, u, v = svd(a)
  s, _, _ = svd(a, compute_uv=False)
  ```

  Args:
    input: A `Tensor`. Must be one of the following types: `float64`, `float32`, `half`, `complex64`, `complex128`.
      A tensor of shape `[..., M, N]` whose inner-most 2 dimensions
      form matrices of size `[M, N]`. Let `P` be the minimum of `M` and `N`.
    compute_uv: An optional `bool`. Defaults to `True`.
      If true, left and right singular vectors will be
      computed and returned in `u` and `v`, respectively.
      If false, `u` and `v` are not set and should never referenced.
    full_matrices: An optional `bool`. Defaults to `False`.
      If true, compute full-sized `u` and `v`. If false
      (the default), compute only the leading `P` singular vectors.
      Ignored if `compute_uv` is `False`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (s, u, v).

    s: A `Tensor`. Has the same type as `input`.
    u: A `Tensor`. Has the same type as `input`.
    v: A `Tensor`. Has the same type as `input`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: a single C call into the runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "Svd", name, input, "compute_uv", compute_uv, "full_matrices",
        full_matrices)
      _result = _SvdOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path declined the inputs; retry via the Python eager fallback.
      pass
    try:
      return svd_eager_fallback(
          input, compute_uv=compute_uv, full_matrices=full_matrices,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if compute_uv is None:
    compute_uv = True
  compute_uv = _execute.make_bool(compute_uv, "compute_uv")
  if full_matrices is None:
    full_matrices = False
  full_matrices = _execute.make_bool(full_matrices, "full_matrices")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "Svd", input=input, compute_uv=compute_uv,
               full_matrices=full_matrices, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so the gradient function can be replayed.
    _attrs = ("compute_uv", _op._get_attr_bool("compute_uv"), "full_matrices",
              _op._get_attr_bool("full_matrices"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "Svd", _inputs_flat, _attrs, _result)
  _result = _SvdOutput._make(_result)
  return _result
# Raw-op alias: exposes the op as tf.raw_ops.Svd.
Svd = tf_export("raw_ops.Svd")(_ops.to_raw_op(svd))
def svd_eager_fallback(input, compute_uv, full_matrices, name, ctx):
  """Eager-mode fallback for the Svd op.

  Normalizes `compute_uv` (None -> True) and `full_matrices` (None -> False),
  resolves the dtype attr `T`, executes the op (three outputs), records a
  gradient when a tape is active, and returns a `_SvdOutput` namedtuple
  (s, u, v).
  """
  compute_uv = _execute.make_bool(
      True if compute_uv is None else compute_uv, "compute_uv")
  full_matrices = _execute.make_bool(
      False if full_matrices is None else full_matrices, "full_matrices")
  _attr_T, (input,) = _execute.args_to_matching_eager(
      [input], ctx,
      [_dtypes.float64, _dtypes.float32, _dtypes.half, _dtypes.complex64,
       _dtypes.complex128, ])
  flat_inputs = [input]
  op_attrs = ("compute_uv", compute_uv, "full_matrices", full_matrices, "T",
              _attr_T)
  outputs = _execute.execute(b"Svd", 3, inputs=flat_inputs, attrs=op_attrs,
                             ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "Svd", flat_inputs, op_attrs, outputs)
  return _SvdOutput._make(outputs)
def tridiagonal_mat_mul(superdiag, maindiag, subdiag, rhs, name=None):
  r"""Calculate product with tridiagonal matrix.

  Calculates product of two matrices, where left matrix is a tridiagonal matrix.

  Args:
    superdiag: A `Tensor`. Must be one of the following types: `float64`, `float32`, `complex64`, `complex128`.
      Tensor of shape `[..., 1, M]`, representing superdiagonals of
      tri-diagonal matrices to the left of multiplication. Last element is ignored.
    maindiag: A `Tensor`. Must have the same type as `superdiag`.
      Tensor of shape `[..., 1, M]`, representing main diagonals of tri-diagonal
      matrices to the left of multiplication.
    subdiag: A `Tensor`. Must have the same type as `superdiag`.
      Tensor of shape `[..., 1, M]`, representing subdiagonals of tri-diagonal
      matrices to the left of multiplication. First element is ignored.
    rhs: A `Tensor`. Must have the same type as `superdiag`.
      Tensor of shape `[..., M, N]`, representing MxN matrices to the right of
      multiplication.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `superdiag`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: a single C call into the runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TridiagonalMatMul", name, superdiag, maindiag, subdiag, rhs)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path declined the inputs; retry via the Python eager fallback.
      pass
    try:
      return tridiagonal_mat_mul_eager_fallback(
          superdiag, maindiag, subdiag, rhs, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TridiagonalMatMul", superdiag=superdiag, maindiag=maindiag,
                             subdiag=subdiag, rhs=rhs, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture inputs/attrs so the gradient function can be replayed.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TridiagonalMatMul", _inputs_flat, _attrs, _result)
  _result, = _result
  return _result
# Expose the wrapper under the public raw-ops namespace (tf.raw_ops.TridiagonalMatMul).
TridiagonalMatMul = tf_export("raw_ops.TridiagonalMatMul")(_ops.to_raw_op(tridiagonal_mat_mul))
def tridiagonal_mat_mul_eager_fallback(superdiag, maindiag, subdiag, rhs, name, ctx):
  """Eager slow path for TridiagonalMatMul.

  Coerces the four inputs to a common allowed dtype, executes the op, and
  records a gradient when a tape is active. Returns the single output tensor.
  """
  attr_t, coerced = _execute.args_to_matching_eager(
      [superdiag, maindiag, subdiag, rhs], ctx,
      [_dtypes.float64, _dtypes.float32, _dtypes.complex64, _dtypes.complex128, ])
  superdiag, maindiag, subdiag, rhs = coerced
  flat_inputs = [superdiag, maindiag, subdiag, rhs]
  attrs = ("T", attr_t)
  outputs = _execute.execute(b"TridiagonalMatMul", 1, inputs=flat_inputs,
                             attrs=attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("TridiagonalMatMul", flat_inputs, attrs, outputs)
  result, = outputs
  return result
def tridiagonal_solve(diagonals, rhs, partial_pivoting=True, perturb_singular=False, name=None):
  r"""Solves tridiagonal systems of equations.

  Solves tridiagonal systems of equations.
  Supports batch dimensions and multiple right-hand sides per each left-hand
  side.
  On CPU, solution is computed via Gaussian elimination with or without partial
  pivoting, depending on `partial_pivoting` attribute. On GPU, Nvidia's cuSPARSE
  library is used: https://docs.nvidia.com/cuda/cusparse/index.html#gtsv
  Partial pivoting is not yet supported by XLA backends.

  Args:
    diagonals: A `Tensor`. Must be one of the following types: `float64`, `float32`, `complex64`, `complex128`.
      Tensor of shape `[..., 3, M]` whose innermost 2 dimensions represent the
      tridiagonal matrices with three rows being the superdiagonal, diagonals, and
      subdiagonals, in order. The last element of the superdiagonal and the first
      element of the subdiagonal is ignored.
    rhs: A `Tensor`. Must have the same type as `diagonals`.
      Tensor of shape `[..., M, K]`, representing K right-hand sides per each
      left-hand side.
    partial_pivoting: An optional `bool`. Defaults to `True`.
      Whether to apply partial pivoting. Partial pivoting makes the procedure more
      stable, but slower.
    perturb_singular: An optional `bool`. Defaults to `False`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `diagonals`.
  """
  ctx = _context._context or _context.context()
  if ctx._thread_local_data.is_eager:
    # Eager: attempt the C fast path first, then the Python fallback.
    try:
      return pywrap_tfe.TFE_Py_FastPathExecute(
          ctx, "TridiagonalSolve", name, diagonals, rhs, "partial_pivoting",
          partial_pivoting, "perturb_singular", perturb_singular)
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return tridiagonal_solve_eager_fallback(
          diagonals, rhs, partial_pivoting=partial_pivoting,
          perturb_singular=perturb_singular, name=name, ctx=ctx)
    except _core._SymbolicException:
      # Symbolic tensors cannot execute eagerly; build a graph node below.
      pass
  # Graph mode: normalize attr defaults, then register the op node.
  if partial_pivoting is None:
    partial_pivoting = True
  partial_pivoting = _execute.make_bool(partial_pivoting, "partial_pivoting")
  if perturb_singular is None:
    perturb_singular = False
  perturb_singular = _execute.make_bool(perturb_singular, "perturb_singular")
  _, _, op, op_outputs = _op_def_library._apply_op_helper(
      "TridiagonalSolve", diagonals=diagonals, rhs=rhs,
      partial_pivoting=partial_pivoting,
      perturb_singular=perturb_singular, name=name)
  outputs = op_outputs[:]
  if _execute.must_record_gradient():
    attrs = ("partial_pivoting", op._get_attr_bool("partial_pivoting"),
             "perturb_singular", op._get_attr_bool("perturb_singular"), "T",
             op._get_attr_type("T"))
    _execute.record_gradient("TridiagonalSolve", op.inputs, attrs, outputs)
  # Single-output op: unwrap the one-element list.
  result, = outputs
  return result
# Expose the wrapper under the public raw-ops namespace (tf.raw_ops.TridiagonalSolve).
TridiagonalSolve = tf_export("raw_ops.TridiagonalSolve")(_ops.to_raw_op(tridiagonal_solve))
def tridiagonal_solve_eager_fallback(diagonals, rhs, partial_pivoting, perturb_singular, name, ctx):
  """Eager slow path for TridiagonalSolve.

  Normalizes the boolean attrs, coerces `diagonals`/`rhs` to a common allowed
  dtype, executes the op, and records a gradient when a tape is active.
  Returns the single output tensor.
  """
  partial_pivoting = _execute.make_bool(
      True if partial_pivoting is None else partial_pivoting,
      "partial_pivoting")
  perturb_singular = _execute.make_bool(
      False if perturb_singular is None else perturb_singular,
      "perturb_singular")
  attr_t, coerced = _execute.args_to_matching_eager(
      [diagonals, rhs], ctx,
      [_dtypes.float64, _dtypes.float32, _dtypes.complex64, _dtypes.complex128, ])
  diagonals, rhs = coerced
  flat_inputs = [diagonals, rhs]
  attrs = ("partial_pivoting", partial_pivoting, "perturb_singular",
           perturb_singular, "T", attr_t)
  outputs = _execute.execute(b"TridiagonalSolve", 1, inputs=flat_inputs,
                             attrs=attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("TridiagonalSolve", flat_inputs, attrs, outputs)
  result, = outputs
  return result