Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/gen_sparse_ops.py: 9%
1413 statements
« prev ^ index » next coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
"""Python wrappers around TensorFlow ops.

This file is MACHINE GENERATED! Do not edit.
"""
6import collections
8from tensorflow.python import pywrap_tfe as pywrap_tfe
9from tensorflow.python.eager import context as _context
10from tensorflow.python.eager import core as _core
11from tensorflow.python.eager import execute as _execute
12from tensorflow.python.framework import dtypes as _dtypes
13from tensorflow.security.fuzzing.py import annotation_types as _atypes
15from tensorflow.python.framework import op_def_registry as _op_def_registry
16from tensorflow.python.framework import ops as _ops
17from tensorflow.python.framework import op_def_library as _op_def_library
18from tensorflow.python.util.deprecation import deprecated_endpoints
19from tensorflow.python.util import dispatch as _dispatch
20from tensorflow.python.util.tf_export import tf_export
22from typing import TypeVar
def add_many_sparse_to_tensors_map(sparse_indices, sparse_values, sparse_shape, container="", shared_name="", name=None):
  r"""Add an `N`-minibatch `SparseTensor` to a `SparseTensorsMap`, return `N` handles.

  A `SparseTensor` of rank `R` is represented by three tensors: `sparse_indices`,
  `sparse_values`, and `sparse_shape`, where

  ```sparse_indices.shape[1] == sparse_shape.shape[0] == R```

  An `N`-minibatch of `SparseTensor` objects is represented as a `SparseTensor`
  having a first `sparse_indices` column taking values between `[0, N)`, where
  the minibatch size `N == sparse_shape[0]`.

  The input `SparseTensor` must have rank `R` greater than 1, and the first
  dimension is treated as the minibatch dimension. Elements of the `SparseTensor`
  must be sorted in increasing order of this first dimension. The stored
  `SparseTensor` objects pointed to by each row of the output `sparse_handles`
  will have rank `R-1`.

  The `SparseTensor` values can then be read out as part of a minibatch by passing
  the given keys as vector elements to `TakeManySparseFromTensorsMap`. To ensure
  the correct `SparseTensorsMap` is accessed, ensure that the same
  `container` and `shared_name` are passed to that Op. If no `shared_name`
  is provided here, instead use the *name* of the Operation created by calling
  `AddManySparseToTensorsMap` as the `shared_name` passed to
  `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.

  Args:
    sparse_indices: A `Tensor` of type `int64`.
      2-D. The `indices` of the minibatch `SparseTensor`.
      `sparse_indices[:, 0]` must be ordered values in `[0, N)`.
    sparse_values: A `Tensor`.
      1-D. The `values` of the minibatch `SparseTensor`.
    sparse_shape: A `Tensor` of type `int64`.
      1-D. The `shape` of the minibatch `SparseTensor`.
      The minibatch size `N == sparse_shape[0]`.
    container: An optional `string`. Defaults to `""`.
      The container name for the `SparseTensorsMap` created by this op.
    shared_name: An optional `string`. Defaults to `""`.
      The shared name for the `SparseTensorsMap` created by this op.
      If blank, the new Operation's unique name is used.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: call straight into the C++ runtime. The flat
      # positional layout (ctx, op name, op-instance name, inputs...,
      # then alternating attr-name/attr-value pairs) is the
      # TFE_Py_FastPathExecute calling convention and must not change.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "AddManySparseToTensorsMap", name, sparse_indices,
        sparse_values, sparse_shape, "container", container, "shared_name",
        shared_name)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ error status into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the slow eager path below.
    try:
      return add_many_sparse_to_tensors_map_eager_fallback(
          sparse_indices, sparse_values, sparse_shape, container=container,
          shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: normalize string attrs and build an op node in the graph.
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "AddManySparseToTensorsMap", sparse_indices=sparse_indices,
                                     sparse_values=sparse_values,
                                     sparse_shape=sparse_shape,
                                     container=container,
                                     shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Attrs are read back from the created op so recorded values match
    # exactly what the op was built with.
    _attrs = ("T", _op._get_attr_type("T"), "container",
              _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "AddManySparseToTensorsMap", _inputs_flat, _attrs, _result)
  _result, = _result  # Unpack the op's single output tensor.
  return _result
# Expose the raw op under tf.raw_ops with its canonical CamelCase name.
AddManySparseToTensorsMap = tf_export("raw_ops.AddManySparseToTensorsMap")(_ops.to_raw_op(add_many_sparse_to_tensors_map))
def add_many_sparse_to_tensors_map_eager_fallback(sparse_indices, sparse_values, sparse_shape, container, shared_name, name, ctx):
  """Slow-path eager execution of AddManySparseToTensorsMap.

  Used when TFE_Py_FastPathExecute cannot handle the inputs: normalizes
  the string attrs, converts inputs to tensors, and executes the op
  through `_execute.execute`.
  """
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  # Infer the values dtype (attr "T") from the actual eager tensor.
  _attr_T, (sparse_values,) = _execute.args_to_matching_eager([sparse_values], ctx, [])
  sparse_indices = _ops.convert_to_tensor(sparse_indices, _dtypes.int64)
  sparse_shape = _ops.convert_to_tensor(sparse_shape, _dtypes.int64)
  _inputs_flat = [sparse_indices, sparse_values, sparse_shape]
  _attrs = ("T", _attr_T, "container", container, "shared_name", shared_name)
  # "1" is the op's declared number of outputs.
  _result = _execute.execute(b"AddManySparseToTensorsMap", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "AddManySparseToTensorsMap", _inputs_flat, _attrs, _result)
  _result, = _result  # Unpack the single output tensor.
  return _result
def add_sparse_to_tensors_map(sparse_indices, sparse_values, sparse_shape, container="", shared_name="", name=None):
  r"""Add a `SparseTensor` to a `SparseTensorsMap` return its handle.

  A `SparseTensor` is represented by three tensors: `sparse_indices`,
  `sparse_values`, and `sparse_shape`.

  This operator takes the given `SparseTensor` and adds it to a container
  object (a `SparseTensorsMap`). A unique key within this container is generated
  in the form of an `int64`, and this is the value that is returned.

  The `SparseTensor` can then be read out as part of a minibatch by passing
  the key as a vector element to `TakeManySparseFromTensorsMap`. To ensure
  the correct `SparseTensorsMap` is accessed, ensure that the same
  `container` and `shared_name` are passed to that Op. If no `shared_name`
  is provided here, instead use the *name* of the Operation created by calling
  `AddSparseToTensorsMap` as the `shared_name` passed to
  `TakeManySparseFromTensorsMap`. Ensure the Operations are colocated.

  Args:
    sparse_indices: A `Tensor` of type `int64`.
      2-D. The `indices` of the `SparseTensor`.
    sparse_values: A `Tensor`. 1-D. The `values` of the `SparseTensor`.
    sparse_shape: A `Tensor` of type `int64`.
      1-D. The `shape` of the `SparseTensor`.
    container: An optional `string`. Defaults to `""`.
      The container name for the `SparseTensorsMap` created by this op.
    shared_name: An optional `string`. Defaults to `""`.
      The shared name for the `SparseTensorsMap` created by this op.
      If blank, the new Operation's unique name is used.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: direct call into the C++ runtime; the positional
      # argument layout is the TFE_Py_FastPathExecute calling convention.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "AddSparseToTensorsMap", name, sparse_indices, sparse_values,
        sparse_shape, "container", container, "shared_name", shared_name)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ error status into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the slow eager path below.
    try:
      return add_sparse_to_tensors_map_eager_fallback(
          sparse_indices, sparse_values, sparse_shape, container=container,
          shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: normalize string attrs and build an op node in the graph.
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "AddSparseToTensorsMap", sparse_indices=sparse_indices,
                                 sparse_values=sparse_values,
                                 sparse_shape=sparse_shape,
                                 container=container, shared_name=shared_name,
                                 name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Attrs are read back from the created op for exact gradient recording.
    _attrs = ("T", _op._get_attr_type("T"), "container",
              _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "AddSparseToTensorsMap", _inputs_flat, _attrs, _result)
  _result, = _result  # Unpack the op's single output tensor.
  return _result
# Expose the raw op under tf.raw_ops with its canonical CamelCase name.
AddSparseToTensorsMap = tf_export("raw_ops.AddSparseToTensorsMap")(_ops.to_raw_op(add_sparse_to_tensors_map))
def add_sparse_to_tensors_map_eager_fallback(sparse_indices, sparse_values, sparse_shape, container, shared_name, name, ctx):
  """Slow-path eager execution of AddSparseToTensorsMap.

  Normalizes the string attrs, converts the inputs to eager tensors, and
  runs the op through `_execute.execute`, recording a gradient when needed.
  """
  container = _execute.make_str(
      "" if container is None else container, "container")
  shared_name = _execute.make_str(
      "" if shared_name is None else shared_name, "shared_name")
  # Infer the values dtype (attr "T") from the actual eager tensor.
  values_dtype, matched = _execute.args_to_matching_eager(
      [sparse_values], ctx, [])
  (values_t,) = matched
  indices_t = _ops.convert_to_tensor(sparse_indices, _dtypes.int64)
  shape_t = _ops.convert_to_tensor(sparse_shape, _dtypes.int64)
  flat_inputs = [indices_t, values_t, shape_t]
  op_attrs = ("T", values_dtype, "container", container,
              "shared_name", shared_name)
  # The op declares exactly one output: the int64 handle.
  outputs = _execute.execute(b"AddSparseToTensorsMap", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "AddSparseToTensorsMap", flat_inputs, op_attrs, outputs)
  (handle,) = outputs
  return handle
# Structured result type for DeserializeManySparse's three output tensors.
_DeserializeManySparseOutput = collections.namedtuple(
    "DeserializeManySparse",
    ["sparse_indices", "sparse_values", "sparse_shape"])
def deserialize_many_sparse(serialized_sparse, dtype, name=None):
  r"""Deserialize and concatenate `SparseTensors` from a serialized minibatch.

  The input `serialized_sparse` must be a string matrix of shape `[N x 3]` where
  `N` is the minibatch size and the rows correspond to packed outputs of
  `SerializeSparse`. The ranks of the original `SparseTensor` objects
  must all match. When the final `SparseTensor` is created, it has rank one
  higher than the ranks of the incoming `SparseTensor` objects
  (they have been concatenated along a new row dimension).

  The output `SparseTensor` object's shape values for all dimensions but the
  first are the max across the input `SparseTensor` objects' shape values
  for the corresponding dimensions. Its first shape value is `N`, the minibatch
  size.

  The input `SparseTensor` objects' indices are assumed ordered in
  standard lexicographic order. If this is not the case, after this
  step run `SparseReorder` to restore index ordering.

  For example, if the serialized input is a `[2 x 3]` matrix representing two
  original `SparseTensor` objects:

      index = [ 0]
              [10]
              [20]
      values = [1, 2, 3]
      shape = [50]

  and

      index = [ 2]
              [10]
      values = [4, 5]
      shape = [30]

  then the final deserialized `SparseTensor` will be:

      index = [0  0]
              [0 10]
              [0 20]
              [1  2]
              [1 10]
      values = [1, 2, 3, 4, 5]
      shape = [2 50]

  Args:
    serialized_sparse: A `Tensor` of type `string`.
      2-D, The `N` serialized `SparseTensor` objects.
      Must have 3 columns.
    dtype: A `tf.DType`. The `dtype` of the serialized `SparseTensor` objects.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sparse_indices, sparse_values, sparse_shape).

    sparse_indices: A `Tensor` of type `int64`.
    sparse_values: A `Tensor` of type `dtype`.
    sparse_shape: A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: direct call into the C++ runtime; the positional
      # argument layout is the TFE_Py_FastPathExecute calling convention.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "DeserializeManySparse", name, serialized_sparse, "dtype",
        dtype)
      _result = _DeserializeManySparseOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ error status into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the slow eager path below.
    try:
      return deserialize_many_sparse_eager_fallback(
          serialized_sparse, dtype=dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: validate the dtype attr and build an op node in the graph.
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "DeserializeManySparse", serialized_sparse=serialized_sparse,
                                 dtype=dtype, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("dtype", _op._get_attr_type("dtype"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "DeserializeManySparse", _inputs_flat, _attrs, _result)
  # Wrap the three output tensors in the named tuple result type.
  _result = _DeserializeManySparseOutput._make(_result)
  return _result
# Expose the raw op under tf.raw_ops with its canonical CamelCase name.
DeserializeManySparse = tf_export("raw_ops.DeserializeManySparse")(_ops.to_raw_op(deserialize_many_sparse))
def deserialize_many_sparse_eager_fallback(serialized_sparse, dtype, name, ctx):
  """Slow-path eager execution of DeserializeManySparse.

  Validates the dtype attr, converts the input to a string tensor, and
  runs the op through `_execute.execute`, recording a gradient when needed.
  """
  out_dtype = _execute.make_type(dtype, "dtype")
  serialized_t = _ops.convert_to_tensor(serialized_sparse, _dtypes.string)
  flat_inputs = [serialized_t]
  op_attrs = ("dtype", out_dtype)
  # The op declares three outputs: indices, values, shape.
  outputs = _execute.execute(b"DeserializeManySparse", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "DeserializeManySparse", flat_inputs, op_attrs, outputs)
  return _DeserializeManySparseOutput._make(outputs)
# Structured result type for DeserializeSparse's three output tensors.
_DeserializeSparseOutput = collections.namedtuple(
    "DeserializeSparse",
    ["sparse_indices", "sparse_values", "sparse_shape"])
def deserialize_sparse(serialized_sparse, dtype, name=None):
  r"""Deserialize `SparseTensor` objects.

  The input `serialized_sparse` must have the shape `[?, ?, ..., ?, 3]` where
  the last dimension stores serialized `SparseTensor` objects and the other N
  dimensions (N >= 0) correspond to a batch. The ranks of the original
  `SparseTensor` objects must all match. When the final `SparseTensor` is
  created, its rank is the rank of the incoming `SparseTensor` objects plus N;
  the sparse tensors have been concatenated along new dimensions, one for each
  batch.

  The output `SparseTensor` object's shape values for the original dimensions
  are the max across the input `SparseTensor` objects' shape values for the
  corresponding dimensions. The new dimensions match the size of the batch.

  The input `SparseTensor` objects' indices are assumed ordered in
  standard lexicographic order. If this is not the case, after this
  step run `SparseReorder` to restore index ordering.

  For example, if the serialized input is a `[2 x 3]` matrix representing two
  original `SparseTensor` objects:

      index = [ 0]
              [10]
              [20]
      values = [1, 2, 3]
      shape = [50]

  and

      index = [ 2]
              [10]
      values = [4, 5]
      shape = [30]

  then the final deserialized `SparseTensor` will be:

      index = [0  0]
              [0 10]
              [0 20]
              [1  2]
              [1 10]
      values = [1, 2, 3, 4, 5]
      shape = [2 50]

  Args:
    serialized_sparse: A `Tensor`. Must be one of the following types: `string`, `variant`.
      The serialized `SparseTensor` objects. The last dimension
      must have 3 columns.
    dtype: A `tf.DType`. The `dtype` of the serialized `SparseTensor` objects.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sparse_indices, sparse_values, sparse_shape).

    sparse_indices: A `Tensor` of type `int64`.
    sparse_values: A `Tensor` of type `dtype`.
    sparse_shape: A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: direct call into the C++ runtime; the positional
      # argument layout is the TFE_Py_FastPathExecute calling convention.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "DeserializeSparse", name, serialized_sparse, "dtype", dtype)
      _result = _DeserializeSparseOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ error status into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the slow eager path below.
    try:
      return deserialize_sparse_eager_fallback(
          serialized_sparse, dtype=dtype, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: validate the dtype attr and build an op node in the graph.
  dtype = _execute.make_type(dtype, "dtype")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "DeserializeSparse", serialized_sparse=serialized_sparse, dtype=dtype,
                             name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # "Tserialized" records whether the input was string or variant.
    _attrs = ("dtype", _op._get_attr_type("dtype"), "Tserialized",
              _op._get_attr_type("Tserialized"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "DeserializeSparse", _inputs_flat, _attrs, _result)
  # Wrap the three output tensors in the named tuple result type.
  _result = _DeserializeSparseOutput._make(_result)
  return _result
# Expose the raw op under tf.raw_ops with its canonical CamelCase name.
DeserializeSparse = tf_export("raw_ops.DeserializeSparse")(_ops.to_raw_op(deserialize_sparse))
def deserialize_sparse_eager_fallback(serialized_sparse, dtype, name, ctx):
  """Slow-path eager execution of DeserializeSparse.

  Validates the dtype attr, matches the serialized input against the
  allowed string/variant types, and executes the op via _execute.
  """
  dtype = _execute.make_type(dtype, "dtype")
  # Attr "Tserialized" may be string or variant; default is string.
  _attr_Tserialized, (serialized_sparse,) = _execute.args_to_matching_eager([serialized_sparse], ctx, [_dtypes.string, _dtypes.variant, ], _dtypes.string)
  _inputs_flat = [serialized_sparse]
  _attrs = ("dtype", dtype, "Tserialized", _attr_Tserialized)
  # "3" is the op's declared number of outputs (indices, values, shape).
  _result = _execute.execute(b"DeserializeSparse", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "DeserializeSparse", _inputs_flat, _attrs, _result)
  _result = _DeserializeSparseOutput._make(_result)
  return _result
def serialize_many_sparse(sparse_indices, sparse_values, sparse_shape, out_type=_dtypes.string, name=None):
  r"""Serialize an `N`-minibatch `SparseTensor` into an `[N, 3]` `Tensor` object.

  The `SparseTensor` must have rank `R` greater than 1, and the first dimension
  is treated as the minibatch dimension. Elements of the `SparseTensor`
  must be sorted in increasing order of this first dimension. The serialized
  `SparseTensor` objects going into each row of `serialized_sparse` will have
  rank `R-1`.

  The minibatch size `N` is extracted from `sparse_shape[0]`.

  Args:
    sparse_indices: A `Tensor` of type `int64`.
      2-D. The `indices` of the minibatch `SparseTensor`.
    sparse_values: A `Tensor`.
      1-D. The `values` of the minibatch `SparseTensor`.
    sparse_shape: A `Tensor` of type `int64`.
      1-D. The `shape` of the minibatch `SparseTensor`.
    out_type: An optional `tf.DType` from: `tf.string, tf.variant`. Defaults to `tf.string`.
      The `dtype` to use for serialization; the supported types are `string`
      (default) and `variant`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: direct call into the C++ runtime; the positional
      # argument layout is the TFE_Py_FastPathExecute calling convention.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SerializeManySparse", name, sparse_indices, sparse_values,
        sparse_shape, "out_type", out_type)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ error status into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the slow eager path below.
    try:
      return serialize_many_sparse_eager_fallback(
          sparse_indices, sparse_values, sparse_shape, out_type=out_type,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: normalize out_type and build an op node in the graph.
  if out_type is None:
    out_type = _dtypes.string
  out_type = _execute.make_type(out_type, "out_type")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SerializeManySparse", sparse_indices=sparse_indices,
                               sparse_values=sparse_values,
                               sparse_shape=sparse_shape, out_type=out_type,
                               name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "out_type",
              _op._get_attr_type("out_type"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SerializeManySparse", _inputs_flat, _attrs, _result)
  _result, = _result  # Unpack the op's single output tensor.
  return _result
# Expose the raw op under tf.raw_ops with its canonical CamelCase name.
SerializeManySparse = tf_export("raw_ops.SerializeManySparse")(_ops.to_raw_op(serialize_many_sparse))
def serialize_many_sparse_eager_fallback(sparse_indices, sparse_values, sparse_shape, out_type, name, ctx):
  """Slow-path eager execution of SerializeManySparse.

  Normalizes the out_type attr, converts inputs to tensors, and executes
  the op via _execute.
  """
  if out_type is None:
    out_type = _dtypes.string
  out_type = _execute.make_type(out_type, "out_type")
  # Infer the values dtype (attr "T") from the actual eager tensor.
  _attr_T, (sparse_values,) = _execute.args_to_matching_eager([sparse_values], ctx, [])
  sparse_indices = _ops.convert_to_tensor(sparse_indices, _dtypes.int64)
  sparse_shape = _ops.convert_to_tensor(sparse_shape, _dtypes.int64)
  _inputs_flat = [sparse_indices, sparse_values, sparse_shape]
  _attrs = ("T", _attr_T, "out_type", out_type)
  # "1" is the op's declared number of outputs.
  _result = _execute.execute(b"SerializeManySparse", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SerializeManySparse", _inputs_flat, _attrs, _result)
  _result, = _result  # Unpack the single output tensor.
  return _result
def serialize_sparse(sparse_indices, sparse_values, sparse_shape, out_type=_dtypes.string, name=None):
  r"""Serialize a `SparseTensor` into a `[3]` `Tensor` object.

  Args:
    sparse_indices: A `Tensor` of type `int64`.
      2-D. The `indices` of the `SparseTensor`.
    sparse_values: A `Tensor`. 1-D. The `values` of the `SparseTensor`.
    sparse_shape: A `Tensor` of type `int64`.
      1-D. The `shape` of the `SparseTensor`.
    out_type: An optional `tf.DType` from: `tf.string, tf.variant`. Defaults to `tf.string`.
      The `dtype` to use for serialization; the supported types are `string`
      (default) and `variant`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor` of type `out_type`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: direct call into the C++ runtime; the positional
      # argument layout is the TFE_Py_FastPathExecute calling convention.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SerializeSparse", name, sparse_indices, sparse_values,
        sparse_shape, "out_type", out_type)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ error status into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the slow eager path below.
    try:
      return serialize_sparse_eager_fallback(
          sparse_indices, sparse_values, sparse_shape, out_type=out_type,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: normalize out_type and build an op node in the graph.
  if out_type is None:
    out_type = _dtypes.string
  out_type = _execute.make_type(out_type, "out_type")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SerializeSparse", sparse_indices=sparse_indices,
                           sparse_values=sparse_values,
                           sparse_shape=sparse_shape, out_type=out_type,
                           name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"), "out_type",
              _op._get_attr_type("out_type"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SerializeSparse", _inputs_flat, _attrs, _result)
  _result, = _result  # Unpack the op's single output tensor.
  return _result
# Expose the raw op under tf.raw_ops with its canonical CamelCase name.
SerializeSparse = tf_export("raw_ops.SerializeSparse")(_ops.to_raw_op(serialize_sparse))
def serialize_sparse_eager_fallback(sparse_indices, sparse_values, sparse_shape, out_type, name, ctx):
  """Slow-path eager execution of SerializeSparse.

  Normalizes the out_type attr, converts the inputs to eager tensors, and
  runs the op through `_execute.execute`, recording a gradient when needed.
  """
  serialization_dtype = _execute.make_type(
      _dtypes.string if out_type is None else out_type, "out_type")
  # Infer the values dtype (attr "T") from the actual eager tensor.
  values_dtype, matched = _execute.args_to_matching_eager(
      [sparse_values], ctx, [])
  (values_t,) = matched
  indices_t = _ops.convert_to_tensor(sparse_indices, _dtypes.int64)
  shape_t = _ops.convert_to_tensor(sparse_shape, _dtypes.int64)
  flat_inputs = [indices_t, values_t, shape_t]
  op_attrs = ("T", values_dtype, "out_type", serialization_dtype)
  # The op declares exactly one output: the serialized representation.
  outputs = _execute.execute(b"SerializeSparse", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("SerializeSparse", flat_inputs, op_attrs,
                             outputs)
  (serialized,) = outputs
  return serialized
# Structured result type for SparseAdd's three output tensors.
_SparseAddOutput = collections.namedtuple(
    "SparseAdd",
    ["sum_indices", "sum_values", "sum_shape"])
def sparse_add(a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh, name=None):
  r"""Adds two `SparseTensor` objects to produce another `SparseTensor`.

  The input `SparseTensor` objects' indices are assumed ordered in standard
  lexicographic order. If this is not the case, before this step run
  `SparseReorder` to restore index ordering.

  By default, if two values sum to zero at some index, the output `SparseTensor`
  would still include that particular location in its index, storing a zero in the
  corresponding value slot. To override this, callers can specify `thresh`,
  indicating that if the sum has a magnitude strictly smaller than `thresh`, its
  corresponding value and index would then not be included. In particular,
  `thresh == 0` (default) means everything is kept and actual thresholding happens
  only for a positive value.

  In the following shapes, `nnz` is the count after taking `thresh` into account.

  Args:
    a_indices: A `Tensor` of type `int64`.
      2-D. The `indices` of the first `SparseTensor`, size `[nnz, ndims]` Matrix.
    a_values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      1-D. The `values` of the first `SparseTensor`, size `[nnz]` Vector.
    a_shape: A `Tensor` of type `int64`.
      1-D. The `shape` of the first `SparseTensor`, size `[ndims]` Vector.
    b_indices: A `Tensor` of type `int64`.
      2-D. The `indices` of the second `SparseTensor`, size `[nnz, ndims]` Matrix.
    b_values: A `Tensor`. Must have the same type as `a_values`.
      1-D. The `values` of the second `SparseTensor`, size `[nnz]` Vector.
    b_shape: A `Tensor` of type `int64`.
      1-D. The `shape` of the second `SparseTensor`, size `[ndims]` Vector.
    thresh: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      0-D. The magnitude threshold that determines if an output value/index
      pair takes space.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sum_indices, sum_values, sum_shape).

    sum_indices: A `Tensor` of type `int64`.
    sum_values: A `Tensor`. Has the same type as `a_values`.
    sum_shape: A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: direct call into the C++ runtime; the positional
      # argument layout is the TFE_Py_FastPathExecute calling convention.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseAdd", name, a_indices, a_values, a_shape, b_indices,
        b_values, b_shape, thresh)
      _result = _SparseAddOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ error status into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the slow eager path below.
    try:
      return sparse_add_eager_fallback(
          a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: build an op node in the current graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseAdd", a_indices=a_indices, a_values=a_values, a_shape=a_shape,
                     b_indices=b_indices, b_values=b_values, b_shape=b_shape,
                     thresh=thresh, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # "Treal" is the (real-valued) dtype of the thresh input.
    _attrs = ("T", _op._get_attr_type("T"), "Treal",
              _op._get_attr_type("Treal"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseAdd", _inputs_flat, _attrs, _result)
  # Wrap the three output tensors in the named tuple result type.
  _result = _SparseAddOutput._make(_result)
  return _result
# Expose the raw op under tf.raw_ops with its canonical CamelCase name.
SparseAdd = tf_export("raw_ops.SparseAdd")(_ops.to_raw_op(sparse_add))
def sparse_add_eager_fallback(a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh, name, ctx):
  """Slow-path eager execution of SparseAdd.

  Matches a_values/b_values to a common dtype (attr "T"), matches thresh
  to a real dtype (attr "Treal"), converts the index/shape inputs to
  int64 tensors, and executes the op via _execute.
  """
  # a_values and b_values must resolve to the same dtype T.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([a_values, b_values], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  (a_values, b_values) = _inputs_T
  # thresh is restricted to real (non-complex, non-quantized) dtypes.
  _attr_Treal, (thresh,) = _execute.args_to_matching_eager([thresh], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  a_indices = _ops.convert_to_tensor(a_indices, _dtypes.int64)
  a_shape = _ops.convert_to_tensor(a_shape, _dtypes.int64)
  b_indices = _ops.convert_to_tensor(b_indices, _dtypes.int64)
  b_shape = _ops.convert_to_tensor(b_shape, _dtypes.int64)
  _inputs_flat = [a_indices, a_values, a_shape, b_indices, b_values, b_shape, thresh]
  _attrs = ("T", _attr_T, "Treal", _attr_Treal)
  # "3" is the op's declared number of outputs (indices, values, shape).
  _result = _execute.execute(b"SparseAdd", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseAdd", _inputs_flat, _attrs, _result)
  _result = _SparseAddOutput._make(_result)
  return _result
# Structured result type for SparseAddGrad's two output tensors.
_SparseAddGradOutput = collections.namedtuple(
    "SparseAddGrad",
    ["a_val_grad", "b_val_grad"])
def sparse_add_grad(backprop_val_grad, a_indices, b_indices, sum_indices, name=None):
  r"""The gradient operator for the SparseAdd op.

  The SparseAdd op calculates A + B, where A, B, and the sum are all represented
  as `SparseTensor` objects. This op takes in the upstream gradient w.r.t.
  non-empty values of the sum, and outputs the gradients w.r.t. the non-empty
  values of A and B.

  Args:
    backprop_val_grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      1-D with shape `[nnz(sum)]`. The gradient with respect to
      the non-empty values of the sum.
    a_indices: A `Tensor` of type `int64`.
      2-D. The `indices` of the `SparseTensor` A, size `[nnz(A), ndims]`.
    b_indices: A `Tensor` of type `int64`.
      2-D. The `indices` of the `SparseTensor` B, size `[nnz(B), ndims]`.
    sum_indices: A `Tensor` of type `int64`.
      2-D. The `indices` of the sum `SparseTensor`, size
      `[nnz(sum), ndims]`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (a_val_grad, b_val_grad).

    a_val_grad: A `Tensor`. Has the same type as `backprop_val_grad`.
    b_val_grad: A `Tensor`. Has the same type as `backprop_val_grad`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Eager fast path: direct call into the C++ runtime; the positional
      # argument layout is the TFE_Py_FastPathExecute calling convention.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseAddGrad", name, backprop_val_grad, a_indices, b_indices,
        sum_indices)
      _result = _SparseAddGradOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Translate the C++ error status into the matching Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; use the slow eager path below.
    try:
      return sparse_add_grad_eager_fallback(
          backprop_val_grad, a_indices, b_indices, sum_indices, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Graph mode: build an op node in the current graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseAddGrad", backprop_val_grad=backprop_val_grad,
                         a_indices=a_indices, b_indices=b_indices,
                         sum_indices=sum_indices, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseAddGrad", _inputs_flat, _attrs, _result)
  # Wrap the two output tensors in the named tuple result type.
  _result = _SparseAddGradOutput._make(_result)
  return _result
# Raw-op endpoint: exposes this wrapper as tf.raw_ops.SparseAddGrad.
SparseAddGrad = tf_export("raw_ops.SparseAddGrad")(_ops.to_raw_op(sparse_add_grad))
def sparse_add_grad_eager_fallback(backprop_val_grad, a_indices, b_indices, sum_indices, name, ctx):
  """Slow-path eager execution of SparseAddGrad.

  Resolves the `T` attr from `backprop_val_grad`, coerces the index inputs to
  int64 tensors, and runs the op via `_execute.execute`.
  """
  _attr_T, (backprop_val_grad,) = _execute.args_to_matching_eager([backprop_val_grad], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  a_indices = _ops.convert_to_tensor(a_indices, _dtypes.int64)
  b_indices = _ops.convert_to_tensor(b_indices, _dtypes.int64)
  sum_indices = _ops.convert_to_tensor(sum_indices, _dtypes.int64)
  _inputs_flat = [backprop_val_grad, a_indices, b_indices, sum_indices]
  _attrs = ("T", _attr_T)
  # SparseAddGrad has 2 outputs: (a_val_grad, b_val_grad).
  _result = _execute.execute(b"SparseAddGrad", 2, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseAddGrad", _inputs_flat, _attrs, _result)
  _result = _SparseAddGradOutput._make(_result)
  return _result
# Structured result holder for the SparseConcat op:
# (output_indices, output_values, output_shape).
_SparseConcatOutput = collections.namedtuple(
    "SparseConcat",
    ["output_indices", "output_values", "output_shape"])
def sparse_concat(indices, values, shapes, concat_dim, name=None):
  r"""Concatenates a list of `SparseTensor` along the specified dimension.

  Concatenation is with respect to the dense versions of these sparse tensors.
  It is assumed that each input is a `SparseTensor` whose elements are ordered
  along increasing dimension number.

  All inputs' shapes must match, except for the concat dimension.  The
  `indices`, `values`, and `shapes` lists must have the same length.

  The output shape is identical to the inputs', except along the concat
  dimension, where it is the sum of the inputs' sizes along that dimension.

  The output elements will be resorted to preserve the sort order along
  increasing dimension number.

  This op runs in `O(M log M)` time, where `M` is the total number of non-empty
  values across all inputs. This is due to the need for an internal sort in
  order to concatenate efficiently across an arbitrary dimension.

  For example, if `concat_dim = 1` and the inputs are

      sp_inputs[0]: shape = [2, 3]
      [0, 2]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      sp_inputs[1]: shape = [2, 4]
      [0, 1]: "d"
      [0, 2]: "e"

  then the output will be

      shape = [2, 7]
      [0, 2]: "a"
      [0, 4]: "d"
      [0, 5]: "e"
      [1, 0]: "b"
      [1, 1]: "c"

  Graphically this is equivalent to doing

      [    a] concat [  d e  ] = [    a   d e  ]
      [b c  ]        [       ]   [b c          ]

  Args:
    indices: A list of at least 2 `Tensor` objects with type `int64`.
      2-D.  Indices of each input `SparseTensor`.
    values: A list with the same length as `indices` of `Tensor` objects with the same type.
      1-D.  Non-empty values of each `SparseTensor`.
    shapes: A list with the same length as `indices` of `Tensor` objects with type `int64`.
      1-D.  Shapes of each `SparseTensor`.
    concat_dim: An `int`.
      Dimension to concatenate along. Must be in range [-rank, rank),
      where rank is the number of dimensions in each input `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values, output_shape).

    output_indices: A `Tensor` of type `int64`.
    output_values: A `Tensor`. Has the same type as `values`.
    output_shape: A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path; attr ("concat_dim") is passed as
    # a name/value pair after the tensor inputs.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseConcat", name, indices, values, shapes, "concat_dim",
        concat_dim)
      _result = _SparseConcatOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return sparse_concat_eager_fallback(
          indices, values, shapes, concat_dim=concat_dim, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Validate the list inputs; N (the list length) is an op attr derived
  # from `indices`, and `values`/`shapes` must match it.
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'sparse_concat' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'sparse_concat' Op, not %r." % values)
  if len(values) != _attr_N:
    raise ValueError(
        "List argument 'values' to 'sparse_concat' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(values), _attr_N))
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'sparse_concat' Op, not %r." % shapes)
  if len(shapes) != _attr_N:
    raise ValueError(
        "List argument 'shapes' to 'sparse_concat' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(shapes), _attr_N))
  concat_dim = _execute.make_int(concat_dim, "concat_dim")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseConcat", indices=indices, values=values, shapes=shapes,
                        concat_dim=concat_dim, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("concat_dim", _op._get_attr_int("concat_dim"), "N",
              _op._get_attr_int("N"), "T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseConcat", _inputs_flat, _attrs, _result)
  _result = _SparseConcatOutput._make(_result)
  return _result
# Raw-op endpoint: exposes this wrapper as tf.raw_ops.SparseConcat.
SparseConcat = tf_export("raw_ops.SparseConcat")(_ops.to_raw_op(sparse_concat))
def sparse_concat_eager_fallback(indices, values, shapes, concat_dim, name, ctx):
  """Slow-path eager execution of SparseConcat.

  Re-runs the same list-length validation as the graph path, resolves the
  `T` attr from `values`, flattens the inputs, and executes the op.
  """
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'sparse_concat' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(values, (list, tuple)):
    raise TypeError(
        "Expected list for 'values' argument to "
        "'sparse_concat' Op, not %r." % values)
  if len(values) != _attr_N:
    raise ValueError(
        "List argument 'values' to 'sparse_concat' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(values), _attr_N))
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'sparse_concat' Op, not %r." % shapes)
  if len(shapes) != _attr_N:
    raise ValueError(
        "List argument 'shapes' to 'sparse_concat' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(shapes), _attr_N))
  concat_dim = _execute.make_int(concat_dim, "concat_dim")
  # Empty allowed-types list: any single matching dtype is accepted for T.
  _attr_T, values = _execute.args_to_matching_eager(list(values), ctx, [])
  indices = _ops.convert_n_to_tensor(indices, _dtypes.int64)
  shapes = _ops.convert_n_to_tensor(shapes, _dtypes.int64)
  # Flat input order must match the op def: indices, values, shapes.
  _inputs_flat = list(indices) + list(values) + list(shapes)
  _attrs = ("concat_dim", concat_dim, "N", _attr_N, "T", _attr_T)
  _result = _execute.execute(b"SparseConcat", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseConcat", _inputs_flat, _attrs, _result)
  _result = _SparseConcatOutput._make(_result)
  return _result
# Structured result holder for the SparseCross op:
# (output_indices, output_values, output_shape).
_SparseCrossOutput = collections.namedtuple(
    "SparseCross",
    ["output_indices", "output_values", "output_shape"])
def sparse_cross(indices, values, shapes, dense_inputs, hashed_output, num_buckets, hash_key, out_type, internal_type, name=None):
  r"""Generates sparse cross from a list of sparse and dense tensors.

  The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
  representing features of one feature column. It outputs a 2D `SparseTensor` with
  the batchwise crosses of these features.

  For example, if the inputs are

      inputs[0]: SparseTensor with shape = [2, 2]
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      inputs[1]: SparseTensor with shape = [2, 1]
      [0, 0]: "d"
      [1, 0]: "e"

      inputs[2]: Tensor [["f"], ["g"]]

  then the output will be

      shape = [2, 2]
      [0, 0]: "a_X_d_X_f"
      [1, 0]: "b_X_e_X_g"
      [1, 1]: "c_X_e_X_g"

  if hashed_output=true then the output will be

      shape = [2, 2]
      [0, 0]: FingerprintCat64(
                  Fingerprint64("f"), FingerprintCat64(
                      Fingerprint64("d"), Fingerprint64("a")))
      [1, 0]: FingerprintCat64(
                  Fingerprint64("g"), FingerprintCat64(
                      Fingerprint64("e"), Fingerprint64("b")))
      [1, 1]: FingerprintCat64(
                  Fingerprint64("g"), FingerprintCat64(
                      Fingerprint64("e"), Fingerprint64("c")))

  Args:
    indices: A list of `Tensor` objects with type `int64`.
      2-D.  Indices of each input `SparseTensor`.
    values: A list of `Tensor` objects with types from: `int64`, `string`.
      1-D.   values of each `SparseTensor`.
    shapes: A list with the same length as `indices` of `Tensor` objects with type `int64`.
      1-D.   Shapes of each `SparseTensor`.
    dense_inputs: A list of `Tensor` objects with types from: `int64`, `string`.
      2-D.    Columns represented by dense `Tensor`.
    hashed_output: A `bool`.
      If true, returns the hash of the cross instead of the string.
      This will allow us avoiding string manipulations.
    num_buckets: An `int` that is `>= 0`. It is used if hashed_output is true.
      output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
    hash_key: An `int`.
      Specify the hash_key that will be used by the `FingerprintCat64`
      function to combine the crosses fingerprints.
    out_type: A `tf.DType` from: `tf.int64, tf.string`.
    internal_type: A `tf.DType` from: `tf.int64, tf.string`.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values, output_shape).

    output_indices: A `Tensor` of type `int64`.
    output_values: A `Tensor` of type `out_type`.
    output_shape: A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path; op attrs are encoded positionally
    # as name/value pairs after the tensor inputs.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseCross", name, indices, values, shapes, dense_inputs,
        "hashed_output", hashed_output, "num_buckets", num_buckets,
        "hash_key", hash_key, "out_type", out_type, "internal_type",
        internal_type)
      _result = _SparseCrossOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return sparse_cross_eager_fallback(
          indices, values, shapes, dense_inputs, hashed_output=hashed_output,
          num_buckets=num_buckets, hash_key=hash_key, out_type=out_type,
          internal_type=internal_type, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # N (attr) is the number of sparse inputs; `shapes` must have the same
  # length as `indices`.  (`values`/`dense_inputs` carry their own type
  # list attrs and are validated by _apply_op_helper.)
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'sparse_cross' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'sparse_cross' Op, not %r." % shapes)
  if len(shapes) != _attr_N:
    raise ValueError(
        "List argument 'shapes' to 'sparse_cross' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(shapes), _attr_N))
  hashed_output = _execute.make_bool(hashed_output, "hashed_output")
  num_buckets = _execute.make_int(num_buckets, "num_buckets")
  hash_key = _execute.make_int(hash_key, "hash_key")
  out_type = _execute.make_type(out_type, "out_type")
  internal_type = _execute.make_type(internal_type, "internal_type")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseCross", indices=indices, values=values, shapes=shapes,
                       dense_inputs=dense_inputs, hashed_output=hashed_output,
                       num_buckets=num_buckets, hash_key=hash_key,
                       out_type=out_type, internal_type=internal_type,
                       name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("N", _op._get_attr_int("N"), "hashed_output",
              _op._get_attr_bool("hashed_output"), "num_buckets",
              _op._get_attr_int("num_buckets"), "hash_key",
              _op._get_attr_int("hash_key"), "sparse_types",
              _op.get_attr("sparse_types"), "dense_types",
              _op.get_attr("dense_types"), "out_type",
              _op._get_attr_type("out_type"), "internal_type",
              _op._get_attr_type("internal_type"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseCross", _inputs_flat, _attrs, _result)
  _result = _SparseCrossOutput._make(_result)
  return _result
# Raw-op endpoint: exposes this wrapper as tf.raw_ops.SparseCross.
SparseCross = tf_export("raw_ops.SparseCross")(_ops.to_raw_op(sparse_cross))
def sparse_cross_eager_fallback(indices, values, shapes, dense_inputs, hashed_output, num_buckets, hash_key, out_type, internal_type, name, ctx):
  """Slow-path eager execution of SparseCross.

  Validates list lengths, converts the mixed-type `values`/`dense_inputs`
  lists to eager tensors (recording their dtypes as the sparse_types /
  dense_types attrs), and executes the op.
  """
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'sparse_cross' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'sparse_cross' Op, not %r." % shapes)
  if len(shapes) != _attr_N:
    raise ValueError(
        "List argument 'shapes' to 'sparse_cross' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(shapes), _attr_N))
  hashed_output = _execute.make_bool(hashed_output, "hashed_output")
  num_buckets = _execute.make_int(num_buckets, "num_buckets")
  hash_key = _execute.make_int(hash_key, "hash_key")
  out_type = _execute.make_type(out_type, "out_type")
  internal_type = _execute.make_type(internal_type, "internal_type")
  _attr_sparse_types, values = _execute.convert_to_mixed_eager_tensors(values, ctx)
  _attr_dense_types, dense_inputs = _execute.convert_to_mixed_eager_tensors(dense_inputs, ctx)
  indices = _ops.convert_n_to_tensor(indices, _dtypes.int64)
  shapes = _ops.convert_n_to_tensor(shapes, _dtypes.int64)
  # Flat input order must match the op def: indices, values, shapes, dense.
  _inputs_flat = list(indices) + list(values) + list(shapes) + list(dense_inputs)
  _attrs = ("N", _attr_N, "hashed_output", hashed_output, "num_buckets",
  num_buckets, "hash_key", hash_key, "sparse_types", _attr_sparse_types,
  "dense_types", _attr_dense_types, "out_type", out_type, "internal_type",
  internal_type)
  _result = _execute.execute(b"SparseCross", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseCross", _inputs_flat, _attrs, _result)
  _result = _SparseCrossOutput._make(_result)
  return _result
# Structured result holder for the SparseCrossHashed op:
# (output_indices, output_values, output_shape).
_SparseCrossHashedOutput = collections.namedtuple(
    "SparseCrossHashed",
    ["output_indices", "output_values", "output_shape"])
def sparse_cross_hashed(indices, values, shapes, dense_inputs, num_buckets, strong_hash, salt, name=None):
  r"""Generates sparse cross from a list of sparse and dense tensors.

  The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
  representing features of one feature column. It outputs a 2D `SparseTensor` with
  the batchwise crosses of these features.

  For example, if the inputs are

      inputs[0]: SparseTensor with shape = [2, 2]
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      inputs[1]: SparseTensor with shape = [2, 1]
      [0, 0]: "d"
      [1, 0]: "e"

      inputs[2]: Tensor [["f"], ["g"]]

  then the output will be

      shape = [2, 2]
      [0, 0]: "a_X_d_X_f"
      [1, 0]: "b_X_e_X_g"
      [1, 1]: "c_X_e_X_g"

  if hashed_output=true then the output will be

      shape = [2, 2]
      [0, 0]: FingerprintCat64(
                  Fingerprint64("f"), FingerprintCat64(
                      Fingerprint64("d"), Fingerprint64("a")))
      [1, 0]: FingerprintCat64(
                  Fingerprint64("g"), FingerprintCat64(
                      Fingerprint64("e"), Fingerprint64("b")))
      [1, 1]: FingerprintCat64(
                  Fingerprint64("g"), FingerprintCat64(
                      Fingerprint64("e"), Fingerprint64("c")))

  Args:
    indices: A list of `Tensor` objects with type `int64`.
      2-D.  Indices of each input `SparseTensor`.
    values: A list of `Tensor` objects with types from: `int64`, `string`.
      1-D.   values of each `SparseTensor`.
    shapes: A list with the same length as `indices` of `Tensor` objects with type `int64`.
      1-D.   Shapes of each `SparseTensor`.
    dense_inputs: A list of `Tensor` objects with types from: `int64`, `string`.
      2-D.    Columns represented by dense `Tensor`.
    num_buckets: A `Tensor` of type `int64`.
      It is used if hashed_output is true.
      output = hashed_value%num_buckets if num_buckets > 0 else hashed_value.
    strong_hash: A `Tensor` of type `bool`.
      boolean, if true, siphash with salt will be used instead of farmhash.
    salt: A `Tensor` of type `int64`.
      Specify the salt that will be used by the siphash function.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values, output_shape).

    output_indices: A `Tensor` of type `int64`.
    output_values: A `Tensor` of type `int64`.
    output_shape: A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path.  Unlike SparseCross, the hashing
    # controls (num_buckets, strong_hash, salt) are tensor inputs, not attrs.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseCrossHashed", name, indices, values, shapes,
        dense_inputs, num_buckets, strong_hash, salt)
      _result = _SparseCrossHashedOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return sparse_cross_hashed_eager_fallback(
          indices, values, shapes, dense_inputs, num_buckets, strong_hash,
          salt, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'sparse_cross_hashed' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'sparse_cross_hashed' Op, not %r." % shapes)
  if len(shapes) != _attr_N:
    raise ValueError(
        "List argument 'shapes' to 'sparse_cross_hashed' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(shapes), _attr_N))
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseCrossHashed", indices=indices, values=values, shapes=shapes,
                             dense_inputs=dense_inputs,
                             num_buckets=num_buckets, strong_hash=strong_hash,
                             salt=salt, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("N", _op._get_attr_int("N"), "sparse_types",
              _op.get_attr("sparse_types"), "dense_types",
              _op.get_attr("dense_types"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseCrossHashed", _inputs_flat, _attrs, _result)
  _result = _SparseCrossHashedOutput._make(_result)
  return _result
# Raw-op endpoint: exposes this wrapper as tf.raw_ops.SparseCrossHashed.
SparseCrossHashed = tf_export("raw_ops.SparseCrossHashed")(_ops.to_raw_op(sparse_cross_hashed))
def sparse_cross_hashed_eager_fallback(indices, values, shapes, dense_inputs, num_buckets, strong_hash, salt, name, ctx):
  """Slow-path eager execution of SparseCrossHashed.

  Validates list lengths, converts the mixed-type lists and the scalar
  hashing controls to eager tensors, and executes the op.
  """
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'sparse_cross_hashed' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'sparse_cross_hashed' Op, not %r." % shapes)
  if len(shapes) != _attr_N:
    raise ValueError(
        "List argument 'shapes' to 'sparse_cross_hashed' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(shapes), _attr_N))
  _attr_sparse_types, values = _execute.convert_to_mixed_eager_tensors(values, ctx)
  _attr_dense_types, dense_inputs = _execute.convert_to_mixed_eager_tensors(dense_inputs, ctx)
  indices = _ops.convert_n_to_tensor(indices, _dtypes.int64)
  shapes = _ops.convert_n_to_tensor(shapes, _dtypes.int64)
  num_buckets = _ops.convert_to_tensor(num_buckets, _dtypes.int64)
  strong_hash = _ops.convert_to_tensor(strong_hash, _dtypes.bool)
  salt = _ops.convert_to_tensor(salt, _dtypes.int64)
  # Flat input order must match the op def; the three hashing controls are
  # trailing single-tensor inputs.
  _inputs_flat = list(indices) + list(values) + list(shapes) + list(dense_inputs) + [num_buckets, strong_hash, salt]
  _attrs = ("N", _attr_N, "sparse_types", _attr_sparse_types, "dense_types",
  _attr_dense_types)
  _result = _execute.execute(b"SparseCrossHashed", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseCrossHashed", _inputs_flat, _attrs, _result)
  _result = _SparseCrossHashedOutput._make(_result)
  return _result
# Structured result holder for the SparseCrossV2 op:
# (output_indices, output_values, output_shape).
_SparseCrossV2Output = collections.namedtuple(
    "SparseCrossV2",
    ["output_indices", "output_values", "output_shape"])
def sparse_cross_v2(indices, values, shapes, dense_inputs, sep, name=None):
  r"""Generates sparse cross from a list of sparse and dense tensors.

  The op takes two lists, one of 2D `SparseTensor` and one of 2D `Tensor`, each
  representing features of one feature column. It outputs a 2D `SparseTensor` with
  the batchwise crosses of these features.

  For example, if the inputs are

      inputs[0]: SparseTensor with shape = [2, 2]
      [0, 0]: "a"
      [1, 0]: "b"
      [1, 1]: "c"

      inputs[1]: SparseTensor with shape = [2, 1]
      [0, 0]: "d"
      [1, 0]: "e"

      inputs[2]: Tensor [["f"], ["g"]]

  then the output will be

      shape = [2, 2]
      [0, 0]: "a_X_d_X_f"
      [1, 0]: "b_X_e_X_g"
      [1, 1]: "c_X_e_X_g"

  if hashed_output=true then the output will be

      shape = [2, 2]
      [0, 0]: FingerprintCat64(
                  Fingerprint64("f"), FingerprintCat64(
                      Fingerprint64("d"), Fingerprint64("a")))
      [1, 0]: FingerprintCat64(
                  Fingerprint64("g"), FingerprintCat64(
                      Fingerprint64("e"), Fingerprint64("b")))
      [1, 1]: FingerprintCat64(
                  Fingerprint64("g"), FingerprintCat64(
                      Fingerprint64("e"), Fingerprint64("c")))

  Args:
    indices: A list of `Tensor` objects with type `int64`.
      2-D.  Indices of each input `SparseTensor`.
    values: A list of `Tensor` objects with types from: `int64`, `string`.
      1-D.   values of each `SparseTensor`.
    shapes: A list with the same length as `indices` of `Tensor` objects with type `int64`.
      1-D.   Shapes of each `SparseTensor`.
    dense_inputs: A list of `Tensor` objects with types from: `int64`, `string`.
      2-D.    Columns represented by dense `Tensor`.
    sep: A `Tensor` of type `string`.
      string used when joining a list of string inputs, can be used as separator later.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values, output_shape).

    output_indices: A `Tensor` of type `int64`.
    output_values: A `Tensor` of type `string`.
    output_shape: A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path; `sep` is a tensor input, so this
    # op has no non-list attrs to encode.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseCrossV2", name, indices, values, shapes, dense_inputs,
        sep)
      _result = _SparseCrossV2Output._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return sparse_cross_v2_eager_fallback(
          indices, values, shapes, dense_inputs, sep, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'sparse_cross_v2' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'sparse_cross_v2' Op, not %r." % shapes)
  if len(shapes) != _attr_N:
    raise ValueError(
        "List argument 'shapes' to 'sparse_cross_v2' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(shapes), _attr_N))
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseCrossV2", indices=indices, values=values, shapes=shapes,
                         dense_inputs=dense_inputs, sep=sep, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("N", _op._get_attr_int("N"), "sparse_types",
              _op.get_attr("sparse_types"), "dense_types",
              _op.get_attr("dense_types"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseCrossV2", _inputs_flat, _attrs, _result)
  _result = _SparseCrossV2Output._make(_result)
  return _result
# Raw-op endpoint: exposes this wrapper as tf.raw_ops.SparseCrossV2.
SparseCrossV2 = tf_export("raw_ops.SparseCrossV2")(_ops.to_raw_op(sparse_cross_v2))
def sparse_cross_v2_eager_fallback(indices, values, shapes, dense_inputs, sep, name, ctx):
  """Slow-path eager execution of SparseCrossV2.

  Validates list lengths, converts the mixed-type lists and the `sep`
  string tensor to eager tensors, and executes the op.
  """
  if not isinstance(indices, (list, tuple)):
    raise TypeError(
        "Expected list for 'indices' argument to "
        "'sparse_cross_v2' Op, not %r." % indices)
  _attr_N = len(indices)
  if not isinstance(shapes, (list, tuple)):
    raise TypeError(
        "Expected list for 'shapes' argument to "
        "'sparse_cross_v2' Op, not %r." % shapes)
  if len(shapes) != _attr_N:
    raise ValueError(
        "List argument 'shapes' to 'sparse_cross_v2' Op with length %d "
        "must match length %d of argument 'indices'." %
        (len(shapes), _attr_N))
  _attr_sparse_types, values = _execute.convert_to_mixed_eager_tensors(values, ctx)
  _attr_dense_types, dense_inputs = _execute.convert_to_mixed_eager_tensors(dense_inputs, ctx)
  indices = _ops.convert_n_to_tensor(indices, _dtypes.int64)
  shapes = _ops.convert_n_to_tensor(shapes, _dtypes.int64)
  sep = _ops.convert_to_tensor(sep, _dtypes.string)
  # Flat input order must match the op def; `sep` is the trailing input.
  _inputs_flat = list(indices) + list(values) + list(shapes) + list(dense_inputs) + [sep]
  _attrs = ("N", _attr_N, "sparse_types", _attr_sparse_types, "dense_types",
  _attr_dense_types)
  _result = _execute.execute(b"SparseCrossV2", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseCrossV2", _inputs_flat, _attrs, _result)
  _result = _SparseCrossV2Output._make(_result)
  return _result
def sparse_dense_cwise_add(sp_indices, sp_values, sp_shape, dense, name=None):
  r"""Adds up a SparseTensor and a dense Tensor, using these special rules:

  (1) Broadcasts the dense side to have the same shape as the sparse side, if
      eligible;
  (2) Then, only the dense values pointed to by the indices of the SparseTensor
      participate in the cwise addition.

  By these rules, the result is a logical SparseTensor with exactly the same
  indices and shape, but possibly with different non-zero values.  The output of
  this Op is the resultant non-zero values.

  Args:
    sp_indices: A `Tensor` of type `int64`.
      2-D.  `N x R` matrix with the indices of non-empty values in a
      SparseTensor, possibly not in canonical ordering.
    sp_values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      1-D.  `N` non-empty values corresponding to `sp_indices`.
    sp_shape: A `Tensor` of type `int64`.
      1-D.  Shape of the input SparseTensor.
    dense: A `Tensor`. Must have the same type as `sp_values`.
      `R`-D.  The dense Tensor operand.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `sp_values`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path; on a recoverable failure fall
    # through to the Python eager path.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseDenseCwiseAdd", name, sp_indices, sp_values, sp_shape,
        dense)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass
    try:
      return sparse_dense_cwise_add_eager_fallback(
          sp_indices, sp_values, sp_shape, dense, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseDenseCwiseAdd", sp_indices=sp_indices, sp_values=sp_values,
                               sp_shape=sp_shape, dense=dense, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseDenseCwiseAdd", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
# Raw-op endpoint: exposes this wrapper as tf.raw_ops.SparseDenseCwiseAdd.
SparseDenseCwiseAdd = tf_export("raw_ops.SparseDenseCwiseAdd")(_ops.to_raw_op(sparse_dense_cwise_add))
def sparse_dense_cwise_add_eager_fallback(sp_indices, sp_values, sp_shape, dense, name, ctx):
  """Slow-path eager execution of SparseDenseCwiseAdd.

  Resolves a common dtype `T` for `sp_values` and `dense`, coerces the
  index/shape inputs to int64 tensors, and executes the op.
  """
  _attr_T, _inputs_T = _execute.args_to_matching_eager([sp_values, dense], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  (sp_values, dense) = _inputs_T
  sp_indices = _ops.convert_to_tensor(sp_indices, _dtypes.int64)
  sp_shape = _ops.convert_to_tensor(sp_shape, _dtypes.int64)
  _inputs_flat = [sp_indices, sp_values, sp_shape, dense]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"SparseDenseCwiseAdd", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseDenseCwiseAdd", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
def sparse_dense_cwise_div(sp_indices, sp_values, sp_shape, dense, name=None):
  r"""Component-wise divides a SparseTensor by a dense Tensor.

  *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
  the other direction.

  Args:
    sp_indices: A `Tensor` of type `int64`.
      2-D.  `N x R` matrix with the indices of non-empty values in a
      SparseTensor, possibly not in canonical ordering.
    sp_values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      1-D.  `N` non-empty values corresponding to `sp_indices`.
    sp_shape: A `Tensor` of type `int64`.
      1-D.  Shape of the input SparseTensor.
    dense: A `Tensor`. Must have the same type as `sp_values`.
      `R`-D.  The dense Tensor operand.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `sp_values`.
  """
  # Dispatch order: (1) eager C fast path, (2) Python eager fallback,
  # (3) graph-mode node construction.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseDenseCwiseDiv", name, sp_indices, sp_values, sp_shape,
        dense)
      return _result
    except _core._NotOkStatusException as e:
      # Always raises; attaches the op name to the status.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; try the slower eager fallback.
    try:
      return sparse_dense_cwise_div_eager_fallback(
          sp_indices, sp_values, sp_shape, dense, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseDenseCwiseDiv", sp_indices=sp_indices, sp_values=sp_values,
                               sp_shape=sp_shape, dense=dense, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseDenseCwiseDiv", _inputs_flat, _attrs, _result)
  # Single-output op; unpack the list.
  _result, = _result
  return _result
# Raw-op export: makes this wrapper reachable as tf.raw_ops.SparseDenseCwiseDiv.
SparseDenseCwiseDiv = tf_export("raw_ops.SparseDenseCwiseDiv")(_ops.to_raw_op(sparse_dense_cwise_div))
def sparse_dense_cwise_div_eager_fallback(sp_indices, sp_values, sp_shape, dense, name, ctx):
  """Python eager fallback for SparseDenseCwiseDiv.

  Invoked when the C fast path raises _FallbackException. Not public API.
  """
  # Infer the shared dtype attr "T" from the two value operands.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([sp_values, dense], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  (sp_values, dense) = _inputs_T
  # Index and shape tensors are always int64 per the op definition.
  sp_indices = _ops.convert_to_tensor(sp_indices, _dtypes.int64)
  sp_shape = _ops.convert_to_tensor(sp_shape, _dtypes.int64)
  _inputs_flat = [sp_indices, sp_values, sp_shape, dense]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"SparseDenseCwiseDiv", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseDenseCwiseDiv", _inputs_flat, _attrs, _result)
  # Single-output op; unpack the list.
  _result, = _result
  return _result
def sparse_dense_cwise_mul(sp_indices, sp_values, sp_shape, dense, name=None):
  r"""Component-wise multiplies a SparseTensor by a dense Tensor.

  The output locations corresponding to the implicitly zero elements in the sparse
  tensor will be optimized away (i.e., will not take up storage space), regardless of the
  contents of the dense tensor (even if it's +/-INF and that INF*0 == NaN).

  *Limitation*: this Op only broadcasts the dense side to the sparse side, but not
  the other direction.

  Args:
    sp_indices: A `Tensor` of type `int64`.
      2-D.  `N x R` matrix with the indices of non-empty values in a
      SparseTensor, possibly not in canonical ordering.
    sp_values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      1-D.  `N` non-empty values corresponding to `sp_indices`.
    sp_shape: A `Tensor` of type `int64`.
      1-D.  Shape of the input SparseTensor.
    dense: A `Tensor`. Must have the same type as `sp_values`.
      `R`-D.  The dense Tensor operand.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `sp_values`.
  """
  # Dispatch order: (1) eager C fast path, (2) Python eager fallback,
  # (3) graph-mode node construction.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseDenseCwiseMul", name, sp_indices, sp_values, sp_shape,
        dense)
      return _result
    except _core._NotOkStatusException as e:
      # Always raises; attaches the op name to the status.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; try the slower eager fallback.
    try:
      return sparse_dense_cwise_mul_eager_fallback(
          sp_indices, sp_values, sp_shape, dense, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseDenseCwiseMul", sp_indices=sp_indices, sp_values=sp_values,
                               sp_shape=sp_shape, dense=dense, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseDenseCwiseMul", _inputs_flat, _attrs, _result)
  # Single-output op; unpack the list.
  _result, = _result
  return _result
# Raw-op export: makes this wrapper reachable as tf.raw_ops.SparseDenseCwiseMul.
SparseDenseCwiseMul = tf_export("raw_ops.SparseDenseCwiseMul")(_ops.to_raw_op(sparse_dense_cwise_mul))
def sparse_dense_cwise_mul_eager_fallback(sp_indices, sp_values, sp_shape, dense, name, ctx):
  """Python eager fallback for SparseDenseCwiseMul.

  Invoked when the C fast path raises _FallbackException. Not public API.
  """
  # Infer the shared dtype attr "T" from the two value operands.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([sp_values, dense], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  (sp_values, dense) = _inputs_T
  # Index and shape tensors are always int64 per the op definition.
  sp_indices = _ops.convert_to_tensor(sp_indices, _dtypes.int64)
  sp_shape = _ops.convert_to_tensor(sp_shape, _dtypes.int64)
  _inputs_flat = [sp_indices, sp_values, sp_shape, dense]
  _attrs = ("T", _attr_T)
  _result = _execute.execute(b"SparseDenseCwiseMul", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseDenseCwiseMul", _inputs_flat, _attrs, _result)
  # Single-output op; unpack the list.
  _result, = _result
  return _result
# Result type for sparse_fill_empty_rows: the op returns four tensors.
_SparseFillEmptyRowsOutput = collections.namedtuple(
    "SparseFillEmptyRows",
    ["output_indices", "output_values", "empty_row_indicator", "reverse_index_map"])
def sparse_fill_empty_rows(indices, values, dense_shape, default_value, name=None):
  r"""Fills empty rows in the input 2-D `SparseTensor` with a default value.

  The input `SparseTensor` is represented via the tuple of inputs
  (`indices`, `values`, `dense_shape`).  The output `SparseTensor` has the
  same `dense_shape` but with indices `output_indices` and values
  `output_values`.

  This op inserts a single entry for every row that doesn't have any values.
  The index is created as `[row, 0, ..., 0]` and the inserted value
  is `default_value`.

  For example, suppose `sp_input` has shape `[5, 6]` and non-empty values:

      [0, 1]: a
      [0, 3]: b
      [2, 0]: c
      [3, 1]: d

  Rows 1 and 4 are empty, so the output will be of shape `[5, 6]` with values:

      [0, 1]: a
      [0, 3]: b
      [1, 0]: default_value
      [2, 0]: c
      [3, 1]: d
      [4, 0]: default_value

  The output `SparseTensor` will be in row-major order and will have the
  same shape as the input.

  This op also returns an indicator vector shaped `[dense_shape[0]]` such that

      empty_row_indicator[i] = True iff row i was an empty row.

  And a reverse index map vector shaped `[indices.shape[0]]` that is used during
  backpropagation,

      reverse_index_map[j] = out_j s.t. indices[j, :] == output_indices[out_j, :]

  Args:
    indices: A `Tensor` of type `int64`.
      2-D. the indices of the sparse tensor.
    values: A `Tensor`. 1-D. the values of the sparse tensor.
    dense_shape: A `Tensor` of type `int64`.
      1-D. the shape of the sparse tensor.
    default_value: A `Tensor`. Must have the same type as `values`.
      0-D. default value to insert into location `[row, 0, ..., 0]`
        for rows missing from the input sparse tensor.
      output indices: 2-D. the indices of the filled sparse tensor.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values, empty_row_indicator, reverse_index_map).

    output_indices: A `Tensor` of type `int64`.
    output_values: A `Tensor`. Has the same type as `values`.
    empty_row_indicator: A `Tensor` of type `bool`.
    reverse_index_map: A `Tensor` of type `int64`.
  """
  # Dispatch order: (1) eager C fast path, (2) Python eager fallback,
  # (3) graph-mode node construction.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseFillEmptyRows", name, indices, values, dense_shape,
        default_value)
      # Wrap the flat outputs in the namedtuple result type.
      _result = _SparseFillEmptyRowsOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Always raises; attaches the op name to the status.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; try the slower eager fallback.
    try:
      return sparse_fill_empty_rows_eager_fallback(
          indices, values, dense_shape, default_value, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseFillEmptyRows", indices=indices, values=values,
                               dense_shape=dense_shape,
                               default_value=default_value, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseFillEmptyRows", _inputs_flat, _attrs, _result)
  # Wrap the flat outputs in the namedtuple result type.
  _result = _SparseFillEmptyRowsOutput._make(_result)
  return _result
# Raw-op export: makes this wrapper reachable as tf.raw_ops.SparseFillEmptyRows.
SparseFillEmptyRows = tf_export("raw_ops.SparseFillEmptyRows")(_ops.to_raw_op(sparse_fill_empty_rows))
def sparse_fill_empty_rows_eager_fallback(indices, values, dense_shape, default_value, name, ctx):
  """Python eager fallback for SparseFillEmptyRows.

  Invoked when the C fast path raises _FallbackException. Not public API.
  """
  # Infer the dtype attr "T" from values/default_value; no allowed-types list
  # here because the op accepts any tensor dtype for its values.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([values, default_value], ctx, [])
  (values, default_value) = _inputs_T
  # Index and shape tensors are always int64 per the op definition.
  indices = _ops.convert_to_tensor(indices, _dtypes.int64)
  dense_shape = _ops.convert_to_tensor(dense_shape, _dtypes.int64)
  _inputs_flat = [indices, values, dense_shape, default_value]
  _attrs = ("T", _attr_T)
  # The op has four outputs.
  _result = _execute.execute(b"SparseFillEmptyRows", 4, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseFillEmptyRows", _inputs_flat, _attrs, _result)
  # Wrap the flat outputs in the namedtuple result type.
  _result = _SparseFillEmptyRowsOutput._make(_result)
  return _result
# Result type for sparse_fill_empty_rows_grad: the op returns two tensors.
_SparseFillEmptyRowsGradOutput = collections.namedtuple(
    "SparseFillEmptyRowsGrad",
    ["d_values", "d_default_value"])
def sparse_fill_empty_rows_grad(reverse_index_map, grad_values, name=None):
  r"""The gradient of SparseFillEmptyRows.

  Takes vectors reverse_index_map, shaped `[N]`, and grad_values,
  shaped `[N_full]`, where `N_full >= N` and copies data into either
  `d_values` or `d_default_value`.  Here `d_values` is shaped `[N]` and
  `d_default_value` is a scalar.

      d_values[j] = grad_values[reverse_index_map[j]]
      d_default_value = sum_{k : 0 .. N_full - 1} (
         grad_values[k] * 1{k not in reverse_index_map})

  Args:
    reverse_index_map: A `Tensor` of type `int64`.
      1-D.  The reverse index map from SparseFillEmptyRows.
    grad_values: A `Tensor`. 1-D.  The gradients from backprop.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (d_values, d_default_value).

    d_values: A `Tensor`. Has the same type as `grad_values`.
    d_default_value: A `Tensor`. Has the same type as `grad_values`.
  """
  # Dispatch order: (1) eager C fast path, (2) Python eager fallback,
  # (3) graph-mode node construction.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseFillEmptyRowsGrad", name, reverse_index_map, grad_values)
      # Wrap the flat outputs in the namedtuple result type.
      _result = _SparseFillEmptyRowsGradOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Always raises; attaches the op name to the status.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; try the slower eager fallback.
    try:
      return sparse_fill_empty_rows_grad_eager_fallback(
          reverse_index_map, grad_values, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseFillEmptyRowsGrad", reverse_index_map=reverse_index_map,
                                   grad_values=grad_values, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseFillEmptyRowsGrad", _inputs_flat, _attrs, _result)
  # Wrap the flat outputs in the namedtuple result type.
  _result = _SparseFillEmptyRowsGradOutput._make(_result)
  return _result
# Raw-op export: makes this wrapper reachable as tf.raw_ops.SparseFillEmptyRowsGrad.
SparseFillEmptyRowsGrad = tf_export("raw_ops.SparseFillEmptyRowsGrad")(_ops.to_raw_op(sparse_fill_empty_rows_grad))
def sparse_fill_empty_rows_grad_eager_fallback(reverse_index_map, grad_values, name, ctx):
  """Python eager fallback for SparseFillEmptyRowsGrad.

  Invoked when the C fast path raises _FallbackException. Not public API.
  """
  # Infer the dtype attr "T" from grad_values; any tensor dtype is allowed.
  _attr_T, (grad_values,) = _execute.args_to_matching_eager([grad_values], ctx, [])
  # The reverse index map is always int64 per the op definition.
  reverse_index_map = _ops.convert_to_tensor(reverse_index_map, _dtypes.int64)
  _inputs_flat = [reverse_index_map, grad_values]
  _attrs = ("T", _attr_T)
  # The op has two outputs.
  _result = _execute.execute(b"SparseFillEmptyRowsGrad", 2,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseFillEmptyRowsGrad", _inputs_flat, _attrs, _result)
  # Wrap the flat outputs in the namedtuple result type.
  _result = _SparseFillEmptyRowsGradOutput._make(_result)
  return _result
def sparse_reduce_max(input_indices, input_values, input_shape, reduction_axes, keep_dims=False, name=None):
  r"""Computes the max of elements across dimensions of a SparseTensor.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_max()`.  In particular, this Op also returns a dense `Tensor`
  instead of a sparse one.

  Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
  with length 1.

  If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned.  Additionally, the axes can be negative,
  which are interpreted according to the indexing rules in Python.

  Args:
    input_indices: A `Tensor` of type `int64`.
      2-D.  `N x R` matrix with the indices of non-empty values in a
      SparseTensor, possibly not in canonical ordering.
    input_values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      1-D.  `N` non-empty values corresponding to `input_indices`.
    input_shape: A `Tensor` of type `int64`.
      1-D.  Shape of the input SparseTensor.
    reduction_axes: A `Tensor` of type `int32`.
      1-D.  Length-`K` vector containing the reduction axes.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input_values`.
  """
  # Dispatch order: (1) eager C fast path, (2) Python eager fallback,
  # (3) graph-mode node construction.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseReduceMax", name, input_indices, input_values,
        input_shape, reduction_axes, "keep_dims", keep_dims)
      return _result
    except _core._NotOkStatusException as e:
      # Always raises; attaches the op name to the status.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; try the slower eager fallback.
    try:
      return sparse_reduce_max_eager_fallback(
          input_indices, input_values, input_shape, reduction_axes,
          keep_dims=keep_dims, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Normalize the optional attr before building the graph node.
  if keep_dims is None:
    keep_dims = False
  keep_dims = _execute.make_bool(keep_dims, "keep_dims")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseReduceMax", input_indices=input_indices,
                           input_values=input_values, input_shape=input_shape,
                           reduction_axes=reduction_axes, keep_dims=keep_dims,
                           name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("keep_dims", _op._get_attr_bool("keep_dims"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseReduceMax", _inputs_flat, _attrs, _result)
  # Single-output op; unpack the list.
  _result, = _result
  return _result
# Raw-op export: makes this wrapper reachable as tf.raw_ops.SparseReduceMax.
SparseReduceMax = tf_export("raw_ops.SparseReduceMax")(_ops.to_raw_op(sparse_reduce_max))
def sparse_reduce_max_eager_fallback(input_indices, input_values, input_shape, reduction_axes, keep_dims, name, ctx):
  """Python eager fallback for SparseReduceMax.

  Invoked when the C fast path raises _FallbackException. Not public API.
  """
  # Normalize the optional attr to a concrete bool.
  if keep_dims is None:
    keep_dims = False
  keep_dims = _execute.make_bool(keep_dims, "keep_dims")
  # Infer the dtype attr "T" from the values (real/ordered types only — max
  # is not defined for complex or quantized dtypes).
  _attr_T, (input_values,) = _execute.args_to_matching_eager([input_values], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  # Index/shape tensors are int64 and axes are int32 per the op definition.
  input_indices = _ops.convert_to_tensor(input_indices, _dtypes.int64)
  input_shape = _ops.convert_to_tensor(input_shape, _dtypes.int64)
  reduction_axes = _ops.convert_to_tensor(reduction_axes, _dtypes.int32)
  _inputs_flat = [input_indices, input_values, input_shape, reduction_axes]
  _attrs = ("keep_dims", keep_dims, "T", _attr_T)
  _result = _execute.execute(b"SparseReduceMax", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseReduceMax", _inputs_flat, _attrs, _result)
  # Single-output op; unpack the list.
  _result, = _result
  return _result
# Result type for sparse_reduce_max_sparse: the op returns a sparse triple.
_SparseReduceMaxSparseOutput = collections.namedtuple(
    "SparseReduceMaxSparse",
    ["output_indices", "output_values", "output_shape"])
def sparse_reduce_max_sparse(input_indices, input_values, input_shape, reduction_axes, keep_dims=False, name=None):
  r"""Computes the max of elements across dimensions of a SparseTensor.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_max()`.  In contrast to SparseReduceMax, this Op returns a
  SparseTensor.

  Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
  with length 1.

  If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned.  Additionally, the axes can be negative,
  which are interpreted according to the indexing rules in Python.

  Args:
    input_indices: A `Tensor` of type `int64`.
      2-D.  `N x R` matrix with the indices of non-empty values in a
      SparseTensor, possibly not in canonical ordering.
    input_values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      1-D.  `N` non-empty values corresponding to `input_indices`.
    input_shape: A `Tensor` of type `int64`.
      1-D.  Shape of the input SparseTensor.
    reduction_axes: A `Tensor` of type `int32`.
      1-D.  Length-`K` vector containing the reduction axes.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values, output_shape).

    output_indices: A `Tensor` of type `int64`.
    output_values: A `Tensor`. Has the same type as `input_values`.
    output_shape: A `Tensor` of type `int64`.
  """
  # Dispatch order: (1) eager C fast path, (2) Python eager fallback,
  # (3) graph-mode node construction.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseReduceMaxSparse", name, input_indices, input_values,
        input_shape, reduction_axes, "keep_dims", keep_dims)
      # Wrap the flat outputs in the namedtuple result type.
      _result = _SparseReduceMaxSparseOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Always raises; attaches the op name to the status.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; try the slower eager fallback.
    try:
      return sparse_reduce_max_sparse_eager_fallback(
          input_indices, input_values, input_shape, reduction_axes,
          keep_dims=keep_dims, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Normalize the optional attr before building the graph node.
  if keep_dims is None:
    keep_dims = False
  keep_dims = _execute.make_bool(keep_dims, "keep_dims")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseReduceMaxSparse", input_indices=input_indices,
                                 input_values=input_values,
                                 input_shape=input_shape,
                                 reduction_axes=reduction_axes,
                                 keep_dims=keep_dims, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("keep_dims", _op._get_attr_bool("keep_dims"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseReduceMaxSparse", _inputs_flat, _attrs, _result)
  # Wrap the flat outputs in the namedtuple result type.
  _result = _SparseReduceMaxSparseOutput._make(_result)
  return _result
# Raw-op export: makes this wrapper reachable as tf.raw_ops.SparseReduceMaxSparse.
SparseReduceMaxSparse = tf_export("raw_ops.SparseReduceMaxSparse")(_ops.to_raw_op(sparse_reduce_max_sparse))
def sparse_reduce_max_sparse_eager_fallback(input_indices, input_values, input_shape, reduction_axes, keep_dims, name, ctx):
  """Python eager fallback for SparseReduceMaxSparse.

  Invoked when the C fast path raises _FallbackException. Not public API.
  """
  # Normalize the optional attr to a concrete bool.
  if keep_dims is None:
    keep_dims = False
  keep_dims = _execute.make_bool(keep_dims, "keep_dims")
  # Infer the dtype attr "T" from the values (real/ordered types only).
  _attr_T, (input_values,) = _execute.args_to_matching_eager([input_values], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16, _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  # Index/shape tensors are int64 and axes are int32 per the op definition.
  input_indices = _ops.convert_to_tensor(input_indices, _dtypes.int64)
  input_shape = _ops.convert_to_tensor(input_shape, _dtypes.int64)
  reduction_axes = _ops.convert_to_tensor(reduction_axes, _dtypes.int32)
  _inputs_flat = [input_indices, input_values, input_shape, reduction_axes]
  _attrs = ("keep_dims", keep_dims, "T", _attr_T)
  # The op has three outputs (indices, values, shape).
  _result = _execute.execute(b"SparseReduceMaxSparse", 3, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseReduceMaxSparse", _inputs_flat, _attrs, _result)
  # Wrap the flat outputs in the namedtuple result type.
  _result = _SparseReduceMaxSparseOutput._make(_result)
  return _result
def sparse_reduce_sum(input_indices, input_values, input_shape, reduction_axes, keep_dims=False, name=None):
  r"""Computes the sum of elements across dimensions of a SparseTensor.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_sum()`.  In particular, this Op also returns a dense `Tensor`
  instead of a sparse one.

  Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
  with length 1.

  If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned.  Additionally, the axes can be negative,
  which are interpreted according to the indexing rules in Python.

  Args:
    input_indices: A `Tensor` of type `int64`.
      2-D.  `N x R` matrix with the indices of non-empty values in a
      SparseTensor, possibly not in canonical ordering.
    input_values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      1-D.  `N` non-empty values corresponding to `input_indices`.
    input_shape: A `Tensor` of type `int64`.
      1-D.  Shape of the input SparseTensor.
    reduction_axes: A `Tensor` of type `int32`.
      1-D.  Length-`K` vector containing the reduction axes.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `input_values`.
  """
  # Dispatch order: (1) eager C fast path, (2) Python eager fallback,
  # (3) graph-mode node construction.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseReduceSum", name, input_indices, input_values,
        input_shape, reduction_axes, "keep_dims", keep_dims)
      return _result
    except _core._NotOkStatusException as e:
      # Always raises; attaches the op name to the status.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; try the slower eager fallback.
    try:
      return sparse_reduce_sum_eager_fallback(
          input_indices, input_values, input_shape, reduction_axes,
          keep_dims=keep_dims, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Normalize the optional attr before building the graph node.
  if keep_dims is None:
    keep_dims = False
  keep_dims = _execute.make_bool(keep_dims, "keep_dims")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseReduceSum", input_indices=input_indices,
                           input_values=input_values, input_shape=input_shape,
                           reduction_axes=reduction_axes, keep_dims=keep_dims,
                           name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("keep_dims", _op._get_attr_bool("keep_dims"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseReduceSum", _inputs_flat, _attrs, _result)
  # Single-output op; unpack the list.
  _result, = _result
  return _result
# Raw-op export: makes this wrapper reachable as tf.raw_ops.SparseReduceSum.
SparseReduceSum = tf_export("raw_ops.SparseReduceSum")(_ops.to_raw_op(sparse_reduce_sum))
def sparse_reduce_sum_eager_fallback(input_indices, input_values, input_shape, reduction_axes, keep_dims, name, ctx):
  """Python eager fallback for SparseReduceSum.

  Invoked when the C fast path raises _FallbackException. Not public API.
  """
  # Normalize the optional attr to a concrete bool.
  if keep_dims is None:
    keep_dims = False
  keep_dims = _execute.make_bool(keep_dims, "keep_dims")
  # Infer the dtype attr "T" from the values; sum supports the full numeric
  # set including complex and quantized dtypes.
  _attr_T, (input_values,) = _execute.args_to_matching_eager([input_values], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  # Index/shape tensors are int64 and axes are int32 per the op definition.
  input_indices = _ops.convert_to_tensor(input_indices, _dtypes.int64)
  input_shape = _ops.convert_to_tensor(input_shape, _dtypes.int64)
  reduction_axes = _ops.convert_to_tensor(reduction_axes, _dtypes.int32)
  _inputs_flat = [input_indices, input_values, input_shape, reduction_axes]
  _attrs = ("keep_dims", keep_dims, "T", _attr_T)
  _result = _execute.execute(b"SparseReduceSum", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseReduceSum", _inputs_flat, _attrs, _result)
  # Single-output op; unpack the list.
  _result, = _result
  return _result
# Result type for sparse_reduce_sum_sparse: the op returns a sparse triple.
_SparseReduceSumSparseOutput = collections.namedtuple(
    "SparseReduceSumSparse",
    ["output_indices", "output_values", "output_shape"])
def sparse_reduce_sum_sparse(input_indices, input_values, input_shape, reduction_axes, keep_dims=False, name=None):
  r"""Computes the sum of elements across dimensions of a SparseTensor.

  This Op takes a SparseTensor and is the sparse counterpart to
  `tf.reduce_sum()`.  In contrast to SparseReduceSum, this Op returns a
  SparseTensor.

  Reduces `sp_input` along the dimensions given in `reduction_axes`.  Unless
  `keep_dims` is true, the rank of the tensor is reduced by 1 for each entry in
  `reduction_axes`. If `keep_dims` is true, the reduced dimensions are retained
  with length 1.

  If `reduction_axes` has no entries, all dimensions are reduced, and a tensor
  with a single element is returned.  Additionally, the axes can be negative,
  which are interpreted according to the indexing rules in Python.

  Args:
    input_indices: A `Tensor` of type `int64`.
      2-D.  `N x R` matrix with the indices of non-empty values in a
      SparseTensor, possibly not in canonical ordering.
    input_values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      1-D.  `N` non-empty values corresponding to `input_indices`.
    input_shape: A `Tensor` of type `int64`.
      1-D.  Shape of the input SparseTensor.
    reduction_axes: A `Tensor` of type `int32`.
      1-D.  Length-`K` vector containing the reduction axes.
    keep_dims: An optional `bool`. Defaults to `False`.
      If true, retain reduced dimensions with length 1.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values, output_shape).

    output_indices: A `Tensor` of type `int64`.
    output_values: A `Tensor`. Has the same type as `input_values`.
    output_shape: A `Tensor` of type `int64`.
  """
  # Dispatch order: (1) eager C fast path, (2) Python eager fallback,
  # (3) graph-mode node construction.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseReduceSumSparse", name, input_indices, input_values,
        input_shape, reduction_axes, "keep_dims", keep_dims)
      # Wrap the flat outputs in the namedtuple result type.
      _result = _SparseReduceSumSparseOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Always raises; attaches the op name to the status.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; try the slower eager fallback.
    try:
      return sparse_reduce_sum_sparse_eager_fallback(
          input_indices, input_values, input_shape, reduction_axes,
          keep_dims=keep_dims, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Normalize the optional attr before building the graph node.
  if keep_dims is None:
    keep_dims = False
  keep_dims = _execute.make_bool(keep_dims, "keep_dims")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseReduceSumSparse", input_indices=input_indices,
                                 input_values=input_values,
                                 input_shape=input_shape,
                                 reduction_axes=reduction_axes,
                                 keep_dims=keep_dims, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    _attrs = ("keep_dims", _op._get_attr_bool("keep_dims"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseReduceSumSparse", _inputs_flat, _attrs, _result)
  # Wrap the flat outputs in the namedtuple result type.
  _result = _SparseReduceSumSparseOutput._make(_result)
  return _result
# Raw-op endpoint: registers the generated wrapper as tf.raw_ops.SparseReduceSumSparse.
SparseReduceSumSparse = tf_export("raw_ops.SparseReduceSumSparse")(_ops.to_raw_op(sparse_reduce_sum_sparse))
def sparse_reduce_sum_sparse_eager_fallback(input_indices, input_values, input_shape, reduction_axes, keep_dims, name, ctx):
  """Slow-path eager execution of the SparseReduceSumSparse op.

  Canonicalizes the `keep_dims` attr, infers the value dtype, coerces the
  index/shape/axis inputs to their fixed dtypes, runs the op, and records a
  gradient when taping is active.
  """
  # Default and canonicalize the keep_dims attribute.
  keep_dims = _execute.make_bool(False if keep_dims is None else keep_dims,
                                 "keep_dims")
  allowed_types = [
      _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8,
      _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64,
      _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16,
      _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128,
      _dtypes.half, _dtypes.uint32, _dtypes.uint64,
  ]
  value_type, (input_values,) = _execute.args_to_matching_eager(
      [input_values], ctx, allowed_types)
  input_indices = _ops.convert_to_tensor(input_indices, _dtypes.int64)
  input_shape = _ops.convert_to_tensor(input_shape, _dtypes.int64)
  reduction_axes = _ops.convert_to_tensor(reduction_axes, _dtypes.int32)
  flat_inputs = [input_indices, input_values, input_shape, reduction_axes]
  op_attrs = ("keep_dims", keep_dims, "T", value_type)
  outputs = _execute.execute(b"SparseReduceSumSparse", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("SparseReduceSumSparse", flat_inputs, op_attrs,
                             outputs)
  return _SparseReduceSumSparseOutput._make(outputs)
# Structured (output_indices, output_values) result for the SparseReorder op.
_SparseReorderOutput = collections.namedtuple(
    "SparseReorder", ["output_indices", "output_values"])
def sparse_reorder(input_indices, input_values, input_shape, name=None):
  r"""Reorders a SparseTensor into the canonical, row-major ordering.

  Note that by convention, all sparse ops preserve the canonical ordering along
  increasing dimension number. The only time ordering can be violated is during
  manual manipulation of the indices and values vectors to add entries.

  Reordering does not affect the shape of the SparseTensor.

  If the tensor has rank `R` and `N` non-empty values, `input_indices` has
  shape `[N, R]`, input_values has length `N`, and input_shape has length `R`.

  Args:
    input_indices: A `Tensor` of type `int64`.
      2-D. `N x R` matrix with the indices of non-empty values in a
      SparseTensor, possibly not in canonical ordering.
    input_values: A `Tensor`.
      1-D. `N` non-empty values corresponding to `input_indices`.
    input_shape: A `Tensor` of type `int64`.
      1-D. Shape of the input SparseTensor.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values).

    output_indices: A `Tensor` of type `int64`.
    output_values: A `Tensor`. Has the same type as `input_values`.
  """
  # Resolve the active eager/graph execution context.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "SparseReorder", name, input_indices, input_values, input_shape)
      _result = _SparseReorderOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Op-level failure: surface the C++ status as a Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; retry via the slower eager path.
    try:
      return sparse_reorder_eager_fallback(
          input_indices, input_values, input_shape, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseReorder", input_indices=input_indices,
                         input_values=input_values, input_shape=input_shape,
                         name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture attrs/inputs so the tape can replay the op for gradients.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseReorder", _inputs_flat, _attrs, _result)
  _result = _SparseReorderOutput._make(_result)
  return _result
# Raw-op endpoint: registers the generated wrapper as tf.raw_ops.SparseReorder.
SparseReorder = tf_export("raw_ops.SparseReorder")(_ops.to_raw_op(sparse_reorder))
def sparse_reorder_eager_fallback(input_indices, input_values, input_shape, name, ctx):
  """Slow-path eager execution of the SparseReorder op.

  Infers the value dtype, coerces the index/shape inputs to int64, runs the
  op, and records a gradient when taping is active.
  """
  value_type, (input_values,) = _execute.args_to_matching_eager(
      [input_values], ctx, [])
  input_indices = _ops.convert_to_tensor(input_indices, _dtypes.int64)
  input_shape = _ops.convert_to_tensor(input_shape, _dtypes.int64)
  flat_inputs = [input_indices, input_values, input_shape]
  op_attrs = ("T", value_type)
  outputs = _execute.execute(b"SparseReorder", 2, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("SparseReorder", flat_inputs, op_attrs, outputs)
  return _SparseReorderOutput._make(outputs)
# Structured (output_indices, output_shape) result for the SparseReshape op.
_SparseReshapeOutput = collections.namedtuple(
    "SparseReshape", ["output_indices", "output_shape"])
def sparse_reshape(input_indices, input_shape, new_shape, name=None):
  r"""Reshapes a SparseTensor to represent values in a new dense shape.

  This operation has the same semantics as reshape on the represented dense
  tensor. The `input_indices` are recomputed based on the requested `new_shape`.

  If one component of `new_shape` is the special value -1, the size of that
  dimension is computed so that the total dense size remains constant. At
  most one component of `new_shape` can be -1. The number of dense elements
  implied by `new_shape` must be the same as the number of dense elements
  originally implied by `input_shape`.

  Reshaping does not affect the order of values in the SparseTensor.

  If the input tensor has rank `R_in` and `N` non-empty values, and `new_shape`
  has length `R_out`, then `input_indices` has shape `[N, R_in]`,
  `input_shape` has length `R_in`, `output_indices` has shape `[N, R_out]`, and
  `output_shape` has length `R_out`.

  Args:
    input_indices: A `Tensor` of type `int64`.
      2-D. `N x R_in` matrix with the indices of non-empty values in a
      SparseTensor.
    input_shape: A `Tensor` of type `int64`.
      1-D. `R_in` vector with the input SparseTensor's dense shape.
    new_shape: A `Tensor` of type `int64`.
      1-D. `R_out` vector with the requested new dense shape.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_shape).

    output_indices: A `Tensor` of type `int64`.
    output_shape: A `Tensor` of type `int64`.
  """
  # Resolve the active eager/graph execution context.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "SparseReshape", name, input_indices, input_shape, new_shape)
      _result = _SparseReshapeOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Op-level failure: surface the C++ status as a Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; retry via the slower eager path.
    try:
      return sparse_reshape_eager_fallback(
          input_indices, input_shape, new_shape, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseReshape", input_indices=input_indices, input_shape=input_shape,
                         new_shape=new_shape, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # SparseReshape has no attrs; record an empty attr tuple for the tape.
    _attrs = ()
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseReshape", _inputs_flat, _attrs, _result)
  _result = _SparseReshapeOutput._make(_result)
  return _result
# Raw-op endpoint: registers the generated wrapper as tf.raw_ops.SparseReshape.
SparseReshape = tf_export("raw_ops.SparseReshape")(_ops.to_raw_op(sparse_reshape))
def sparse_reshape_eager_fallback(input_indices, input_shape, new_shape, name, ctx):
  """Slow-path eager execution of the SparseReshape op.

  All three inputs are fixed int64 tensors and the op carries no attrs.
  """
  flat_inputs = [
      _ops.convert_to_tensor(input_indices, _dtypes.int64),
      _ops.convert_to_tensor(input_shape, _dtypes.int64),
      _ops.convert_to_tensor(new_shape, _dtypes.int64),
  ]
  outputs = _execute.execute(b"SparseReshape", 2, inputs=flat_inputs,
                             attrs=None, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("SparseReshape", flat_inputs, None, outputs)
  return _SparseReshapeOutput._make(outputs)
# Structured (output_indices, output_values, output_shape) result for SparseSlice.
_SparseSliceOutput = collections.namedtuple(
    "SparseSlice", ["output_indices", "output_values", "output_shape"])
def sparse_slice(indices, values, shape, start, size, name=None):
  r"""Slice a `SparseTensor` based on the `start` and `size`.

  For example, if the input is

      input_tensor = shape = [2, 7]
      [    a   d e  ]
      [b c          ]

  Graphically the output tensors are:

      sparse_slice([0, 0], [2, 4]) = shape = [2, 4]
      [    a  ]
      [b c    ]

      sparse_slice([0, 4], [2, 3]) = shape = [2, 3]
      [ d e  ]
      [      ]

  Args:
    indices: A `Tensor` of type `int64`.
      2-D tensor represents the indices of the sparse tensor.
    values: A `Tensor`. 1-D tensor represents the values of the sparse tensor.
    shape: A `Tensor` of type `int64`.
      1-D. tensor represents the shape of the sparse tensor.
    start: A `Tensor` of type `int64`.
      1-D. tensor represents the start of the slice.
    size: A `Tensor` of type `int64`.
      1-D. tensor represents the size of the slice.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values, output_shape).

    output_indices: A `Tensor` of type `int64`.
    output_values: A `Tensor`. Has the same type as `values`.
    output_shape: A `Tensor` of type `int64`.
  """
  # Resolve the active eager/graph execution context.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "SparseSlice", name, indices, values, shape, start, size)
      _result = _SparseSliceOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Op-level failure: surface the C++ status as a Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; retry via the slower eager path.
    try:
      return sparse_slice_eager_fallback(
          indices, values, shape, start, size, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseSlice", indices=indices, values=values, shape=shape,
                       start=start, size=size, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture attrs/inputs so the tape can replay the op for gradients.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseSlice", _inputs_flat, _attrs, _result)
  _result = _SparseSliceOutput._make(_result)
  return _result
# Raw-op endpoint: registers the generated wrapper as tf.raw_ops.SparseSlice.
SparseSlice = tf_export("raw_ops.SparseSlice")(_ops.to_raw_op(sparse_slice))
def sparse_slice_eager_fallback(indices, values, shape, start, size, name, ctx):
  """Slow-path eager execution of the SparseSlice op.

  Infers the value dtype, coerces the remaining inputs to int64, runs the op,
  and records a gradient when taping is active.
  """
  value_type, (values,) = _execute.args_to_matching_eager([values], ctx, [])
  indices = _ops.convert_to_tensor(indices, _dtypes.int64)
  shape = _ops.convert_to_tensor(shape, _dtypes.int64)
  start = _ops.convert_to_tensor(start, _dtypes.int64)
  size = _ops.convert_to_tensor(size, _dtypes.int64)
  flat_inputs = [indices, values, shape, start, size]
  op_attrs = ("T", value_type)
  outputs = _execute.execute(b"SparseSlice", 3, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("SparseSlice", flat_inputs, op_attrs, outputs)
  return _SparseSliceOutput._make(outputs)
def sparse_slice_grad(backprop_val_grad, input_indices, input_start, output_indices, name=None):
  r"""The gradient operator for the SparseSlice op.

  This op takes in the upstream gradient w.r.t. non-empty values of
  the sliced `SparseTensor`, and outputs the gradients w.r.t.
  the non-empty values of input `SparseTensor`.

  Args:
    backprop_val_grad: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      1-D. The gradient with respect to
      the non-empty values of the sliced `SparseTensor`.
    input_indices: A `Tensor` of type `int64`.
      2-D. The `indices` of the input `SparseTensor`.
    input_start: A `Tensor` of type `int64`.
      1-D. tensor represents the start of the slice.
    output_indices: A `Tensor` of type `int64`.
      2-D. The `indices` of the sliced `SparseTensor`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `backprop_val_grad`.
  """
  # Resolve the active eager/graph execution context.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "SparseSliceGrad", name, backprop_val_grad, input_indices,
          input_start, output_indices)
      return _result
    except _core._NotOkStatusException as e:
      # Op-level failure: surface the C++ status as a Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; retry via the slower eager path.
    try:
      return sparse_slice_grad_eager_fallback(
          backprop_val_grad, input_indices, input_start, output_indices,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseSliceGrad", backprop_val_grad=backprop_val_grad,
                           input_indices=input_indices,
                           input_start=input_start,
                           output_indices=output_indices, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture attrs/inputs so the tape can replay the op for gradients.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseSliceGrad", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
# Raw-op endpoint: registers the generated wrapper as tf.raw_ops.SparseSliceGrad.
SparseSliceGrad = tf_export("raw_ops.SparseSliceGrad")(_ops.to_raw_op(sparse_slice_grad))
def sparse_slice_grad_eager_fallback(backprop_val_grad, input_indices, input_start, output_indices, name, ctx):
  """Slow-path eager execution of the SparseSliceGrad op."""
  allowed_types = [
      _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8,
      _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64,
      _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16,
      _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128,
      _dtypes.half, _dtypes.uint32, _dtypes.uint64,
  ]
  value_type, (backprop_val_grad,) = _execute.args_to_matching_eager(
      [backprop_val_grad], ctx, allowed_types)
  input_indices = _ops.convert_to_tensor(input_indices, _dtypes.int64)
  input_start = _ops.convert_to_tensor(input_start, _dtypes.int64)
  output_indices = _ops.convert_to_tensor(output_indices, _dtypes.int64)
  flat_inputs = [backprop_val_grad, input_indices, input_start, output_indices]
  op_attrs = ("T", value_type)
  outputs = _execute.execute(b"SparseSliceGrad", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("SparseSliceGrad", flat_inputs, op_attrs,
                             outputs)
  # Single-output op: unwrap the one-element result list.
  result, = outputs
  return result
def sparse_softmax(sp_indices, sp_values, sp_shape, name=None):
  r"""Applies softmax to a batched N-D `SparseTensor`.

  The inputs represent an N-D SparseTensor with logical shape `[..., B, C]`
  (where `N >= 2`), and with indices sorted in the canonical lexicographic order.

  This op is equivalent to applying the normal `tf.nn.softmax()` to each innermost
  logical submatrix with shape `[B, C]`, but with the catch that *the implicitly
  zero elements do not participate*. Specifically, the algorithm is equivalent
  to the following:

    (1) Applies `tf.nn.softmax()` to a densified view of each innermost submatrix
        with shape `[B, C]`, along the size-C dimension;
    (2) Masks out the original implicitly-zero locations;
    (3) Renormalizes the remaining elements.

  Hence, the `SparseTensor` result has exactly the same non-zero indices and
  shape.

  Args:
    sp_indices: A `Tensor` of type `int64`.
      2-D. `NNZ x R` matrix with the indices of non-empty values in a
      SparseTensor, in canonical ordering.
    sp_values: A `Tensor`. Must be one of the following types: `half`, `float32`, `float64`.
      1-D. `NNZ` non-empty values corresponding to `sp_indices`.
    sp_shape: A `Tensor` of type `int64`.
      1-D. Shape of the input SparseTensor.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `sp_values`.
  """
  # Resolve the active eager/graph execution context.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "SparseSoftmax", name, sp_indices, sp_values, sp_shape)
      return _result
    except _core._NotOkStatusException as e:
      # Op-level failure: surface the C++ status as a Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; retry via the slower eager path.
    try:
      return sparse_softmax_eager_fallback(
          sp_indices, sp_values, sp_shape, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseSoftmax", sp_indices=sp_indices, sp_values=sp_values,
                         sp_shape=sp_shape, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture attrs/inputs so the tape can replay the op for gradients.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseSoftmax", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
# Raw-op endpoint: registers the generated wrapper as tf.raw_ops.SparseSoftmax.
SparseSoftmax = tf_export("raw_ops.SparseSoftmax")(_ops.to_raw_op(sparse_softmax))
def sparse_softmax_eager_fallback(sp_indices, sp_values, sp_shape, name, ctx):
  """Slow-path eager execution of the SparseSoftmax op.

  Values are restricted to the floating-point dtypes the kernel supports.
  """
  value_type, (sp_values,) = _execute.args_to_matching_eager(
      [sp_values], ctx, [_dtypes.half, _dtypes.float32, _dtypes.float64, ])
  sp_indices = _ops.convert_to_tensor(sp_indices, _dtypes.int64)
  sp_shape = _ops.convert_to_tensor(sp_shape, _dtypes.int64)
  flat_inputs = [sp_indices, sp_values, sp_shape]
  op_attrs = ("T", value_type)
  outputs = _execute.execute(b"SparseSoftmax", 1, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("SparseSoftmax", flat_inputs, op_attrs, outputs)
  # Single-output op: unwrap the one-element result list.
  result, = outputs
  return result
# Structured (output_indices, output_values) result for SparseSparseMaximum.
_SparseSparseMaximumOutput = collections.namedtuple(
    "SparseSparseMaximum", ["output_indices", "output_values"])
def sparse_sparse_maximum(a_indices, a_values, a_shape, b_indices, b_values, b_shape, name=None):
  r"""Returns the element-wise max of two SparseTensors.

  Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

  Args:
    a_indices: A `Tensor` of type `int64`.
      2-D. `N x R` matrix with the indices of non-empty values in a
      SparseTensor, in the canonical lexicographic ordering.
    a_values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `int64`, `bfloat16`, `uint16`, `half`, `uint32`, `uint64`.
      1-D. `N` non-empty values corresponding to `a_indices`.
    a_shape: A `Tensor` of type `int64`.
      1-D. Shape of the input SparseTensor.
    b_indices: A `Tensor` of type `int64`.
      counterpart to `a_indices` for the other operand.
    b_values: A `Tensor`. Must have the same type as `a_values`.
      counterpart to `a_values` for the other operand; must be of the same dtype.
    b_shape: A `Tensor` of type `int64`.
      counterpart to `a_shape` for the other operand; the two shapes must be equal.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values).

    output_indices: A `Tensor` of type `int64`.
    output_values: A `Tensor`. Has the same type as `a_values`.
  """
  # Resolve the active eager/graph execution context.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "SparseSparseMaximum", name, a_indices, a_values, a_shape,
          b_indices, b_values, b_shape)
      _result = _SparseSparseMaximumOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Op-level failure: surface the C++ status as a Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; retry via the slower eager path.
    try:
      return sparse_sparse_maximum_eager_fallback(
          a_indices, a_values, a_shape, b_indices, b_values, b_shape,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseSparseMaximum", a_indices=a_indices, a_values=a_values,
                               a_shape=a_shape, b_indices=b_indices,
                               b_values=b_values, b_shape=b_shape, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture attrs/inputs so the tape can replay the op for gradients.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseSparseMaximum", _inputs_flat, _attrs, _result)
  _result = _SparseSparseMaximumOutput._make(_result)
  return _result
# Raw-op endpoint: registers the generated wrapper as tf.raw_ops.SparseSparseMaximum.
SparseSparseMaximum = tf_export("raw_ops.SparseSparseMaximum")(_ops.to_raw_op(sparse_sparse_maximum))
def sparse_sparse_maximum_eager_fallback(a_indices, a_values, a_shape, b_indices, b_values, b_shape, name, ctx):
  """Slow-path eager execution of the SparseSparseMaximum op.

  Both value tensors are matched to a single shared dtype T.
  """
  allowed_types = [
      _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8,
      _dtypes.int16, _dtypes.int8, _dtypes.int64, _dtypes.bfloat16,
      _dtypes.uint16, _dtypes.half, _dtypes.uint32, _dtypes.uint64,
  ]
  value_type, (a_values, b_values) = _execute.args_to_matching_eager(
      [a_values, b_values], ctx, allowed_types)
  a_indices = _ops.convert_to_tensor(a_indices, _dtypes.int64)
  a_shape = _ops.convert_to_tensor(a_shape, _dtypes.int64)
  b_indices = _ops.convert_to_tensor(b_indices, _dtypes.int64)
  b_shape = _ops.convert_to_tensor(b_shape, _dtypes.int64)
  flat_inputs = [a_indices, a_values, a_shape, b_indices, b_values, b_shape]
  op_attrs = ("T", value_type)
  outputs = _execute.execute(b"SparseSparseMaximum", 2, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("SparseSparseMaximum", flat_inputs, op_attrs,
                             outputs)
  return _SparseSparseMaximumOutput._make(outputs)
# Structured (output_indices, output_values) result for SparseSparseMinimum.
_SparseSparseMinimumOutput = collections.namedtuple(
    "SparseSparseMinimum", ["output_indices", "output_values"])
def sparse_sparse_minimum(a_indices, a_values, a_shape, b_indices, b_values, b_shape, name=None):
  r"""Returns the element-wise min of two SparseTensors.

  Assumes the two SparseTensors have the same shape, i.e., no broadcasting.

  Args:
    a_indices: A `Tensor` of type `int64`.
      2-D. `N x R` matrix with the indices of non-empty values in a
      SparseTensor, in the canonical lexicographic ordering.
    a_values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      1-D. `N` non-empty values corresponding to `a_indices`.
    a_shape: A `Tensor` of type `int64`.
      1-D. Shape of the input SparseTensor.
    b_indices: A `Tensor` of type `int64`.
      counterpart to `a_indices` for the other operand.
    b_values: A `Tensor`. Must have the same type as `a_values`.
      counterpart to `a_values` for the other operand; must be of the same dtype.
    b_shape: A `Tensor` of type `int64`.
      counterpart to `a_shape` for the other operand; the two shapes must be equal.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values).

    output_indices: A `Tensor` of type `int64`.
    output_values: A `Tensor`. Has the same type as `a_values`.
  """
  # Resolve the active eager/graph execution context.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "SparseSparseMinimum", name, a_indices, a_values, a_shape,
          b_indices, b_values, b_shape)
      _result = _SparseSparseMinimumOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Op-level failure: surface the C++ status as a Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; retry via the slower eager path.
    try:
      return sparse_sparse_minimum_eager_fallback(
          a_indices, a_values, a_shape, b_indices, b_values, b_shape,
          name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseSparseMinimum", a_indices=a_indices, a_values=a_values,
                               a_shape=a_shape, b_indices=b_indices,
                               b_values=b_values, b_shape=b_shape, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture attrs/inputs so the tape can replay the op for gradients.
    _attrs = ("T", _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseSparseMinimum", _inputs_flat, _attrs, _result)
  _result = _SparseSparseMinimumOutput._make(_result)
  return _result
# Raw-op endpoint: registers the generated wrapper as tf.raw_ops.SparseSparseMinimum.
SparseSparseMinimum = tf_export("raw_ops.SparseSparseMinimum")(_ops.to_raw_op(sparse_sparse_minimum))
def sparse_sparse_minimum_eager_fallback(a_indices, a_values, a_shape, b_indices, b_values, b_shape, name, ctx):
  """Slow-path eager execution of the SparseSparseMinimum op.

  Both value tensors are matched to a single shared dtype T.
  """
  allowed_types = [
      _dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8,
      _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64,
      _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16,
      _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128,
      _dtypes.half, _dtypes.uint32, _dtypes.uint64,
  ]
  value_type, (a_values, b_values) = _execute.args_to_matching_eager(
      [a_values, b_values], ctx, allowed_types)
  a_indices = _ops.convert_to_tensor(a_indices, _dtypes.int64)
  a_shape = _ops.convert_to_tensor(a_shape, _dtypes.int64)
  b_indices = _ops.convert_to_tensor(b_indices, _dtypes.int64)
  b_shape = _ops.convert_to_tensor(b_shape, _dtypes.int64)
  flat_inputs = [a_indices, a_values, a_shape, b_indices, b_values, b_shape]
  op_attrs = ("T", value_type)
  outputs = _execute.execute(b"SparseSparseMinimum", 2, inputs=flat_inputs,
                             attrs=op_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("SparseSparseMinimum", flat_inputs, op_attrs,
                             outputs)
  return _SparseSparseMinimumOutput._make(outputs)
# Structured (output_indices, output_values, output_shape) result for SparseSplit;
# each field holds a list of num_split tensors.
_SparseSplitOutput = collections.namedtuple(
    "SparseSplit", ["output_indices", "output_values", "output_shape"])
def sparse_split(split_dim, indices, values, shape, num_split, name=None):
  r"""Split a `SparseTensor` into `num_split` tensors along one dimension.

  If the `shape[split_dim]` is not an integer multiple of `num_split`, slices
  `[0 : shape[split_dim] % num_split]` get one extra dimension.
  For example, if `split_dim = 1` and `num_split = 2` and the input is

      input_tensor = shape = [2, 7]
      [    a   d e  ]
      [b c          ]

  Graphically the output tensors are:

      output_tensor[0] = shape = [2, 4]
      [    a  ]
      [b c    ]

      output_tensor[1] = shape = [2, 3]
      [ d e  ]
      [      ]

  Args:
    split_dim: A `Tensor` of type `int64`.
      0-D. The dimension along which to split. Must be in the range
      `[0, rank(shape))`.
    indices: A `Tensor` of type `int64`.
      2-D tensor represents the indices of the sparse tensor.
    values: A `Tensor`. 1-D tensor represents the values of the sparse tensor.
    shape: A `Tensor` of type `int64`.
      1-D. tensor represents the shape of the sparse tensor.
    num_split: An `int` that is `>= 1`. The number of ways to split.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (output_indices, output_values, output_shape).

    output_indices: A list of `num_split` `Tensor` objects with type `int64`.
    output_values: A list of `num_split` `Tensor` objects with the same type as `values`.
    output_shape: A list of `num_split` `Tensor` objects with type `int64`.
  """
  # Resolve the active eager/graph execution context.
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    # Eager mode: attempt the C fast path first.
    try:
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
          _ctx, "SparseSplit", name, split_dim, indices, values, shape,
          "num_split", num_split)
      _result = _SparseSplitOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      # Op-level failure: surface the C++ status as a Python exception.
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      pass  # Fast path rejected the inputs; retry via the slower eager path.
    try:
      return sparse_split_eager_fallback(
          split_dim, indices, values, shape, num_split=num_split, name=name,
          ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  num_split = _execute.make_int(num_split, "num_split")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseSplit", split_dim=split_dim, indices=indices, values=values,
                       shape=shape, num_split=num_split, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Capture attrs/inputs so the tape can replay the op for gradients.
    _attrs = ("num_split", _op._get_attr_int("num_split"), "T",
              _op._get_attr_type("T"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseSplit", _inputs_flat, _attrs, _result)
  # Regroup the flat output list (3 * num_split tensors) into three
  # per-kind lists: indices, values, shapes.
  _result = [_result[:num_split]] + _result[num_split:]
  _result = _result[:1] + [_result[1:1 + num_split]] + _result[1 + num_split:]
  _result = _result[:2] + [_result[2:]]
  _result = _SparseSplitOutput._make(_result)
  return _result
# Raw-op endpoint: registers the generated wrapper as tf.raw_ops.SparseSplit.
SparseSplit = tf_export("raw_ops.SparseSplit")(_ops.to_raw_op(sparse_split))
def sparse_split_eager_fallback(split_dim, indices, values, shape, num_split, name, ctx):
  """Slow-path eager execution of the SparseSplit op.

  The op emits num_split index tensors, num_split value tensors, and
  num_split shape tensors, which are regrouped into three lists.
  """
  num_split = _execute.make_int(num_split, "num_split")
  value_type, (values,) = _execute.args_to_matching_eager([values], ctx, [])
  split_dim = _ops.convert_to_tensor(split_dim, _dtypes.int64)
  indices = _ops.convert_to_tensor(indices, _dtypes.int64)
  shape = _ops.convert_to_tensor(shape, _dtypes.int64)
  flat_inputs = [split_dim, indices, values, shape]
  op_attrs = ("num_split", num_split, "T", value_type)
  outputs = _execute.execute(b"SparseSplit", 3 * num_split,
                             inputs=flat_inputs, attrs=op_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient("SparseSplit", flat_inputs, op_attrs, outputs)
  # Regroup the flat output list into three per-kind lists.
  indices_out = outputs[:num_split]
  values_out = outputs[num_split:2 * num_split]
  shapes_out = outputs[2 * num_split:]
  return _SparseSplitOutput._make([indices_out, values_out, shapes_out])
def sparse_tensor_dense_add(a_indices, a_values, a_shape, b, name=None):
  r"""Adds up a `SparseTensor` and a dense `Tensor`, producing a dense `Tensor`.

  This Op does not require `a_indices` be sorted in standard lexicographic order.

  Args:
    a_indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      2-D. The `indices` of the `SparseTensor`, with shape `[nnz, ndims]`.
    a_values: A `Tensor`. Must be one of the following types: `float32`, `float64`, `int32`, `uint8`, `int16`, `int8`, `complex64`, `int64`, `qint8`, `quint8`, `qint32`, `bfloat16`, `qint16`, `quint16`, `uint16`, `complex128`, `half`, `uint32`, `uint64`.
      1-D. The `values` of the `SparseTensor`, with shape `[nnz]`.
    a_shape: A `Tensor`. Must have the same type as `a_indices`.
      1-D. The `shape` of the `SparseTensor`, with shape `[ndims]`.
    b: A `Tensor`. Must have the same type as `a_values`.
      `ndims`-D Tensor. With shape `a_shape`.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `a_values`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute directly through the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseTensorDenseAdd", name, a_indices, a_values, a_shape, b)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; fall through to the Python fallback.
      pass
    try:
      return sparse_tensor_dense_add_eager_fallback(
          a_indices, a_values, a_shape, b, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseTensorDenseAdd", a_indices=a_indices, a_values=a_values,
                                a_shape=a_shape, b=b, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Attrs are read back from the constructed op for gradient recording.
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseTensorDenseAdd", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
# Expose sparse_tensor_dense_add under the tf.raw_ops.SparseTensorDenseAdd endpoint.
SparseTensorDenseAdd = tf_export("raw_ops.SparseTensorDenseAdd")(_ops.to_raw_op(sparse_tensor_dense_add))
def sparse_tensor_dense_add_eager_fallback(a_indices, a_values, a_shape, b, name, ctx):
  """Eager-mode fallback for `sparse_tensor_dense_add`.

  Resolves the "T" and "Tindices" dtype attrs by matching the paired
  arguments, executes the op via `_execute.execute`, and returns the single
  output tensor.
  """
  # `a_values` and `b` must share dtype "T"; match them against the allowed list.
  _attr_T, _inputs_T = _execute.args_to_matching_eager([a_values, b], ctx, [_dtypes.float32, _dtypes.float64, _dtypes.int32, _dtypes.uint8, _dtypes.int16, _dtypes.int8, _dtypes.complex64, _dtypes.int64, _dtypes.qint8, _dtypes.quint8, _dtypes.qint32, _dtypes.bfloat16, _dtypes.qint16, _dtypes.quint16, _dtypes.uint16, _dtypes.complex128, _dtypes.half, _dtypes.uint32, _dtypes.uint64, ])
  (a_values, b) = _inputs_T
  # `a_indices` and `a_shape` must share integer dtype "Tindices".
  _attr_Tindices, _inputs_Tindices = _execute.args_to_matching_eager([a_indices, a_shape], ctx, [_dtypes.int32, _dtypes.int64, ])
  (a_indices, a_shape) = _inputs_Tindices
  _inputs_flat = [a_indices, a_values, a_shape, b]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices)
  _result = _execute.execute(b"SparseTensorDenseAdd", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseTensorDenseAdd", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
def sparse_tensor_dense_mat_mul(a_indices, a_values, a_shape, b, adjoint_a=False, adjoint_b=False, name=None):
  r"""Multiply SparseTensor (of rank 2) "A" by dense matrix "B".

  No validity checking is performed on the indices of A.  However, the following
  input format is recommended for optimal behavior:

  if adjoint_a == false:
    A should be sorted in lexicographically increasing order.  Use SparseReorder
    if you're not sure.
  if adjoint_a == true:
    A should be sorted in order of increasing dimension 1 (i.e., "column major"
    order instead of "row major" order).

  Args:
    a_indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      2-D.  The `indices` of the `SparseTensor`, size `[nnz, 2]` Matrix.
    a_values: A `Tensor`.
      1-D.  The `values` of the `SparseTensor`, size `[nnz]` Vector.
    a_shape: A `Tensor` of type `int64`.
      1-D.  The `shape` of the `SparseTensor`, size `[2]` Vector.
    b: A `Tensor`. Must have the same type as `a_values`.
      2-D.  A dense Matrix.
    adjoint_a: An optional `bool`. Defaults to `False`.
      Use the adjoint of A in the matrix multiply.  If A is complex, this
      is transpose(conj(A)).  Otherwise it's transpose(A).
    adjoint_b: An optional `bool`. Defaults to `False`.
      Use the adjoint of B in the matrix multiply.  If B is complex, this
      is transpose(conj(B)).  Otherwise it's transpose(B).
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `a_values`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute directly through the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseTensorDenseMatMul", name, a_indices, a_values, a_shape,
        b, "adjoint_a", adjoint_a, "adjoint_b", adjoint_b)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; fall through to the Python fallback.
      pass
    try:
      return sparse_tensor_dense_mat_mul_eager_fallback(
          a_indices, a_values, a_shape, b, adjoint_a=adjoint_a,
          adjoint_b=adjoint_b, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Normalize the optional bool attrs before building the graph op.
  if adjoint_a is None:
    adjoint_a = False
  adjoint_a = _execute.make_bool(adjoint_a, "adjoint_a")
  if adjoint_b is None:
    adjoint_b = False
  adjoint_b = _execute.make_bool(adjoint_b, "adjoint_b")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseTensorDenseMatMul", a_indices=a_indices, a_values=a_values,
                                   a_shape=a_shape, b=b, adjoint_a=adjoint_a,
                                   adjoint_b=adjoint_b, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Attrs are read back from the constructed op for gradient recording.
    _attrs = ("T", _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"), "adjoint_a",
              _op._get_attr_bool("adjoint_a"), "adjoint_b",
              _op._get_attr_bool("adjoint_b"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseTensorDenseMatMul", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
# Expose sparse_tensor_dense_mat_mul under the tf.raw_ops.SparseTensorDenseMatMul endpoint.
SparseTensorDenseMatMul = tf_export("raw_ops.SparseTensorDenseMatMul")(_ops.to_raw_op(sparse_tensor_dense_mat_mul))
def sparse_tensor_dense_mat_mul_eager_fallback(a_indices, a_values, a_shape, b, adjoint_a, adjoint_b, name, ctx):
  """Eager-mode fallback for `sparse_tensor_dense_mat_mul`.

  Normalizes the bool attrs, resolves the "T" and "Tindices" dtype attrs,
  executes the op via `_execute.execute`, and returns the single output
  tensor.
  """
  if adjoint_a is None:
    adjoint_a = False
  adjoint_a = _execute.make_bool(adjoint_a, "adjoint_a")
  if adjoint_b is None:
    adjoint_b = False
  adjoint_b = _execute.make_bool(adjoint_b, "adjoint_b")
  # `a_values` and `b` must share dtype "T".
  _attr_T, _inputs_T = _execute.args_to_matching_eager([a_values, b], ctx, [])
  (a_values, b) = _inputs_T
  # "Tindices" defaults to int64 when not inferable from `a_indices`.
  _attr_Tindices, (a_indices,) = _execute.args_to_matching_eager([a_indices], ctx, [_dtypes.int32, _dtypes.int64, ], _dtypes.int64)
  a_shape = _ops.convert_to_tensor(a_shape, _dtypes.int64)
  _inputs_flat = [a_indices, a_values, a_shape, b]
  _attrs = ("T", _attr_T, "Tindices", _attr_Tindices, "adjoint_a", adjoint_a,
  "adjoint_b", adjoint_b)
  _result = _execute.execute(b"SparseTensorDenseMatMul", 1,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseTensorDenseMatMul", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
def sparse_to_dense(sparse_indices, output_shape, sparse_values, default_value, validate_indices=True, name=None):
  r"""Converts a sparse representation into a dense tensor.

  Builds an array `dense` with shape `output_shape` such that

  ```
  # If sparse_indices is scalar
  dense[i] = (i == sparse_indices ? sparse_values : default_value)

  # If sparse_indices is a vector, then for each i
  dense[sparse_indices[i]] = sparse_values[i]

  # If sparse_indices is an n by d matrix, then for each i in [0, n)
  dense[sparse_indices[i][0], ..., sparse_indices[i][d-1]] = sparse_values[i]
  ```

  All other values in `dense` are set to `default_value`.  If `sparse_values` is a
  scalar, all sparse indices are set to this single value.

  Indices should be sorted in lexicographic order, and indices must not
  contain any repeats. If `validate_indices` is true, these properties
  are checked during execution.

  Args:
    sparse_indices: A `Tensor`. Must be one of the following types: `int32`, `int64`.
      0-D, 1-D, or 2-D.  `sparse_indices[i]` contains the complete
      index where `sparse_values[i]` will be placed.
    output_shape: A `Tensor`. Must have the same type as `sparse_indices`.
      1-D.  Shape of the dense output tensor.
    sparse_values: A `Tensor`.
      1-D.  Values corresponding to each row of `sparse_indices`,
      or a scalar value to be used for all sparse indices.
    default_value: A `Tensor`. Must have the same type as `sparse_values`.
      Scalar value to set for indices not specified in
      `sparse_indices`.
    validate_indices: An optional `bool`. Defaults to `True`.
      If true, indices are checked to make sure they are sorted in
      lexicographic order and that there are no repeats.
    name: A name for the operation (optional).

  Returns:
    A `Tensor`. Has the same type as `sparse_values`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute directly through the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "SparseToDense", name, sparse_indices, output_shape,
        sparse_values, default_value, "validate_indices", validate_indices)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; fall through to the Python fallback.
      pass
    try:
      return sparse_to_dense_eager_fallback(
          sparse_indices, output_shape, sparse_values, default_value,
          validate_indices=validate_indices, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Normalize the optional bool attr before building the graph op.
  if validate_indices is None:
    validate_indices = True
  validate_indices = _execute.make_bool(validate_indices, "validate_indices")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "SparseToDense", sparse_indices=sparse_indices,
                         output_shape=output_shape,
                         sparse_values=sparse_values,
                         default_value=default_value,
                         validate_indices=validate_indices, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Attrs are read back from the constructed op for gradient recording.
    _attrs = ("validate_indices", _op._get_attr_bool("validate_indices"), "T",
              _op._get_attr_type("T"), "Tindices",
              _op._get_attr_type("Tindices"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "SparseToDense", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
# Expose sparse_to_dense under the tf.raw_ops.SparseToDense endpoint.
SparseToDense = tf_export("raw_ops.SparseToDense")(_ops.to_raw_op(sparse_to_dense))
def sparse_to_dense_eager_fallback(sparse_indices, output_shape, sparse_values, default_value, validate_indices, name, ctx):
  """Eager-mode fallback for `sparse_to_dense`.

  Normalizes `validate_indices`, resolves the "T" and "Tindices" dtype
  attrs by matching the paired arguments, executes the op via
  `_execute.execute`, and returns the single output tensor.
  """
  if validate_indices is None:
    validate_indices = True
  validate_indices = _execute.make_bool(validate_indices, "validate_indices")
  # `sparse_values` and `default_value` must share dtype "T".
  _attr_T, _inputs_T = _execute.args_to_matching_eager([sparse_values, default_value], ctx, [])
  (sparse_values, default_value) = _inputs_T
  # `sparse_indices` and `output_shape` must share integer dtype "Tindices".
  _attr_Tindices, _inputs_Tindices = _execute.args_to_matching_eager([sparse_indices, output_shape], ctx, [_dtypes.int32, _dtypes.int64, ])
  (sparse_indices, output_shape) = _inputs_Tindices
  _inputs_flat = [sparse_indices, output_shape, sparse_values, default_value]
  _attrs = ("validate_indices", validate_indices, "T", _attr_T, "Tindices",
  _attr_Tindices)
  _result = _execute.execute(b"SparseToDense", 1, inputs=_inputs_flat,
                             attrs=_attrs, ctx=ctx, name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "SparseToDense", _inputs_flat, _attrs, _result)
  # Single-output op: unwrap the one-element result list.
  _result, = _result
  return _result
# Result type for TakeManySparseFromTensorsMap: the three component tensors
# (indices, values, shape) of the concatenated SparseTensor.
_TakeManySparseFromTensorsMapOutput = collections.namedtuple(
    "TakeManySparseFromTensorsMap",
    ["sparse_indices", "sparse_values", "sparse_shape"])
def take_many_sparse_from_tensors_map(sparse_handles, dtype, container="", shared_name="", name=None):
  r"""Read `SparseTensors` from a `SparseTensorsMap` and concatenate them.

  The input `sparse_handles` must be an `int64` matrix of shape `[N, 1]` where
  `N` is the minibatch size and the rows correspond to the output handles of
  `AddSparseToTensorsMap` or `AddManySparseToTensorsMap`.  The ranks of the
  original `SparseTensor` objects that went into the given input ops must all
  match.  When the final `SparseTensor` is created, it has rank one
  higher than the ranks of the incoming `SparseTensor` objects
  (they have been concatenated along a new row dimension on the left).

  The output `SparseTensor` object's shape values for all dimensions but the
  first are the max across the input `SparseTensor` objects' shape values
  for the corresponding dimensions.  Its first shape value is `N`, the minibatch
  size.

  The input `SparseTensor` objects' indices are assumed ordered in
  standard lexicographic order.  If this is not the case, after this
  step run `SparseReorder` to restore index ordering.

  For example, if the handles represent an input, which is a `[2, 3]` matrix
  representing two original `SparseTensor` objects:

  ```
      index = [ 0]
              [10]
              [20]
      values = [1, 2, 3]
      shape = [50]
  ```

  and

  ```
      index = [ 2]
              [10]
      values = [4, 5]
      shape = [30]
  ```

  then the final `SparseTensor` will be:

  ```
      index = [0  0]
              [0 10]
              [0 20]
              [1  2]
              [1 10]
      values = [1, 2, 3, 4, 5]
      shape = [2 50]
  ```

  Args:
    sparse_handles: A `Tensor` of type `int64`.
      1-D, The `N` serialized `SparseTensor` objects.
      Shape: `[N]`.
    dtype: A `tf.DType`.
      The `dtype` of the `SparseTensor` objects stored in the
      `SparseTensorsMap`.
    container: An optional `string`. Defaults to `""`.
      The container name for the `SparseTensorsMap` read by this op.
    shared_name: An optional `string`. Defaults to `""`.
      The shared name for the `SparseTensorsMap` read by this op.
      It should not be blank; rather the `shared_name` or unique Operation name
      of the Op that created the original `SparseTensorsMap` should be used.
    name: A name for the operation (optional).

  Returns:
    A tuple of `Tensor` objects (sparse_indices, sparse_values, sparse_shape).

    sparse_indices: A `Tensor` of type `int64`.
    sparse_values: A `Tensor` of type `dtype`.
    sparse_shape: A `Tensor` of type `int64`.
  """
  _ctx = _context._context or _context.context()
  tld = _ctx._thread_local_data
  if tld.is_eager:
    try:
      # Fast path: execute directly through the C++ eager runtime.
      _result = pywrap_tfe.TFE_Py_FastPathExecute(
        _ctx, "TakeManySparseFromTensorsMap", name, sparse_handles, "dtype",
        dtype, "container", container, "shared_name", shared_name)
      _result = _TakeManySparseFromTensorsMapOutput._make(_result)
      return _result
    except _core._NotOkStatusException as e:
      _ops.raise_from_not_ok_status(e, name)
    except _core._FallbackException:
      # Fast path rejected the inputs; fall through to the Python fallback.
      pass
    try:
      return take_many_sparse_from_tensors_map_eager_fallback(
          sparse_handles, dtype=dtype, container=container,
          shared_name=shared_name, name=name, ctx=_ctx)
    except _core._SymbolicException:
      pass  # Add nodes to the TensorFlow graph.
  # Add nodes to the TensorFlow graph.
  # Normalize the attrs before building the graph op.
  dtype = _execute.make_type(dtype, "dtype")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  _, _, _op, _outputs = _op_def_library._apply_op_helper(
        "TakeManySparseFromTensorsMap", sparse_handles=sparse_handles,
                                        dtype=dtype, container=container,
                                        shared_name=shared_name, name=name)
  _result = _outputs[:]
  if _execute.must_record_gradient():
    # Attrs are read back from the constructed op for gradient recording.
    _attrs = ("dtype", _op._get_attr_type("dtype"), "container",
              _op.get_attr("container"), "shared_name",
              _op.get_attr("shared_name"))
    _inputs_flat = _op.inputs
    _execute.record_gradient(
        "TakeManySparseFromTensorsMap", _inputs_flat, _attrs, _result)
  # Wrap the three outputs in the named result tuple.
  _result = _TakeManySparseFromTensorsMapOutput._make(_result)
  return _result
# Expose take_many_sparse_from_tensors_map under the tf.raw_ops.TakeManySparseFromTensorsMap endpoint.
TakeManySparseFromTensorsMap = tf_export("raw_ops.TakeManySparseFromTensorsMap")(_ops.to_raw_op(take_many_sparse_from_tensors_map))
def take_many_sparse_from_tensors_map_eager_fallback(sparse_handles, dtype, container, shared_name, name, ctx):
  """Eager-mode fallback for `take_many_sparse_from_tensors_map`.

  Normalizes the attrs, executes the op via `_execute.execute`, and wraps
  the three outputs (sparse_indices, sparse_values, sparse_shape) in a
  `_TakeManySparseFromTensorsMapOutput` namedtuple.
  """
  dtype = _execute.make_type(dtype, "dtype")
  if container is None:
    container = ""
  container = _execute.make_str(container, "container")
  if shared_name is None:
    shared_name = ""
  shared_name = _execute.make_str(shared_name, "shared_name")
  sparse_handles = _ops.convert_to_tensor(sparse_handles, _dtypes.int64)
  _inputs_flat = [sparse_handles]
  _attrs = ("dtype", dtype, "container", container, "shared_name",
  shared_name)
  # The op has exactly three outputs: indices, values, and shape.
  _result = _execute.execute(b"TakeManySparseFromTensorsMap", 3,
                             inputs=_inputs_flat, attrs=_attrs, ctx=ctx,
                             name=name)
  if _execute.must_record_gradient():
    _execute.record_gradient(
        "TakeManySparseFromTensorsMap", _inputs_flat, _attrs, _result)
  _result = _TakeManySparseFromTensorsMapOutput._make(_result)
  return _result