# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops to convert between RaggedTensors and other tensor types."""

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_ragged_conversion_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops.ragged import ragged_tensor



def from_tensor(tensor,
                lengths=None,
                padding=None,
                ragged_rank=1,
                row_splits_dtype=dtypes.int64,
                name=None):
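  """Converts `tensor` to a `RaggedTensor` if it is not already ragged.

  This wrapper simply delegates to `ragged_tensor.RaggedTensor.from_tensor`;
  see that method for the meaning of `lengths`, `padding`, `ragged_rank`, and
  `row_splits_dtype`.  Illustrative example (values chosen for exposition):
  `from_tensor([[1, 2], [3, 0]], padding=0)` returns the ragged value
  `[[1, 2], [3]]`.
  """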

  if ragged_tensor.is_ragged(tensor):
    return tensor
  else:
    return ragged_tensor.RaggedTensor.from_tensor(
        tensor,
        lengths=lengths,
        padding=padding,
        ragged_rank=ragged_rank,
        row_splits_dtype=row_splits_dtype,
        name=name)



def to_tensor(rt_input, default_value=None, name=None):
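  """Converts `rt_input` to a dense `Tensor` if it is ragged.

  Ragged dimensions are padded out with `default_value`; non-ragged inputs
  are returned unchanged.
  """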

  if ragged_tensor.is_ragged(rt_input):
    return rt_input.to_tensor(default_value, name)
  else:
    return rt_input



def ragged_to_dense(rt_input, default_value=None, shape=None):
  """Create a dense tensor from a ragged tensor."""
  return rt_input.to_tensor(default_value=default_value, shape=shape)



@ops.RegisterGradient("RaggedTensorToTensor")
def _ragged_tensor_to_tensor_grad(op, grad):
  """Gradient for RaggedToTensor op."""
  # Extract inputs from the op.
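  # Note: op.inputs[0] is the requested output `shape`, which receives no
  # gradient (hence the leading `None` in the list returned below).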

  flat_values = op.inputs[1]
  default_value = op.inputs[2]
  row_partition_tensors = op.inputs[3:]
  row_partition_types = op.get_attr("row_partition_types")
  flat_value_shape = array_ops.shape(flat_values)
  ragged_rank = sum(
      1 for typ in row_partition_types if typ != b"FIRST_DIM_SIZE")

  # Create two tensors that correspond 1:1 with grad (and op.output):
  # * indices[i1...iN] is the index in `flat_values` of the value used to
  #   populate output[i1...iN] (if the value came from `flat_values`) or
  #   -1 (if the value came from `default_value`).
  # * mask[i1...iN] is true if output[i1...iN] came from `flat_values`, or
  #   false if it came from `default_value`.
  indices = gen_ragged_conversion_ops.ragged_tensor_to_tensor(
      shape=array_ops.shape(grad)[:1 + ragged_rank],
      values=math_ops.range(flat_value_shape[0]),
      default_value=-1,
      row_partition_types=row_partition_types,
      row_partition_tensors=row_partition_tensors)
  mask = math_ops.not_equal(indices, -1)

  # Select out the gradients & indices that came from `flat_values`, and use
  # those to construct the gradient for `flat_values` (as an IndexedSlices).
  values_grad = indexed_slices.IndexedSlices(
      values=array_ops.boolean_mask(grad, mask),
      indices=array_ops.boolean_mask(indices, mask),
      dense_shape=flat_value_shape)

  # Select out the gradients that came from `default_value`, and sum them to
  # get the gradient for the default.  Note that the default_value may have
  # been broadcast as part of the RaggedTensorToTensor operation, so we also
  # need to reduce any dimensions that might have been broadcast.
  default_grads = array_ops.boolean_mask(grad, ~mask)
  dims_to_reduce = math_ops.range(
      array_ops.rank(default_grads) -
      _rank_ignoring_leading_dims_with_size_1(default_value))
  default_grad = math_ops.reduce_sum(default_grads, axis=dims_to_reduce)

  # Restore any leading dims with size one.
  default_grad = array_ops.reshape(default_grad,
                                   array_ops.shape(default_value))

  return ([None, values_grad, default_grad] +
          [None for _ in row_partition_tensors])



def _rank_ignoring_leading_dims_with_size_1(value):
  """Returns `rank(value)`, ignoring any leading dimensions with size 1."""
  # Compute the result using static shape, if possible.
  if value.shape.rank is not None:
    ndims = value.shape.rank
    for dim in value.shape.dims:
      if dim.value == 1:
        ndims -= 1
      elif dim.value is None:
        ndims = None  # Can't compute the result using static shape.
        break
      else:
        break
    if ndims is not None:
      return ndims

  # Otherwise, we need to compute the result dynamically.  The math we use to
  # do this is a bit round-about, so here's an example to illustrate:
  #             shape = [1, 1, 3, 5, 1, 4]   # shape(value)
  #        dim_is_one = [1, 1, 0, 0, 1, 0]   # equal(shape, 1)
  #      leading_ones = [1, 1, 0, 0, 0, 0]   # cumprod(dim_is_one)
  #  num_leading_ones = 2                    # reduce_sum(leading_ones)
  #            result = 4                    # rank(value) - num_leading_ones
  shape = array_ops.shape(value)
  dim_is_one = math_ops.cast(math_ops.equal(shape, 1), dtypes.int32)
  leading_ones = math_ops.cumprod(dim_is_one)
  num_leading_ones = math_ops.reduce_sum(leading_ones)
  return array_ops.rank(value) - num_leading_ones



def to_sparse(rt_input, name=None):
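  """Converts a `RaggedTensor` into a `SparseTensor` with the same values."""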

  return rt_input.to_sparse(name)



def from_sparse(st_input, name=None):
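  """Converts a `SparseTensor` into a `RaggedTensor`.

  This simply delegates to `ragged_tensor.RaggedTensor.from_sparse`; see that
  method for the constraints on `st_input`.
  """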

  return ragged_tensor.RaggedTensor.from_sparse(st_input, name)



@ops.RegisterGradient("RaggedTensorFromVariant")
def _ragged_tensor_from_variant_grad(op, *grads):
  """Gradient for RaggedTensorFromVariant op."""

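  # The gradient w.r.t. the input variant is the incoming dense-values
  # gradient re-encoded as a variant.  `batched_input` must match whether the
  # forward op decoded a single (scalar) variant or a vector of them.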

  variant_rank = op.inputs[0].shape.rank
  if variant_rank == 0:
    batched_input = False
  elif variant_rank == 1:
    batched_input = True
  elif variant_rank is None:
    batched_input = (op.get_attr("output_ragged_rank") > 0)
  else:
    # TODO(edloper): Add a batch_dims argument to RaggedTensorToVariant, so
    # we can support this.
    raise ValueError("Unable to compute gradient: RaggedTensorToVariant "
                     "can currently only generate 0D or 1D output.")
  return [
      gen_ragged_conversion_ops.ragged_tensor_to_variant(
          rt_nested_splits=op.outputs[:-1],
          rt_dense_values=grads[-1],
          batched_input=batched_input)
  ]



@ops.RegisterGradient("RaggedTensorToVariant")
def _ragged_tensor_to_variant_grad(op, encoded_ragged_grad):
  """Gradient for RaggedTensorToVariant op."""

  dense_values = op.inputs[-1]
  ragged_rank = len(op.inputs) - 1
  row_splits = 0 if ragged_rank == 0 else op.inputs[0]
  values_grad = gen_ragged_conversion_ops.ragged_tensor_to_variant_gradient(
      encoded_ragged_grad=encoded_ragged_grad,
      row_splits=row_splits,
      dense_values_shape=array_ops.shape(dense_values),
      Tvalues=op.inputs[-1].dtype)
  result = [None] * ragged_rank + [values_grad]
  return result