Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/data_flow_grad.py: 58% (48 statements)


# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Gradients for operators defined in data_flow_ops.py."""

from tensorflow.python.framework import dtypes
from tensorflow.python.framework import indexed_slices
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops


@ops.RegisterGradient("DynamicPartition")
def _DynamicPartitionGrads(op, *grads):
  """Gradients for DynamicPartition."""
  data = op.inputs[0]
  indices = op.inputs[1]
  num_partitions = op.get_attr("num_partitions")

  prefix_shape = array_ops.shape(indices)
  original_indices = array_ops.reshape(
      math_ops.range(math_ops.reduce_prod(prefix_shape)), prefix_shape)
  partitioned_indices = data_flow_ops.dynamic_partition(
      original_indices, indices, num_partitions)
  reconstructed = data_flow_ops.parallel_dynamic_stitch(partitioned_indices,
                                                         grads)
  reconstructed = array_ops.reshape(reconstructed, array_ops.shape(data))
  return [reconstructed, None]
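
# A minimal illustrative sketch of how the gradient above behaves through the
# public `tf.dynamic_partition` API: the backward pass stitches the
# per-partition gradients back into the shape of `data`, and the integer
# `partitions` input receives no gradient.
#
#   import tensorflow as tf
#
#   data = tf.constant([1., 2., 3., 4.])
#   partitions = tf.constant([0, 1, 0, 1])
#   with tf.GradientTape() as tape:
#     tape.watch(data)
#     parts = tf.dynamic_partition(data, partitions, num_partitions=2)
#     loss = tf.reduce_sum(parts[0]) + 2.0 * tf.reduce_sum(parts[1])
#   # tape.gradient(loss, data) -> [1., 2., 1., 2.]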


@ops.RegisterGradient("DynamicStitch")
@ops.RegisterGradient("ParallelDynamicStitch")
def _DynamicStitchGrads(op, grad):
  """Gradients for DynamicStitch and ParallelDynamicStitch."""

  num_values = len(op.inputs) // 2
  indices_grad = [None] * num_values

  def AsInt32(x):
    return (x if op.inputs[0].dtype == dtypes.int32 else
            math_ops.cast(x, dtypes.int32))

  inputs = [AsInt32(op.inputs[i]) for i in range(num_values)]
  if isinstance(grad, indexed_slices.IndexedSlices):
    output_shape = array_ops.shape(op.outputs[0])
    output_rows = output_shape[0]
    grad = math_ops.unsorted_segment_sum(grad.values, grad.indices,
                                          output_rows)
  values_grad = [array_ops.gather(grad, inp) for inp in inputs]
  return indices_grad + values_grad
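
# A minimal illustrative sketch of how the stitch gradient surfaces through
# the public `tf.dynamic_stitch` API: the gradient for each value tensor is a
# gather of the incoming gradient at that tensor's indices, while the indices
# themselves get no gradient.
#
#   import tensorflow as tf
#
#   indices = [tf.constant([0, 2]), tf.constant([1, 3])]
#   values = [tf.constant([10., 30.]), tf.constant([20., 40.])]
#   with tf.GradientTape() as tape:
#     tape.watch(values)
#     stitched = tf.dynamic_stitch(indices, values)  # [10., 20., 30., 40.]
#     loss = tf.reduce_sum(stitched * tf.constant([1., 2., 3., 4.]))
#   # tape.gradient(loss, values) -> [[1., 3.], [2., 4.]]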


ops.NotDifferentiable("Queue")
ops.NotDifferentiable("QueueEnqueue")
ops.NotDifferentiable("QueueEnqueueMany")
ops.NotDifferentiable("QueueDequeue")
ops.NotDifferentiable("QueueDequeueMany")
ops.NotDifferentiable("QueueDequeueUpTo")
ops.NotDifferentiable("QueueClose")
ops.NotDifferentiable("QueueSize")

ops.NotDifferentiable("Stack")
ops.NotDifferentiable("StackPush")
ops.NotDifferentiable("StackPop")
ops.NotDifferentiable("StackClose")

ops.NotDifferentiable("GetSessionHandle")
ops.NotDifferentiable("GetSessionHandleV2")
ops.NotDifferentiable("GetSessionTensor")
ops.NotDifferentiable("DeleteSessionTensor")