Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/tensorflow/python/ops/parallel_for/gradients.py: 17% of 54 statements (coverage.py v7.4.0, created at 2024-01-03 07:57 +0000)
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Jacobian ops."""
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import gradients_impl as gradient_ops
from tensorflow.python.ops.parallel_for import control_flow_ops
from tensorflow.python.util import nest


def jacobian(output, inputs, use_pfor=True, parallel_iterations=None):
  """Computes jacobian of `output` w.r.t. `inputs`.

  Args:
    output: A tensor.
    inputs: A tensor or a nested structure of tensor objects.
    use_pfor: If true, uses pfor for computing the jacobian. Else uses
      tf.while_loop.
    parallel_iterations: A knob to control how many iterations are dispatched
      in parallel. This knob can be used to control the total memory usage.

  Returns:
    A tensor or a nested structure of tensors with the same structure as
    `inputs`. Each entry is the jacobian of `output` w.r.t. the corresponding
    value in `inputs`. If output has shape [y_1, ..., y_n] and inputs_i has
    shape [x_1, ..., x_m], the corresponding jacobian has shape
    [y_1, ..., y_n, x_1, ..., x_m]. Note that in cases where the gradient is
    sparse (IndexedSlices), the jacobian function currently makes it dense and
    returns a Tensor instead. This may change in the future.
  """
  flat_inputs = nest.flatten(inputs)
  output_tensor_shape = output.shape
  output_shape = array_ops.shape(output)
  output = array_ops.reshape(output, [-1])

  def loop_fn(i):
    y = array_ops.gather(output, i)
    return gradient_ops.gradients(y, flat_inputs)

  try:
    output_size = int(output.shape[0])
  except TypeError:
    output_size = array_ops.shape(output)[0]

  if use_pfor:
    pfor_outputs = control_flow_ops.pfor(
        loop_fn, output_size, parallel_iterations=parallel_iterations)
  else:
    pfor_outputs = control_flow_ops.for_loop(
        loop_fn,
        [output.dtype] * len(flat_inputs),
        output_size,
        parallel_iterations=parallel_iterations)

  for i, out in enumerate(pfor_outputs):
    if isinstance(out, ops.Tensor):
      new_shape = array_ops.concat(
          [output_shape, array_ops.shape(out)[1:]], axis=0)
      out = array_ops.reshape(out, new_shape)
      out.set_shape(output_tensor_shape.concatenate(flat_inputs[i].shape))
      pfor_outputs[i] = out

  return nest.pack_sequence_as(inputs, pfor_outputs)
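

# Illustrative usage sketch (an addition for this report, not part of the
# original TensorFlow module). A minimal example assuming TF1-style graph
# mode, since `gradient_ops.gradients` is unsupported under eager execution;
# the helper name `_example_jacobian` is hypothetical.
def _example_jacobian():
  from tensorflow.python.framework import constant_op
  x = constant_op.constant([1., 2., 3.])
  y = x * x
  # For y = x * x with x of shape [3], the jacobian has shape [3, 3] and is
  # diagonal with entries 2 * x: [[2, 0, 0], [0, 4, 0], [0, 0, 6]].
  return jacobian(y, x)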


def batch_jacobian(output, inp, use_pfor=True, parallel_iterations=None):
  """Computes and stacks jacobians of `output[i,...]` w.r.t. `input[i,...]`.

  e.g.
  x = tf.constant([[1, 2], [3, 4]], dtype=tf.float32)
  y = x * x
  jacobian = batch_jacobian(y, x)
  # => [[[2, 0], [0, 4]], [[6, 0], [0, 8]]]

  Args:
    output: A tensor with shape [b, y_1, ..., y_n]. `output[i,...]` should
      only depend on `inp[i,...]`.
    inp: A tensor with shape [b, x_1, ..., x_m].
    use_pfor: If true, uses pfor for computing the Jacobian. Else uses a
      tf.while_loop.
    parallel_iterations: A knob to control how many iterations are vectorized
      and dispatched in parallel. The default value of None, when use_pfor is
      true, corresponds to vectorizing all the iterations. When use_pfor is
      false, the default value of None corresponds to parallel_iterations=10.
      This knob can be used to control the total memory usage.

  Returns:
    A tensor `t` with shape [b, y_1, ..., y_n, x_1, ..., x_m] where `t[i, ...]`
    is the jacobian of `output[i, ...]` w.r.t. `inp[i, ...]`, i.e. stacked
    per-example jacobians.

  Raises:
    ValueError: if first dimension of `output` and `inp` do not match.
  """
  output_shape = output.shape
  if not output_shape[0].is_compatible_with(inp.shape[0]):
    raise ValueError(
        f"Need first dimension of `output` shape ({output.shape}) "
        f"and `inp` shape ({inp.shape}) to match.")
  if output_shape.is_fully_defined():
    batch_size = int(output_shape[0])
    output_row_size = output_shape.num_elements() // batch_size
  else:
    output_shape = array_ops.shape(output)
    batch_size = output_shape[0]
    output_row_size = array_ops.size(output) // batch_size
  inp_shape = array_ops.shape(inp)
  # Flatten output to 2-D.
  with ops.control_dependencies(
      [check_ops.assert_equal(batch_size, inp_shape[0])]):
    output = array_ops.reshape(output, [batch_size, output_row_size])

  def loop_fn(i):
    y = array_ops.gather(output, i, axis=1)
    return gradient_ops.gradients(y, inp)[0]

  if use_pfor:
    pfor_output = control_flow_ops.pfor(
        loop_fn, output_row_size, parallel_iterations=parallel_iterations)
  else:
    pfor_output = control_flow_ops.for_loop(
        loop_fn, output.dtype,
        output_row_size,
        parallel_iterations=parallel_iterations)
  if pfor_output is None:
    return None
  pfor_output = array_ops.reshape(pfor_output,
                                  [output_row_size, batch_size, -1])
  output = array_ops.transpose(pfor_output, [1, 0, 2])
  new_shape = array_ops.concat([output_shape, inp_shape[1:]], axis=0)
  return array_ops.reshape(output, new_shape)
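

# Illustrative usage sketch (an addition for this report, not part of the
# original TensorFlow module), mirroring the docstring example above. Same
# graph-mode assumption as `_example_jacobian`; the helper name is
# hypothetical.
def _example_batch_jacobian():
  from tensorflow.python.framework import constant_op
  x = constant_op.constant([[1., 2.], [3., 4.]])
  y = x * x
  # Per-example jacobians of y = x * x are diagonal with entries 2 * x, so
  # the result has shape [2, 2, 2]: [[[2, 0], [0, 4]], [[6, 0], [0, 8]]].
  return batch_jacobian(y, x)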