# Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
"""Optimizer that implements cross-shard gradient reduction for TPU."""

from tensorflow.python.framework import ops
from tensorflow.python.ops.losses import losses
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.tpu import tpu_function
from tensorflow.python.tpu.ops import tpu_ops
from tensorflow.python.training import optimizer
from tensorflow.python.util.tf_export import tf_export


@tf_export(v1=["tpu.CrossShardOptimizer"])
class CrossShardOptimizer(optimizer.Optimizer):
  """An optimizer that averages gradients across TPU shards."""

  def __init__(self,
               opt,
               reduction=losses.Reduction.MEAN,
               name="CrossShardOptimizer",
               group_assignment=None):
    """Construct a new cross-shard optimizer.

    Args:
      opt: An existing `Optimizer` to encapsulate.
      reduction: The reduction to apply to the shard losses.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to "CrossShardOptimizer".
      group_assignment: Optional 2d int32 lists with shape
        [num_groups, num_replicas_per_group] which describes how to apply the
        optimizer to subgroups.

    Raises:
      ValueError: If reduction is not a valid cross-shard reduction.
    """
    accepted_reductions = (losses.Reduction.SUM, losses.Reduction.MEAN)
    if reduction not in accepted_reductions:
      raise ValueError(
          f"Argument `reduction` should be one of {accepted_reductions}. "
          f"Received: {reduction}")
    if not isinstance(opt, optimizer.Optimizer):
      raise TypeError(
          "CrossShardOptimizer only works with tf.training.Optimizer and not "
          f"Keras Optimizer. Received: {opt}. "
          "If you are using TPUStrategy, "
          "Keras Optimizer will sum gradients across replicas. "
          "If you are using TPUEstimator, you may instead sum your gradients "
          "with:\n"
          "`grads = [tf.compat.v1.tpu.cross_replica_sum(g) for g in grads]`\n"
          "If you want to average your gradients, rescale your loss with: "
          "`loss /= global_batch_size`")

    super(CrossShardOptimizer, self).__init__(False, name)
    self._opt = opt
    self._reduction = reduction
    self._group_assignment = group_assignment
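
  # A minimal usage sketch (illustrative, not part of the original module):
  # wrapping a tf.compat.v1 optimizer so each shard's gradients are reduced
  # across TPU replicas. The inner optimizer and learning rate are arbitrary
  # assumptions.
  #
  #   base_opt = tf.compat.v1.train.GradientDescentOptimizer(
  #       learning_rate=0.01)
  #   opt = tf.compat.v1.tpu.CrossShardOptimizer(base_opt)
  #   train_op = opt.minimize(
  #       loss, global_step=tf.compat.v1.train.get_global_step())
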
  def _verify_and_get_subgroup_size(self, group_assignment, num_shards):
    """Verify group_assignment and get the subgroup size.

    Args:
      group_assignment: list of group ids for applying the optimizer
        to subgroups.
      num_shards: The number of TPU shards.

    Returns:
      The size of one subgroup in group_assignment.

    Raises:
      ValueError: If group_assignment is invalid.
    """
    if not group_assignment:
      return None
    if not (isinstance(group_assignment, list) and
            all(isinstance(i, list) for i in group_assignment)):
      raise ValueError(
          f"Argument `group_assignment` must be a list of lists. "
          f"Received: {group_assignment}")

    replica_ids = set()
    for g in group_assignment:
      for i in g:
        replica_ids.add(i)

    if set(range(num_shards)) != replica_ids:
      raise ValueError(
          f"Argument `group_assignment` must be a permutation of "
          f"range({num_shards}). Received: {group_assignment}")

    subgroup_size_list = [len(group) for group in group_assignment]
    if all(subgroup_size_list[0] == size for size in subgroup_size_list):
      return subgroup_size_list[0]
    else:
      raise ValueError("The size of each subgroup in `group_assignment` must "
                       f"be equal. Received: {group_assignment}")
  def compute_gradients(self, loss, var_list=None, **kwargs):
    """Compute gradients of "loss" for the variables in "var_list".

    This simply wraps `compute_gradients()` from the real optimizer. The
    gradients are not aggregated until `apply_gradients()`, so the user can
    still modify them here, for example by clipping with a per-replica global
    norm. A global norm computed over already-aggregated gradients can be
    misleading, as one replica's huge gradients can hurt the gradients from
    other replicas.

    When the CrossShardOptimizer is constructed with
    `reduction == losses.Reduction.MEAN` (default), this function scales the
    loss by `1.0 / num_shards` before computing the gradients. Assuming the
    optimizer uses the default implementation of `compute_gradients()`, the
    gradients of the scaled loss are scaled by `1.0 / num_shards` compared to
    the gradients of the original loss. This scaling factor matters because
    `apply_gradients()` sums gradients across shards rather than averaging
    them, and it must be taken into account when clipping the norm of the
    gradients or performing other postprocessing.

    Args:
      loss: A Tensor containing the value to minimize.
      var_list: Optional list or tuple of `tf.Variable` to update to minimize
        `loss`. Defaults to the list of variables collected in the graph
        under the key `GraphKeys.TRAINABLE_VARIABLES`.
      **kwargs: Keyword arguments for compute_gradients().

    Returns:
      A list of (gradient, variable) pairs.

    Raises:
      ValueError: If not within a tpu_shard_context or group_assignment is
        invalid.
    """
    num_shards = tpu_function.get_tpu_context().number_of_shards
    if num_shards is None:
      logging.warning(
          "CrossShardOptimizer should be used within a tpu_shard_context, but "
          "got unset number_of_shards. Assuming 1.")
      num_shards = 1

    subgroup_size = self._verify_and_get_subgroup_size(self._group_assignment,
                                                       num_shards)

    if num_shards > 1 and self._reduction == losses.Reduction.MEAN:
      if self._group_assignment:
        scale = 1.0 / subgroup_size
      else:
        scale = 1.0 / num_shards
      loss *= scale

    return self._opt.compute_gradients(loss, var_list=var_list, **kwargs)
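
  # Illustrative sketch (not part of the original module): gradients returned
  # here are still per-replica, so per-replica global-norm clipping can happen
  # before apply_gradients() performs the cross-replica sum. The clip norm and
  # variable names below are arbitrary assumptions.
  #
  #   grads_and_vars = opt.compute_gradients(loss)
  #   grads, tvars = zip(*grads_and_vars)
  #   clipped, _ = tf.clip_by_global_norm(grads, clip_norm=1.0)
  #   train_op = opt.apply_gradients(zip(clipped, tvars), global_step=step)
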
  def apply_gradients(self, grads_and_vars, global_step=None, name=None):
    """Apply gradients to variables.

    Calls tpu_ops.cross_replica_sum() to sum gradient contributions across
    replicas, and then applies the real optimizer.

    Args:
      grads_and_vars: List of (gradient, variable) pairs as returned by
        compute_gradients().
      global_step: Optional Variable to increment by one after the
        variables have been updated.
      name: Optional name for the returned operation. Defaults to the
        name passed to the Optimizer constructor.

    Returns:
      An `Operation` that applies the gradients. If `global_step` was not None,
      that operation also increments `global_step`.

    Raises:
      ValueError: If `grads_and_vars` is malformed.
    """
    summed_grads_and_vars = []
    for (grad, var) in grads_and_vars:
      if grad is None:
        summed_grads_and_vars.append((grad, var))
      else:
        with ops.colocate_with(grad):
          summed_grads_and_vars.append((tpu_ops.cross_replica_sum(
              grad, self._group_assignment), var))
    return self._opt.apply_gradients(summed_grads_and_vars, global_step, name)
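
  # Note (added, not part of the original module): with the default MEAN
  # reduction over N shards, compute_gradients() scales the loss by 1/N, so
  # the cross_replica_sum() above turns the per-shard gradients g_i into
  # sum_i (g_i / N), i.e. the mean gradient across shards.
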
  def get_slot(self, *args, **kwargs):
    """Return a slot named "name" created for "var" by the Optimizer.

    This simply wraps get_slot() from the actual optimizer.

    Args:
      *args: Arguments for get_slot().
      **kwargs: Keyword arguments for get_slot().

    Returns:
      The `Variable` for the slot if it was created, `None` otherwise.
    """
    return self._opt.get_slot(*args, **kwargs)

  def get_slot_names(self, *args, **kwargs):
    """Return a list of the names of slots created by the `Optimizer`.

    This simply wraps get_slot_names() from the actual optimizer.

    Args:
      *args: Arguments for get_slot_names().
      **kwargs: Keyword arguments for get_slot_names().

    Returns:
      A list of strings.
    """
    return self._opt.get_slot_names(*args, **kwargs)

  def variables(self):
    """Forwards the variables from the underlying optimizer."""
    return self._opt.variables()
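

# End-to-end sketch (illustrative, not part of the original module; assumes
# TF1 graph mode inside a TPUEstimator model_fn, and `build_model` is a
# hypothetical helper):
#
#   def model_fn(features, labels, mode, params):
#     logits = build_model(features)
#     loss = tf.compat.v1.losses.softmax_cross_entropy(labels, logits)
#     opt = tf.compat.v1.tpu.CrossShardOptimizer(
#         tf.compat.v1.train.AdamOptimizer(learning_rate=1e-3))
#     train_op = opt.minimize(loss, tf.compat.v1.train.get_global_step())
#     return tf.compat.v1.estimator.tpu.TPUEstimatorSpec(
#         mode=mode, loss=loss, train_op=train_op)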