Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/keras/src/optimizers/adamax.py: 27%
48 statements
coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
# Copyright 2022 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Adamax optimizer implementation."""

import tensorflow.compat.v2 as tf

from keras.src.optimizers import optimizer
from keras.src.saving.object_registration import register_keras_serializable

# isort: off
from tensorflow.python.util.tf_export import keras_export


@register_keras_serializable()
@keras_export(
    "keras.optimizers.experimental.Adamax", "keras.optimizers.Adamax", v1=[]
)
class Adamax(optimizer.Optimizer):
    """Optimizer that implements the Adamax algorithm.

    Adamax, a variant of Adam based on the infinity norm, is a first-order
    gradient-based optimization method. Because it adapts the learning rate
    to the characteristics of the data, it is well suited to learning
    time-variant processes, e.g., speech data with dynamically changing noise
    conditions. Default parameters follow those provided in the paper (see
    the reference section below).

    Initialization:

    ```python
    m = 0  # Initialize 1st moment vector
    u = 0  # Initialize the exponentially weighted infinity norm
    t = 0  # Initialize timestep
    ```

    The update rule for parameter `w` with gradient `g` is described at the
    end of section 7.1 of the paper (see the reference section):

    ```python
    t += 1
    m = beta1 * m + (1 - beta1) * g
    u = max(beta2 * u, abs(g))
    current_lr = learning_rate / (1 - beta1 ** t)
    w = w - current_lr * m / (u + epsilon)
    ```

    Args:
        learning_rate: A `tf.Tensor`, floating point value, a schedule that is
            a `tf.keras.optimizers.schedules.LearningRateSchedule`, or a
            callable that takes no arguments and returns the actual value to
            use. The learning rate. Defaults to `0.001`.
        beta_1: A float value or a constant float tensor. The exponential
            decay rate for the 1st moment estimates.
        beta_2: A float value or a constant float tensor. The exponential
            decay rate for the exponentially weighted infinity norm.
        epsilon: A small constant for numerical stability.
        {{base_optimizer_keyword_args}}

    Reference:
        - [Kingma et al., 2014](http://arxiv.org/abs/1412.6980)
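
    Usage (a minimal sketch, assuming eager execution):

    ```python
    opt = tf.keras.optimizers.Adamax(learning_rate=0.001)
    var1 = tf.Variable(10.0)
    loss = lambda: (var1 ** 2) / 2.0  # d(loss) / d(var1) == var1
    opt.minimize(loss, [var1])        # runs a single Adamax update step
    ```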
    """

    def __init__(
        self,
        learning_rate=0.001,
        beta_1=0.9,
        beta_2=0.999,
        epsilon=1e-7,
        weight_decay=None,
        clipnorm=None,
        clipvalue=None,
        global_clipnorm=None,
        use_ema=False,
        ema_momentum=0.99,
        ema_overwrite_frequency=None,
        jit_compile=True,
        name="Adamax",
        **kwargs
    ):
        super().__init__(
            name=name,
            weight_decay=weight_decay,
            clipnorm=clipnorm,
            clipvalue=clipvalue,
            global_clipnorm=global_clipnorm,
            use_ema=use_ema,
            ema_momentum=ema_momentum,
            ema_overwrite_frequency=ema_overwrite_frequency,
            jit_compile=jit_compile,
            **kwargs
        )
        self._learning_rate = self._build_learning_rate(learning_rate)
        self.beta_1 = beta_1
        self.beta_2 = beta_2
        self.epsilon = epsilon

    def build(self, var_list):
        """Initialize optimizer variables.

        Adamax optimizer has 2 types of variables: momentums (denoted as m)
        and exponentially weighted infinity norms (denoted as u).

        Args:
            var_list: list of model variables to build Adamax variables on.
        """
        super().build(var_list)
        if hasattr(self, "_built") and self._built:
            return
        self._built = True
        self._m = []
        self._u = []
        for var in var_list:
            self._m.append(
                self.add_variable_from_reference(
                    model_variable=var, variable_name="m"
                )
            )
            self._u.append(
                self.add_variable_from_reference(
                    model_variable=var, variable_name="u"
                )
            )

    def update_step(self, gradient, variable):
        """Update step given gradient and the associated model variable."""
        lr = tf.cast(self.learning_rate, variable.dtype)
        local_step = tf.cast(self.iterations + 1, variable.dtype)
        beta_1_power = tf.pow(tf.cast(self.beta_1, variable.dtype), local_step)

        var_key = self._var_key(variable)
        m = self._m[self._index_dict[var_key]]
        u = self._u[self._index_dict[var_key]]

        if isinstance(gradient, tf.IndexedSlices):
            # Sparse gradients.
            indices = gradient.indices
            m.assign_add(-m * (1 - self.beta_1))
            m.scatter_add(
                tf.IndexedSlices(gradient.values * (1 - self.beta_1), indices)
            )
            u.assign(u * self.beta_2)
            u_slice = tf.gather(u, indices)
            u_slice_incremental = (
                tf.maximum(u_slice, tf.abs(gradient.values)) - u_slice
            )
            u.scatter_add(tf.IndexedSlices(u_slice_incremental, indices))
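            # The beta decays above were applied to every row of `m` and `u`,
            # while the new gradient information was scattered only into
            # `indices`; the bias-corrected step below then updates all rows
            # of `variable`.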
            variable.assign_sub(
                (lr * m) / ((1 - beta_1_power) * (u + self.epsilon))
            )
        else:
            # Dense gradients.
            m.assign_add((gradient - m) * (1 - self.beta_1))
            u.assign(tf.maximum(self.beta_2 * u, tf.abs(gradient)))
            variable.assign_sub(
                (lr * m) / ((1 - beta_1_power) * (u + self.epsilon))
            )
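            # As in the docstring pseudocode, the bias-corrected learning rate
            # `learning_rate / (1 - beta1 ** t)` is folded into the division
            # by `(1 - beta_1_power)`.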

    def get_config(self):
        config = super().get_config()

        config.update(
            {
                "learning_rate": self._serialize_hyperparameter(
                    self._learning_rate
                ),
                "beta_1": self.beta_1,
                "beta_2": self.beta_2,
                "epsilon": self.epsilon,
            }
        )
        return config


Adamax.__doc__ = Adamax.__doc__.replace(
    "{{base_optimizer_keyword_args}}", optimizer.base_optimizer_keyword_args
)
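

# A minimal usage sketch (not part of the upstream Keras module) showing how
# `update_step` is exercised for both dense gradients and sparse
# `tf.IndexedSlices` gradients, plus a `get_config`/`from_config` round trip.
# `jit_compile` is disabled so the example does not depend on XLA.
if __name__ == "__main__":  # pragma: no cover
    dense_var = tf.Variable([1.0, 2.0])
    embeddings = tf.Variable(tf.ones([4, 2]))
    opt = Adamax(learning_rate=0.01, jit_compile=False)

    with tf.GradientTape() as tape:
        # Gradients w.r.t. `embeddings` arrive as `tf.IndexedSlices`, so the
        # sparse branch of `update_step` is taken for that variable.
        rows = tf.gather(embeddings, [0, 2])
        loss = tf.reduce_sum(dense_var ** 2) + tf.reduce_sum(rows ** 2)

    grads = tape.gradient(loss, [dense_var, embeddings])
    opt.apply_gradients(zip(grads, [dense_var, embeddings]))

    # A rebuilt optimizer shares hyperparameters but starts from fresh slots.
    restored = Adamax.from_config(opt.get_config())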