Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/keras/src/optimizers/legacy/adagrad.py: 34%

53 statements  

« prev     ^ index     » next       coverage.py v7.4.0, created at 2024-01-03 07:57 +0000

1# Copyright 2018 The TensorFlow Authors. All Rights Reserved. 

2# 

3# Licensed under the Apache License, Version 2.0 (the "License"); 

4# you may not use this file except in compliance with the License. 

5# You may obtain a copy of the License at 

6# 

7# http://www.apache.org/licenses/LICENSE-2.0 

8# 

9# Unless required by applicable law or agreed to in writing, software 

10# distributed under the License is distributed on an "AS IS" BASIS, 

11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 

12# See the License for the specific language governing permissions and 

13# limitations under the License. 

14# ============================================================================== 

15"""Adagrad optimizer implementation.""" 

16 

17import numpy as np 

18import tensorflow.compat.v2 as tf 

19 

20from keras.src import backend_config 

21from keras.src.optimizers.legacy import optimizer_v2 

22 

23# isort: off 

24from tensorflow.python.util.tf_export import keras_export 

25 

26 

@keras_export(
    "keras.optimizers.legacy.Adagrad",
    v1=["keras.optimizers.Adagrad", "keras.optimizers.legacy.Adagrad"],
)
class Adagrad(optimizer_v2.OptimizerV2):
    r"""Optimizer that implements the Adagrad algorithm.

    Adagrad is an optimizer with parameter-specific learning rates,
    which are adapted relative to how frequently a parameter gets
    updated during training. The more updates a parameter receives,
    the smaller the updates.

    Args:
      learning_rate: Initial value for the learning rate:
        either a floating point value,
        or a `tf.keras.optimizers.schedules.LearningRateSchedule` instance.
        Note that `Adagrad` tends to benefit from higher initial learning rate
        values compared to other optimizers.
        To match the exact form in the original paper, use 1.0.
        Defaults to `0.001`.
      initial_accumulator_value: Floating point value.
        Starting value for the accumulators (per-parameter momentum values).
        Must be non-negative.
      epsilon: Small floating point value used to maintain numerical stability.
      name: Optional name prefix for the operations created when applying
        gradients. Defaults to `"Adagrad"`.
      **kwargs: keyword arguments. Allowed arguments are `clipvalue`,
        `clipnorm`, `global_clipnorm`.
        If `clipvalue` (float) is set, the gradient of each weight
        is clipped to be no higher than this value.
        If `clipnorm` (float) is set, the gradient of each weight
        is individually clipped so that its norm is no higher than this value.
        If `global_clipnorm` (float) is set the gradient of all weights is
        clipped so that their global norm is no higher than this value.

    Reference:
      - [Duchi et al., 2011](
        http://www.jmlr.org/papers/volume12/duchi11a/duchi11a.pdf).
    """

    # This optimizer supports aggregated (already-summed) gradients; the
    # base class checks this flag before deciding how to combine replicas.
    _HAS_AGGREGATE_GRAD = True

    def __init__(
        self,
        learning_rate=0.001,
        initial_accumulator_value=0.1,
        epsilon=1e-7,
        name="Adagrad",
        **kwargs
    ):
        if initial_accumulator_value < 0.0:
            raise ValueError(
                "initial_accumulator_value must be non-negative: "
                f"{initial_accumulator_value}"
            )
        if epsilon is None:
            epsilon = backend_config.epsilon()
        super().__init__(name, **kwargs)
        # The legacy "lr" kwarg takes precedence over `learning_rate`
        # for backward compatibility with Keras V1 configs.
        self._set_hyper("learning_rate", kwargs.get("lr", learning_rate))
        self._set_hyper("decay", self._initial_decay)
        self._initial_accumulator_value = initial_accumulator_value
        # NOTE: `epsilon or ...` also maps an explicit 0.0 to the backend
        # default, matching the long-standing behavior of this optimizer.
        self.epsilon = epsilon or backend_config.epsilon()

    def _create_slots(self, var_list):
        """Create one "accumulator" slot per variable, initialized to
        `initial_accumulator_value` in the variable's base dtype."""
        for var in var_list:
            dtype = var.dtype.base_dtype
            init = tf.compat.v1.constant_initializer(
                self._initial_accumulator_value, dtype=dtype
            )
            self.add_slot(var, "accumulator", init)

    def _prepare_local(self, var_device, var_dtype, apply_state):
        """Add Adagrad-specific constants to the per-(device, dtype) entry
        of `apply_state` on top of what the base class prepares."""
        super()._prepare_local(var_device, var_dtype, apply_state)
        apply_state[(var_device, var_dtype)].update(
            dict(
                epsilon=tf.convert_to_tensor(self.epsilon, var_dtype),
                neg_lr_t=-apply_state[(var_device, var_dtype)]["lr_t"],
                zero=tf.zeros((), dtype=tf.int64),
            )
        )

    def set_weights(self, weights):
        """Set the optimizer weights.

        Overridden for backward compatibility with Keras V1 optimizers,
        whose weight lists do not include the iteration count at the head;
        in that case the iteration is set to 0.
        """
        params = self.weights
        if len(params) == len(weights) + 1:
            weights = [np.array(0)] + weights
        super().set_weights(weights)

    @classmethod
    def from_config(cls, config, custom_objects=None):
        """Creates an optimizer from its config.

        This method is the reverse of `get_config`,
        capable of instantiating the same optimizer from the config
        dictionary.

        Args:
            config: A Python dictionary, typically the output of get_config.
            custom_objects: A Python dictionary mapping names to additional
                Python objects used to create this optimizer, such as a
                function used for a hyperparameter.

        Returns:
            An optimizer instance.
        """
        # Work on a copy so the legacy-key rewrites below do not mutate
        # the caller's dictionary.
        config = dict(config)
        if "initial_accumulator_value" not in config:
            config["initial_accumulator_value"] = 0.1
        if "lr" in config:
            config["learning_rate"] = config.pop("lr")
        return cls(**config)

    def _resource_apply_dense(self, grad, var, apply_state=None):
        """Apply a dense gradient update using the fused AdagradV2 kernel."""
        var_device, var_dtype = var.device, var.dtype.base_dtype
        # Fall back to recomputing coefficients when `apply_state` was not
        # prepared for this (device, dtype) pair.
        coefficients = (apply_state or {}).get(
            (var_device, var_dtype)
        ) or self._fallback_apply_state(var_device, var_dtype)

        acc = self.get_slot(var, "accumulator")
        return tf.raw_ops.ResourceApplyAdagradV2(
            var=var.handle,
            accum=acc.handle,
            lr=coefficients["lr_t"],
            epsilon=coefficients["epsilon"],
            grad=grad,
            use_locking=self._use_locking,
        )

    def _resource_apply_sparse(self, grad, var, indices, apply_state=None):
        """Apply a sparse (indexed-slices) gradient update using the fused
        sparse AdagradV2 kernel."""
        var_device, var_dtype = var.device, var.dtype.base_dtype
        coefficients = (apply_state or {}).get(
            (var_device, var_dtype)
        ) or self._fallback_apply_state(var_device, var_dtype)

        acc = self.get_slot(var, "accumulator")
        return tf.raw_ops.ResourceSparseApplyAdagradV2(
            var=var.handle,
            accum=acc.handle,
            lr=coefficients["lr_t"],
            epsilon=coefficients["epsilon"],
            grad=grad,
            indices=indices,
            use_locking=self._use_locking,
        )

    def get_config(self):
        """Return the optimizer configuration as a JSON-serializable dict."""
        config = super().get_config()
        config.update(
            {
                "learning_rate": self._serialize_hyperparameter(
                    "learning_rate"
                ),
                "decay": self._initial_decay,
                "initial_accumulator_value": self._initial_accumulator_value,
                "epsilon": self.epsilon,
            }
        )
        return config