Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/keras/src/regularizers.py: 39%
121 statements
« prev ^ index » next coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
« prev ^ index » next coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
1# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ==============================================================================
15"""Built-in regularizers."""
18import math
20import tensorflow.compat.v2 as tf
22from keras.src import backend
23from keras.src.saving.legacy import serialization as legacy_serialization
24from keras.src.saving.serialization_lib import deserialize_keras_object
25from keras.src.saving.serialization_lib import serialize_keras_object
27# isort: off
28from tensorflow.python.util.tf_export import keras_export
31def _check_penalty_number(x):
32 """check penalty number availability, raise ValueError if failed."""
33 if not isinstance(x, (float, int)):
34 raise ValueError(
35 f"Value {x} is not a valid regularization penalty number, "
36 "expected an int or float value."
37 )
39 if math.isinf(x) or math.isnan(x):
40 raise ValueError(
41 f"Value {x} is not a valid regularization penalty number, "
42 "an infinite number or NaN are not valid values."
43 )
46def _none_to_default(inputs, default):
47 return default if inputs is None else default
50@keras_export("keras.regularizers.Regularizer")
51class Regularizer:
52 """Regularizer base class.
54 Regularizers allow you to apply penalties on layer parameters or layer
55 activity during optimization. These penalties are summed into the loss
56 function that the network optimizes.
58 Regularization penalties are applied on a per-layer basis. The exact API
59 will depend on the layer, but many layers (e.g. `Dense`, `Conv1D`, `Conv2D`
60 and `Conv3D`) have a unified API.
62 These layers expose 3 keyword arguments:
64 - `kernel_regularizer`: Regularizer to apply a penalty on the layer's kernel
65 - `bias_regularizer`: Regularizer to apply a penalty on the layer's bias
66 - `activity_regularizer`: Regularizer to apply a penalty on the layer's
67 output
69 All layers (including custom layers) expose `activity_regularizer` as a
70 settable property, whether or not it is in the constructor arguments.
72 The value returned by the `activity_regularizer` is divided by the input
73 batch size so that the relative weighting between the weight regularizers
74 and the activity regularizers does not change with the batch size.
76 You can access a layer's regularization penalties by calling `layer.losses`
77 after calling the layer on inputs.
79 ## Example
81 >>> layer = tf.keras.layers.Dense(
82 ... 5, input_dim=5,
83 ... kernel_initializer='ones',
84 ... kernel_regularizer=tf.keras.regularizers.L1(0.01),
85 ... activity_regularizer=tf.keras.regularizers.L2(0.01))
86 >>> tensor = tf.ones(shape=(5, 5)) * 2.0
87 >>> out = layer(tensor)
89 >>> # The kernel regularization term is 0.25
90 >>> # The activity regularization term (after dividing by the batch size)
91 >>> # is 5
92 >>> tf.math.reduce_sum(layer.losses)
93 <tf.Tensor: shape=(), dtype=float32, numpy=5.25>
95 ## Available penalties
97 ```python
98 tf.keras.regularizers.L1(0.3) # L1 Regularization Penalty
99 tf.keras.regularizers.L2(0.1) # L2 Regularization Penalty
100 tf.keras.regularizers.L1L2(l1=0.01, l2=0.01) # L1 + L2 penalties
101 ```
103 ## Directly calling a regularizer
105 Compute a regularization loss on a tensor by directly calling a regularizer
106 as if it is a one-argument function.
108 E.g.
109 >>> regularizer = tf.keras.regularizers.L2(2.)
110 >>> tensor = tf.ones(shape=(5, 5))
111 >>> regularizer(tensor)
112 <tf.Tensor: shape=(), dtype=float32, numpy=50.0>
115 ## Developing new regularizers
117 Any function that takes in a weight matrix and returns a scalar
118 tensor can be used as a regularizer, e.g.:
120 >>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l1')
121 ... def l1_reg(weight_matrix):
122 ... return 0.01 * tf.math.reduce_sum(tf.math.abs(weight_matrix))
123 ...
124 >>> layer = tf.keras.layers.Dense(5, input_dim=5,
125 ... kernel_initializer='ones', kernel_regularizer=l1_reg)
126 >>> tensor = tf.ones(shape=(5, 5))
127 >>> out = layer(tensor)
128 >>> layer.losses
129 [<tf.Tensor: shape=(), dtype=float32, numpy=0.25>]
131 Alternatively, you can write your custom regularizers in an
132 object-oriented way by extending this regularizer base class, e.g.:
134 >>> @tf.keras.utils.register_keras_serializable(package='Custom', name='l2')
135 ... class L2Regularizer(tf.keras.regularizers.Regularizer):
136 ... def __init__(self, l2=0.):
137 ... self.l2 = l2
138 ...
139 ... def __call__(self, x):
140 ... return self.l2 * tf.math.reduce_sum(tf.math.square(x))
141 ...
142 ... def get_config(self):
143 ... return {'l2': float(self.l2)}
144 ...
145 >>> layer = tf.keras.layers.Dense(
146 ... 5, input_dim=5, kernel_initializer='ones',
147 ... kernel_regularizer=L2Regularizer(l2=0.5))
149 >>> tensor = tf.ones(shape=(5, 5))
150 >>> out = layer(tensor)
151 >>> layer.losses
152 [<tf.Tensor: shape=(), dtype=float32, numpy=12.5>]
154 ### A note on serialization and deserialization:
156 Registering the regularizers as serializable is optional if you are just
157 training and executing models, exporting to and from SavedModels, or saving
158 and loading weight checkpoints.
160 Registration is required for saving and
161 loading models to HDF5 format, Keras model cloning, some visualization
162 utilities, and exporting models to and from JSON. If using this
163 functionality, you must make sure any python process running your model has
164 also defined and registered your custom regularizer.
165 """
167 def __call__(self, x):
168 """Compute a regularization penalty from an input tensor."""
169 return 0.0
171 @classmethod
172 def from_config(cls, config):
173 """Creates a regularizer from its config.
175 This method is the reverse of `get_config`,
176 capable of instantiating the same regularizer from the config
177 dictionary.
179 This method is used by Keras `model_to_estimator`, saving and
180 loading models to HDF5 formats, Keras model cloning, some visualization
181 utilities, and exporting models to and from JSON.
183 Args:
184 config: A Python dictionary, typically the output of get_config.
186 Returns:
187 A regularizer instance.
188 """
189 return cls(**config)
191 def get_config(self):
192 """Returns the config of the regularizer.
194 An regularizer config is a Python dictionary (serializable)
195 containing all configuration parameters of the regularizer.
196 The same regularizer can be reinstantiated later
197 (without any saved state) from this configuration.
199 This method is optional if you are just training and executing models,
200 exporting to and from SavedModels, or using weight checkpoints.
202 This method is required for Keras `model_to_estimator`, saving and
203 loading models to HDF5 formats, Keras model cloning, some visualization
204 utilities, and exporting models to and from JSON.
206 Returns:
207 Python dictionary.
208 """
209 raise NotImplementedError(f"{self} does not implement get_config()")
212@keras_export("keras.regularizers.L1L2")
213class L1L2(Regularizer):
214 """A regularizer that applies both L1 and L2 regularization penalties.
216 The L1 regularization penalty is computed as:
217 `loss = l1 * reduce_sum(abs(x))`
219 The L2 regularization penalty is computed as
220 `loss = l2 * reduce_sum(square(x))`
222 L1L2 may be passed to a layer as a string identifier:
224 >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l1_l2')
226 In this case, the default values used are `l1=0.01` and `l2=0.01`.
228 Arguments:
229 l1: Float; L1 regularization factor.
230 l2: Float; L2 regularization factor.
231 """
233 def __init__(self, l1=0.0, l2=0.0):
234 # The default value for l1 and l2 are different from the value in l1_l2
235 # for backward compatibility reason. Eg, L1L2(l2=0.1) will only have l2
236 # and no l1 penalty.
237 l1 = 0.0 if l1 is None else l1
238 l2 = 0.0 if l2 is None else l2
239 _check_penalty_number(l1)
240 _check_penalty_number(l2)
242 self.l1 = backend.cast_to_floatx(l1)
243 self.l2 = backend.cast_to_floatx(l2)
245 def __call__(self, x):
246 regularization = backend.constant(0.0, dtype=x.dtype)
247 if self.l1:
248 regularization += self.l1 * tf.reduce_sum(tf.abs(x))
249 if self.l2:
250 # equivalent to "self.l2 * tf.reduce_sum(tf.square(x))"
251 regularization += 2.0 * self.l2 * tf.nn.l2_loss(x)
252 return regularization
254 def get_config(self):
255 return {"l1": float(self.l1), "l2": float(self.l2)}
258@keras_export("keras.regularizers.L1", "keras.regularizers.l1")
259class L1(Regularizer):
260 """A regularizer that applies a L1 regularization penalty.
262 The L1 regularization penalty is computed as:
263 `loss = l1 * reduce_sum(abs(x))`
265 L1 may be passed to a layer as a string identifier:
267 >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l1')
269 In this case, the default value used is `l1=0.01`.
271 Arguments:
272 l1: Float; L1 regularization factor.
273 """
275 def __init__(self, l1=0.01, **kwargs):
276 l1 = kwargs.pop("l", l1) # Backwards compatibility
277 if kwargs:
278 raise TypeError(f"Argument(s) not recognized: {kwargs}")
280 l1 = 0.01 if l1 is None else l1
281 _check_penalty_number(l1)
283 self.l1 = backend.cast_to_floatx(l1)
285 def __call__(self, x):
286 return self.l1 * tf.reduce_sum(tf.abs(x))
288 def get_config(self):
289 return {"l1": float(self.l1)}
292@keras_export("keras.regularizers.L2", "keras.regularizers.l2")
293class L2(Regularizer):
294 """A regularizer that applies a L2 regularization penalty.
296 The L2 regularization penalty is computed as:
297 `loss = l2 * reduce_sum(square(x))`
299 L2 may be passed to a layer as a string identifier:
301 >>> dense = tf.keras.layers.Dense(3, kernel_regularizer='l2')
303 In this case, the default value used is `l2=0.01`.
305 Arguments:
306 l2: Float; L2 regularization factor.
307 """
309 def __init__(self, l2=0.01, **kwargs):
310 l2 = kwargs.pop("l", l2) # Backwards compatibility
311 if kwargs:
312 raise TypeError(f"Argument(s) not recognized: {kwargs}")
314 l2 = 0.01 if l2 is None else l2
315 _check_penalty_number(l2)
317 self.l2 = backend.cast_to_floatx(l2)
319 def __call__(self, x):
320 # equivalent to "self.l2 * tf.reduce_sum(tf.square(x))"
321 return 2.0 * self.l2 * tf.nn.l2_loss(x)
323 def get_config(self):
324 return {"l2": float(self.l2)}
327@keras_export(
328 "keras.regularizers.OrthogonalRegularizer",
329 "keras.regularizers.orthogonal_regularizer",
330 v1=[],
331)
332class OrthogonalRegularizer(Regularizer):
333 """Regularizer that encourages input vectors to be orthogonal to each other.
335 It can be applied to either the rows of a matrix (`mode="rows"`) or its
336 columns (`mode="columns"`). When applied to a `Dense` kernel of shape
337 `(input_dim, units)`, rows mode will seek to make the feature vectors
338 (i.e. the basis of the output space) orthogonal to each other.
340 Arguments:
341 factor: Float. The regularization factor. The regularization penalty will
342 be proportional to `factor` times the mean of the dot products between
343 the L2-normalized rows (if `mode="rows"`, or columns if
344 `mode="columns"`) of the inputs, excluding the product of each
345 row/column with itself. Defaults to 0.01.
346 mode: String, one of `{"rows", "columns"}`. Defaults to `"rows"`. In rows
347 mode, the regularization effect seeks to make the rows of the input
348 orthogonal to each other. In columns mode, it seeks to make the columns
349 of the input orthogonal to each other.
351 Example:
353 >>> regularizer = tf.keras.regularizers.OrthogonalRegularizer(factor=0.01)
354 >>> layer = tf.keras.layers.Dense(units=4, kernel_regularizer=regularizer)
355 """
357 def __init__(self, factor=0.01, mode="rows"):
358 _check_penalty_number(factor)
359 self.factor = backend.cast_to_floatx(factor)
360 if mode not in {"rows", "columns"}:
361 raise ValueError(
362 "Invalid value for argument `mode`. Expected one of "
363 f'{{"rows", "columns"}}. Received: mode={mode}'
364 )
365 self.mode = mode
367 def __call__(self, inputs):
368 if inputs.shape.rank != 2:
369 raise ValueError(
370 "Inputs to OrthogonalRegularizer must have rank 2. Received: "
371 f"inputs.shape == {inputs.shape}"
372 )
373 if self.mode == "rows":
374 inputs = tf.math.l2_normalize(inputs, axis=1)
375 product = tf.matmul(inputs, tf.transpose(inputs))
376 size = inputs.shape[0]
377 else:
378 inputs = tf.math.l2_normalize(inputs, axis=0)
379 product = tf.matmul(tf.transpose(inputs), inputs)
380 size = inputs.shape[1]
381 product_no_diagonal = product * (1.0 - tf.eye(size, dtype=inputs.dtype))
382 num_pairs = size * (size - 1.0) / 2.0
383 return (
384 self.factor
385 * 0.5
386 * tf.reduce_sum(tf.abs(product_no_diagonal))
387 / num_pairs
388 )
390 def get_config(self):
391 return {"factor": float(self.factor), "mode": self.mode}
394@keras_export("keras.regularizers.l1_l2")
395def l1_l2(l1=0.01, l2=0.01):
396 r"""Create a regularizer that applies both L1 and L2 penalties.
398 The L1 regularization penalty is computed as:
399 `loss = l1 * reduce_sum(abs(x))`
401 The L2 regularization penalty is computed as:
402 `loss = l2 * reduce_sum(square(x))`
404 Args:
405 l1: Float; L1 regularization factor.
406 l2: Float; L2 regularization factor.
408 Returns:
409 An L1L2 Regularizer with the given regularization factors.
410 """
411 return L1L2(l1=l1, l2=l2)
414# Deserialization aliases.
415l1 = L1
416l2 = L2
417orthogonal_regularizer = OrthogonalRegularizer
420@keras_export("keras.regularizers.serialize")
421def serialize(regularizer, use_legacy_format=False):
422 if use_legacy_format:
423 return legacy_serialization.serialize_keras_object(regularizer)
424 return serialize_keras_object(regularizer)
427@keras_export("keras.regularizers.deserialize")
428def deserialize(config, custom_objects=None, use_legacy_format=False):
429 if config == "l1_l2":
430 # Special case necessary since the defaults used for "l1_l2" (string)
431 # differ from those of the L1L2 class.
432 return L1L2(l1=0.01, l2=0.01)
433 if use_legacy_format:
434 return legacy_serialization.deserialize_keras_object(
435 config,
436 module_objects=globals(),
437 custom_objects=custom_objects,
438 printable_module_name="regularizer",
439 )
440 return deserialize_keras_object(
441 config,
442 module_objects=globals(),
443 custom_objects=custom_objects,
444 printable_module_name="regularizer",
445 )
448@keras_export("keras.regularizers.get")
449def get(identifier):
450 """Retrieve a regularizer instance from a config or identifier."""
451 if identifier is None:
452 return None
453 if isinstance(identifier, dict):
454 use_legacy_format = "module" not in identifier
455 return deserialize(identifier, use_legacy_format=use_legacy_format)
456 elif isinstance(identifier, str):
457 return deserialize(str(identifier))
458 elif callable(identifier):
459 return identifier
460 else:
461 raise ValueError(
462 f"Could not interpret regularizer identifier: {identifier}"
463 )