# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Regression metrics, e.g. MAE/MSE/etc."""

import warnings

import tensorflow.compat.v2 as tf

from keras.src import backend
from keras.src.dtensor import utils as dtensor_utils
from keras.src.losses import logcosh
from keras.src.losses import mean_absolute_error
from keras.src.losses import mean_absolute_percentage_error
from keras.src.losses import mean_squared_error
from keras.src.losses import mean_squared_logarithmic_error
from keras.src.metrics import base_metric
from keras.src.utils import losses_utils
from keras.src.utils import metrics_utils
from keras.src.utils.tf_utils import is_tensor_or_variable

# isort: off
from tensorflow.python.util.tf_export import keras_export


@keras_export("keras.metrics.MeanRelativeError")
class MeanRelativeError(base_metric.Mean):
    """Computes the mean relative error by normalizing with the given values.

    This metric creates two local variables, `total` and `count` that are used
    to compute the mean relative error. This is weighted by `sample_weight`,
    and it is ultimately returned as `mean_relative_error`: an idempotent
    operation that simply divides `total` by `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
      normalizer: The normalizer values with same shape as predictions.
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.MeanRelativeError(normalizer=[1, 3, 2, 3])
    >>> m.update_state([1, 3, 2, 3], [2, 4, 6, 8])

    >>> # metric = mean(|y_pred - y_true| / normalizer)
    >>> #        = mean([1, 1, 4, 5] / [1, 3, 2, 3]) = mean([1, 1/3, 2, 5/3])
    >>> #        = 5/4 = 1.25
    >>> m.result().numpy()
    1.25

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.MeanRelativeError(normalizer=[1, 3])])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, normalizer, name=None, dtype=None):
        super().__init__(name=name, dtype=dtype)
        normalizer = tf.cast(normalizer, self._dtype)
        self.normalizer = normalizer

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates metric statistics.

        Args:
          y_true: The ground truth values.
          y_pred: The predicted values.
          sample_weight: Optional weighting of each example. Defaults to 1.
            Can be a `Tensor` whose rank is either 0, or the same rank as
            `y_true`, and must be broadcastable to `y_true`.

        Returns:
          Update op.
        """
        y_true = tf.cast(y_true, self._dtype)
        y_pred = tf.cast(y_pred, self._dtype)
        [
            y_pred,
            y_true,
        ], sample_weight = metrics_utils.ragged_assert_compatible_and_get_flat_values(  # noqa: E501
            [y_pred, y_true], sample_weight
        )
        y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
            y_pred, y_true
        )

        y_pred, self.normalizer = losses_utils.remove_squeezable_dimensions(
            y_pred, self.normalizer
        )
        y_pred.shape.assert_is_compatible_with(y_true.shape)
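        # Relative error per element; `divide_no_nan` yields 0 where the
        # normalizer is 0, rather than producing NaN/Inf.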
        relative_errors = tf.math.divide_no_nan(
            tf.abs(y_true - y_pred), self.normalizer
        )

        return super().update_state(
            relative_errors, sample_weight=sample_weight
        )

    def get_config(self):
        n = self.normalizer
        config = {
            "normalizer": backend.eval(n) if is_tensor_or_variable(n) else n
        }
        base_config = super().get_config()
        return dict(list(base_config.items()) + list(config.items()))


@keras_export("keras.metrics.CosineSimilarity")
class CosineSimilarity(base_metric.MeanMetricWrapper):
    """Computes the cosine similarity between the labels and predictions.

    `cosine similarity = (a . b) / ||a|| ||b||`

    See: [Cosine Similarity](https://en.wikipedia.org/wiki/Cosine_similarity).

    This metric keeps the average cosine similarity between `predictions` and
    `labels` over a stream of data.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      axis: (Optional) Defaults to -1. The dimension along which the cosine
        similarity is computed.

    Standalone usage:

    >>> # l2_norm(y_true) = [[0., 1.], [1./1.414, 1./1.414]]
    >>> # l2_norm(y_pred) = [[1., 0.], [1./1.414, 1./1.414]]
    >>> # l2_norm(y_true) . l2_norm(y_pred) = [[0., 0.], [0.5, 0.5]]
    >>> # result = mean(sum(l2_norm(y_true) . l2_norm(y_pred), axis=1))
    >>> #        = ((0. + 0.) + (0.5 + 0.5)) / 2
    >>> m = tf.keras.metrics.CosineSimilarity(axis=1)
    >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]])
    >>> m.result().numpy()
    0.49999997

    >>> m.reset_state()
    >>> m.update_state([[0., 1.], [1., 1.]], [[1., 0.], [1., 1.]],
    ...                sample_weight=[0.3, 0.7])
    >>> m.result().numpy()
    0.6999999

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.CosineSimilarity(axis=1)])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="cosine_similarity", dtype=None, axis=-1):
        super().__init__(cosine_similarity, name, dtype=dtype, axis=axis)


@keras_export("keras.metrics.MeanAbsoluteError")
class MeanAbsoluteError(base_metric.MeanMetricWrapper):
    """Computes the mean absolute error between the labels and predictions.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.MeanAbsoluteError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result().numpy()
    0.25

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result().numpy()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.MeanAbsoluteError()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="mean_absolute_error", dtype=None):
        super().__init__(mean_absolute_error, name, dtype=dtype)


@keras_export("keras.metrics.MeanAbsolutePercentageError")
class MeanAbsolutePercentageError(base_metric.MeanMetricWrapper):
    """Computes the mean absolute percentage error between `y_true` and
    `y_pred`.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.MeanAbsolutePercentageError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result().numpy()
    250000000.0

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result().numpy()
    500000000.0

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.MeanAbsolutePercentageError()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="mean_absolute_percentage_error", dtype=None):
        super().__init__(mean_absolute_percentage_error, name, dtype=dtype)


@keras_export("keras.metrics.MeanSquaredError")
class MeanSquaredError(base_metric.MeanMetricWrapper):
    """Computes the mean squared error between `y_true` and `y_pred`.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.MeanSquaredError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result().numpy()
    0.25

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result().numpy()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.MeanSquaredError()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="mean_squared_error", dtype=None):
        super().__init__(mean_squared_error, name, dtype=dtype)


@keras_export("keras.metrics.MeanSquaredLogarithmicError")
class MeanSquaredLogarithmicError(base_metric.MeanMetricWrapper):
    """Computes the mean squared logarithmic error between `y_true` and
    `y_pred`.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.MeanSquaredLogarithmicError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result().numpy()
    0.12011322

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result().numpy()
    0.24022643

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.MeanSquaredLogarithmicError()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="mean_squared_logarithmic_error", dtype=None):
        super().__init__(mean_squared_logarithmic_error, name, dtype=dtype)


@keras_export("keras.metrics.RootMeanSquaredError")
class RootMeanSquaredError(base_metric.Mean):
    """Computes root mean squared error metric between `y_true` and `y_pred`.

    Standalone usage:

    >>> m = tf.keras.metrics.RootMeanSquaredError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result().numpy()
    0.70710677

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.RootMeanSquaredError()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="root_mean_squared_error", dtype=None):
        super().__init__(name, dtype=dtype)

    def update_state(self, y_true, y_pred, sample_weight=None):
        """Accumulates root mean squared error statistics.

        Args:
          y_true: The ground truth values.
          y_pred: The predicted values.
          sample_weight: Optional weighting of each example. Defaults to 1.
            Can be a `Tensor` whose rank is either 0, or the same rank as
            `y_true`, and must be broadcastable to `y_true`.

        Returns:
          Update op.
        """
        y_true = tf.cast(y_true, self._dtype)
        y_pred = tf.cast(y_pred, self._dtype)
        y_pred, y_true = losses_utils.squeeze_or_expand_dimensions(
            y_pred, y_true
        )
        error_sq = tf.math.squared_difference(y_pred, y_true)
        return super().update_state(error_sq, sample_weight=sample_weight)

    def result(self):
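        # Square root of the running (weighted) mean of squared errors;
        # `divide_no_nan` returns 0 before any updates have been seen.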
        return tf.sqrt(tf.math.divide_no_nan(self.total, self.count))


@keras_export("keras.metrics.LogCoshError")
class LogCoshError(base_metric.MeanMetricWrapper):
    """Computes the logarithm of the hyperbolic cosine of the prediction error.

    `logcosh = log((exp(x) + exp(-x))/2)`, where x is the error
    (y_pred - y_true).

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.LogCoshError()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result().numpy()
    0.10844523

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result().numpy()
    0.21689045

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[tf.keras.metrics.LogCoshError()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="logcosh", dtype=None):
        super().__init__(logcosh, name, dtype=dtype)


# Adapted from TF-Addons implementation (RSquare class).
@keras_export("keras.metrics.R2Score")
class R2Score(base_metric.Metric):
    """Computes R2 score.

    This is also called the
    [coefficient of
    determination](https://en.wikipedia.org/wiki/Coefficient_of_determination).

    It indicates how close the fitted regression line
    is to ground-truth data.

    - The highest score possible is 1.0. It indicates that the predictors
      perfectly account for variation in the target.
    - A score of 0.0 indicates that the predictors do not
      account for variation in the target.
    - The score can also be negative, when the model fits the data worse
      than a constant predictor of the target mean.

    This metric can also compute the "Adjusted R2" score.

    Args:
      class_aggregation: Specifies how to aggregate scores corresponding to
        different output classes (or target dimensions),
        i.e. different dimensions on the last axis of the predictions.
        Equivalent to `multioutput` argument in Scikit-Learn.
        Should be one of
        `None` (no aggregation), `"uniform_average"`,
        `"variance_weighted_average"`.
      num_regressors: Number of independent regressors used
        ("Adjusted R2" score). Defaults to 0 (standard R2 score).
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Example:

    >>> y_true = np.array([[1], [4], [3]], dtype=np.float32)
    >>> y_pred = np.array([[2], [4], [4]], dtype=np.float32)
    >>> metric = tf.keras.metrics.R2Score()
    >>> metric.update_state(y_true, y_pred)
    >>> result = metric.result()
    >>> result.numpy()
    0.57142854
    """

    @dtensor_utils.inject_mesh
    def __init__(
        self,
        class_aggregation="uniform_average",
        num_regressors=0,
        name="r2_score",
        dtype=None,
    ):
        super().__init__(name=name, dtype=dtype)

        valid_class_aggregation_values = (
            None,
            "uniform_average",
            "variance_weighted_average",
        )
        if class_aggregation not in valid_class_aggregation_values:
            raise ValueError(
                "Invalid value for argument `class_aggregation`. Expected "
                f"one of {valid_class_aggregation_values}. "
                f"Received: class_aggregation={class_aggregation}"
            )
        if num_regressors < 0:
            raise ValueError(
                "Invalid value for argument `num_regressors`. "
                "Expected a value >= 0. "
                f"Received: num_regressors={num_regressors}"
            )
        self.class_aggregation = class_aggregation
        self.num_regressors = num_regressors
        self.num_samples = self.add_weight(name="num_samples", dtype="int32")
        self.built = False

    def build(self, y_true_shape, y_pred_shape):
        if len(y_pred_shape) != 2 or len(y_true_shape) != 2:
            raise ValueError(
                "R2Score expects 2D inputs with shape "
                "(batch_size, output_dim). Received input "
                f"shapes: y_pred.shape={y_pred_shape} and "
                f"y_true.shape={y_true_shape}."
            )
        if y_pred_shape[-1] is None or y_true_shape[-1] is None:
            raise ValueError(
                "R2Score expects 2D inputs with shape "
                "(batch_size, output_dim), with output_dim fully "
                "defined (not None). Received input "
                f"shapes: y_pred.shape={y_pred_shape} and "
                f"y_true.shape={y_true_shape}."
            )
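        # One accumulator slot per output dimension: running weighted sums of
        # the targets, their squares, the squared residuals, and the weights.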
        num_classes = y_pred_shape[-1]
        self.squared_sum = self.add_weight(
            name="squared_sum",
            shape=[num_classes],
            initializer="zeros",
        )
        self.sum = self.add_weight(
            name="sum",
            shape=[num_classes],
            initializer="zeros",
        )
        self.total_mse = self.add_weight(
            name="residual",
            shape=[num_classes],
            initializer="zeros",
        )
        self.count = self.add_weight(
            name="count",
            shape=[num_classes],
            initializer="zeros",
        )
        self.built = True

    def update_state(self, y_true, y_pred, sample_weight=None):
        y_true = tf.convert_to_tensor(y_true, dtype=self.dtype)
        y_pred = tf.convert_to_tensor(y_pred, dtype=self.dtype)
        if not self.built:
            self.build(y_true.shape, y_pred.shape)

        if sample_weight is None:
            sample_weight = 1

        sample_weight = tf.convert_to_tensor(sample_weight, dtype=self.dtype)
        if sample_weight.shape.rank == 1:
            # Make sure there's a features dimension
            sample_weight = tf.expand_dims(sample_weight, axis=1)
        sample_weight = tf.__internal__.ops.broadcast_weights(
            weights=sample_weight, values=y_true
        )
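        # Accumulate the weighted sufficient statistics per output dimension:
        # sum(w * y), sum(w * y^2), sum(w * (y - y_pred)^2) and sum(w).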
        weighted_y_true = y_true * sample_weight
        self.sum.assign_add(tf.reduce_sum(weighted_y_true, axis=0))
        self.squared_sum.assign_add(
            tf.reduce_sum(y_true * weighted_y_true, axis=0)
        )
        self.total_mse.assign_add(
            tf.reduce_sum((y_true - y_pred) ** 2 * sample_weight, axis=0)
        )
        self.count.assign_add(tf.reduce_sum(sample_weight, axis=0))
        self.num_samples.assign_add(tf.size(y_true))

    def result(self):
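        # Per-output R2 = 1 - SS_res / SS_tot, computed from the accumulated
        # sums: SS_tot = sum(w * y^2) - (sum(w * y))^2 / sum(w).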
        mean = self.sum / self.count
        total = self.squared_sum - self.sum * mean
        raw_scores = 1 - (self.total_mse / total)
        raw_scores = tf.where(tf.math.is_inf(raw_scores), 0.0, raw_scores)

        if self.class_aggregation == "uniform_average":
            r2_score = tf.reduce_mean(raw_scores)
        elif self.class_aggregation == "variance_weighted_average":
            weighted_sum = tf.reduce_sum(total * raw_scores)
            sum_of_weights = tf.reduce_sum(total)
            r2_score = weighted_sum / sum_of_weights
        else:
            r2_score = raw_scores

        if self.num_regressors != 0:
            if self.num_regressors > self.num_samples - 1:
                warnings.warn(
                    "More independent predictors than datapoints "
                    "in adjusted R2 score. Falling back to standard R2 score.",
                    stacklevel=2,
                )
            elif self.num_regressors == self.num_samples - 1:
                warnings.warn(
                    "Division by zero in Adjusted R2 score. "
                    "Falling back to standard R2 score.",
                    stacklevel=2,
                )
            else:
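                # Adjusted R2 = 1 - (1 - R2) * (n - 1) / (n - p - 1), with n
                # accumulated samples and p independent regressors.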
                n = tf.cast(self.num_samples, dtype=tf.float32)
                p = tf.cast(self.num_regressors, dtype=tf.float32)
                num = tf.multiply(
                    tf.subtract(1.0, r2_score), tf.subtract(n, 1.0)
                )
                den = tf.subtract(tf.subtract(n, p), 1.0)
                r2_score = tf.subtract(1.0, tf.divide(num, den))
        return r2_score

    def reset_state(self):
        for v in self.variables:
            v.assign(tf.zeros(v.shape))

    def get_config(self):
        config = {
            "class_aggregation": self.class_aggregation,
            "num_regressors": self.num_regressors,
        }
        base_config = super().get_config()
        return {**base_config, **config}


def cosine_similarity(y_true, y_pred, axis=-1):
    """Computes the cosine similarity between labels and predictions.

    Args:
      y_true: The ground truth values.
      y_pred: The prediction values.
      axis: (Optional) Defaults to -1. The dimension along which the cosine
        similarity is computed.

    Returns:
      Cosine similarity value.
    """
    y_true = tf.linalg.l2_normalize(y_true, axis=axis)
    y_pred = tf.linalg.l2_normalize(y_pred, axis=axis)
    return tf.reduce_sum(y_true * y_pred, axis=axis)