Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/keras/src/metrics/accuracy_metrics.py: 72% (69 statements)
# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Accuracy metrics."""

import tensorflow.compat.v2 as tf

from keras.src import backend
from keras.src.dtensor import utils as dtensor_utils
from keras.src.metrics import base_metric
from keras.src.utils import metrics_utils

# isort: off
from tensorflow.python.util.tf_export import keras_export


@keras_export("keras.metrics.Accuracy")
class Accuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions equal labels.

    This metric creates two local variables, `total` and `count`, that are
    used to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `accuracy`: an idempotent operation
    that simply divides `total` by `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.Accuracy()
    >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]])
    >>> m.result().numpy()
    0.75

    >>> m.reset_state()
    >>> m.update_state([[1], [2], [3], [4]], [[0], [2], [3], [4]],
    ...                sample_weight=[1, 1, 0, 0])
    >>> m.result().numpy()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[tf.keras.metrics.Accuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="accuracy", dtype=None):
        super().__init__(accuracy, name, dtype=dtype)


@keras_export("keras.metrics.BinaryAccuracy")
class BinaryAccuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions match binary labels.

    This metric creates two local variables, `total` and `count`, that are
    used to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `binary accuracy`: an idempotent
    operation that simply divides `total` by `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.
        threshold: (Optional) Float representing the threshold for deciding
            whether prediction values are 1 or 0.

    Standalone usage:

    >>> m = tf.keras.metrics.BinaryAccuracy()
    >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]])
    >>> m.result().numpy()
    0.75

    >>> m.reset_state()
    >>> m.update_state([[1], [1], [0], [0]], [[0.98], [1], [0], [0.6]],
    ...                sample_weight=[1, 0, 0, 1])
    >>> m.result().numpy()
    0.5

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[tf.keras.metrics.BinaryAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="binary_accuracy", dtype=None, threshold=0.5):
        super().__init__(
            metrics_utils.binary_matches, name, dtype=dtype, threshold=threshold
        )


@keras_export("keras.metrics.CategoricalAccuracy")
class CategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions match one-hot labels.

    You can provide logits of classes as `y_pred`, since the argmax of
    logits and probabilities is the same.

    This metric creates two local variables, `total` and `count`, that are
    used to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `categorical accuracy`: an idempotent
    operation that simply divides `total` by `count`.

    `y_pred` and `y_true` should be passed in as vectors of probabilities,
    rather than as labels. If necessary, use `tf.one_hot` to expand `y_true`
    as a vector.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.CategoricalAccuracy()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
    ...                 [0.05, 0.95, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]], [[0.1, 0.9, 0.8],
    ...                 [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result().numpy()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.CategoricalAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="categorical_accuracy", dtype=None):
        super().__init__(
            lambda y_true, y_pred: metrics_utils.sparse_categorical_matches(
                tf.math.argmax(y_true, axis=-1), y_pred
            ),
            name,
            dtype=dtype,
        )
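

# A minimal sketch (assuming eager execution) of the equivalence implemented
# by the lambda above: reducing one-hot labels with an argmax gives the same
# result as `SparseCategoricalAccuracy` on the corresponding integer labels.
#
#   >>> cat = tf.keras.metrics.CategoricalAccuracy()
#   >>> cat.update_state([[0, 0, 1], [0, 1, 0]],
#   ...                  [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
#   >>> sparse = tf.keras.metrics.SparseCategoricalAccuracy()
#   >>> sparse.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
#   >>> cat.result().numpy() == sparse.result().numpy()
#   True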


@keras_export("keras.metrics.SparseCategoricalAccuracy")
class SparseCategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Calculates how often predictions match integer labels.

    ```python
    acc = np.dot(sample_weight, np.equal(y_true, np.argmax(y_pred, axis=1)))
    ```

    You can provide logits of classes as `y_pred`, since the argmax of
    logits and probabilities is the same.

    This metric creates two local variables, `total` and `count`, that are
    used to compute the frequency with which `y_pred` matches `y_true`. This
    frequency is ultimately returned as `sparse categorical accuracy`: an
    idempotent operation that simply divides `total` by `count`.

    If `sample_weight` is `None`, weights default to 1.
    Use `sample_weight` of 0 to mask values.

    Args:
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.SparseCategoricalAccuracy()
    >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result().numpy()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.SparseCategoricalAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="sparse_categorical_accuracy", dtype=None):
        super().__init__(
            metrics_utils.sparse_categorical_matches, name, dtype=dtype
        )


_SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING = """Accumulates metric statistics.

For sparse categorical metrics, the shapes of `y_true` and `y_pred` are
different.

Args:
    y_true: Ground truth label values. shape = `[batch_size, d0, .. dN-1]` or
        shape = `[batch_size, d0, .. dN-1, 1]`.
    y_pred: The predicted probability values. shape = `[batch_size, d0, .. dN]`.
    sample_weight: Optional `sample_weight` acts as a
        coefficient for the metric. If a scalar is provided, then the metric is
        simply scaled by the given value. If `sample_weight` is a tensor of
        size `[batch_size]`, then the metric for each sample of the batch is
        rescaled by the corresponding element in the `sample_weight` vector. If
        the shape of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be
        broadcast to this shape), then each metric element of `y_pred` is
        scaled by the corresponding value of `sample_weight`. (Note on `dN-1`:
        all metric functions reduce by 1 dimension, usually the last axis
        (-1)).

Returns:
    Update op.
"""

SparseCategoricalAccuracy.update_state.__doc__ = (
    _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING
)
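

# A short illustration (a hedged sketch, assuming eager execution) of the
# `sample_weight` shapes described above: with sequence data, a weight tensor
# of shape `[batch_size, d0]` can mask individual timesteps.
#
#   >>> m = tf.keras.metrics.SparseCategoricalAccuracy()
#   >>> m.update_state(
#   ...     [[2, 1]],                                # [batch_size, d0]
#   ...     [[[0.1, 0.6, 0.3], [0.05, 0.95, 0.0]]],  # [batch_size, d0, dN]
#   ...     sample_weight=[[1.0, 0.0]])              # masks the second step
#   >>> m.result().numpy()                           # only step 0 counts
#   0.0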


@keras_export("keras.metrics.TopKCategoricalAccuracy")
class TopKCategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Computes how often targets are in the top `K` predictions.

    Args:
        k: (Optional) Number of top elements to look at for computing accuracy.
            Defaults to 5.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.TopKCategoricalAccuracy(k=1)
    >>> m.update_state([[0, 0, 1], [0, 1, 0]],
    ...                [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([[0, 0, 1], [0, 1, 0]],
    ...                [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result().numpy()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[tf.keras.metrics.TopKCategoricalAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, k=5, name="top_k_categorical_accuracy", dtype=None):
        super().__init__(
            lambda yt, yp, k: metrics_utils.sparse_top_k_categorical_matches(
                tf.math.argmax(yt, axis=-1), yp, k
            ),
            name,
            dtype=dtype,
            k=k,
        )


@keras_export("keras.metrics.SparseTopKCategoricalAccuracy")
class SparseTopKCategoricalAccuracy(base_metric.MeanMetricWrapper):
    """Computes how often integer targets are in the top `K` predictions.

    Args:
        k: (Optional) Number of top elements to look at for computing accuracy.
            Defaults to 5.
        name: (Optional) string name of the metric instance.
        dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.SparseTopKCategoricalAccuracy(k=1)
    >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]])
    >>> m.result().numpy()
    0.5

    >>> m.reset_state()
    >>> m.update_state([2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0]],
    ...                sample_weight=[0.7, 0.3])
    >>> m.result().numpy()
    0.3

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.SparseTopKCategoricalAccuracy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(
        self, k=5, name="sparse_top_k_categorical_accuracy", dtype=None
    ):
        super().__init__(
            metrics_utils.sparse_top_k_categorical_matches,
            name,
            dtype=dtype,
            k=k,
        )


SparseTopKCategoricalAccuracy.update_state.__doc__ = (
    _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING
)


def accuracy(y_true, y_pred):
    """Elementwise accuracy: returns 1. where `y_true` equals `y_pred`
    (after casting `y_pred` to the dtype of `y_true`), else 0."""
    [
        y_pred,
        y_true,
    ], _ = metrics_utils.ragged_assert_compatible_and_get_flat_values(
        [y_pred, y_true]
    )
    y_true.shape.assert_is_compatible_with(y_pred.shape)
    if y_true.dtype != y_pred.dtype:
        y_pred = tf.cast(y_pred, y_true.dtype)
    return tf.cast(tf.equal(y_true, y_pred), backend.floatx())
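

# A minimal sketch of the elementwise contract above, assuming eager
# execution: `accuracy` returns one 0./1. match value per element rather than
# a scalar mean.
#
#   >>> accuracy(tf.constant([1, 2, 3, 4]), tf.constant([0, 2, 3, 4])).numpy()
#   array([0., 1., 1., 1.], dtype=float32)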


@keras_export("keras.metrics.binary_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def binary_accuracy(y_true, y_pred, threshold=0.5):
    """Calculates how often predictions match binary labels.

    Standalone usage:
    >>> y_true = [[1], [1], [0], [0]]
    >>> y_pred = [[1], [1], [0], [0]]
    >>> m = tf.keras.metrics.binary_accuracy(y_true, y_pred)
    >>> assert m.shape == (4,)
    >>> m.numpy()
    array([1., 1., 1., 1.], dtype=float32)

    Args:
        y_true: Ground truth values. shape = `[batch_size, d0, .. dN]`.
        y_pred: The predicted values. shape = `[batch_size, d0, .. dN]`.
        threshold: (Optional) Float representing the threshold for deciding
            whether prediction values are 1 or 0.

    Returns:
        Binary accuracy values. shape = `[batch_size, d0, .. dN-1]`
    """
    # Note: calls metrics_utils.binary_matches with mean reduction. This
    # maintains the public-facing binary_accuracy behavior and separates it
    # from the vital behavior of the binary_matches method needed in backend
    # dependencies.
    return tf.reduce_mean(
        metrics_utils.binary_matches(y_true, y_pred, threshold), axis=-1
    )
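

# A hedged sketch of the threshold plus mean reduction above, assuming eager
# execution: with threshold=0.7 the prediction 0.6 rounds down to 0, and the
# mean over the last axis collapses the per-element matches.
#
#   >>> y_true = [[1, 1], [0, 0]]
#   >>> y_pred = [[0.98, 0.6], [0.0, 0.6]]
#   >>> tf.keras.metrics.binary_accuracy(y_true, y_pred, threshold=0.7).numpy()
#   array([0.5, 1. ], dtype=float32)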


@keras_export("keras.metrics.categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def categorical_accuracy(y_true, y_pred):
    """Calculates how often predictions match one-hot labels.

    Standalone usage:
    >>> y_true = [[0, 0, 1], [0, 1, 0]]
    >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
    >>> m = tf.keras.metrics.categorical_accuracy(y_true, y_pred)
    >>> assert m.shape == (2,)
    >>> m.numpy()
    array([0., 1.], dtype=float32)

    You can provide logits of classes as `y_pred`, since the argmax of
    logits and probabilities is the same.

    Args:
        y_true: One-hot ground truth values.
        y_pred: The prediction values.

    Returns:
        Categorical accuracy values.
    """
    # Note: wraps metrics_utils.sparse_categorical_matches (after an argmax
    # on y_true). This separates the public-facing categorical_accuracy
    # behavior from the vital behavior of the sparse_categorical_matches
    # method needed in backend dependencies.
    return metrics_utils.sparse_categorical_matches(
        tf.math.argmax(y_true, axis=-1), y_pred
    )


@keras_export("keras.metrics.sparse_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def sparse_categorical_accuracy(y_true, y_pred):
    """Calculates how often predictions match integer labels.

    Standalone usage:
    >>> y_true = [2, 1]
    >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
    >>> m = tf.keras.metrics.sparse_categorical_accuracy(y_true, y_pred)
    >>> assert m.shape == (2,)
    >>> m.numpy()
    array([0., 1.], dtype=float32)

    You can provide logits of classes as `y_pred`, since the argmax of
    logits and probabilities is the same.

    Args:
        y_true: Integer ground truth values.
        y_pred: The prediction values.

    Returns:
        Sparse categorical accuracy values.
    """
    # Note: wraps the metrics_utils.sparse_categorical_matches method and
    # checks for squeezing to align with the expected public-facing behavior.
    # This separates public-facing sparse_categorical_accuracy behavior from
    # the vital behavior of the sparse_categorical_matches method needed in
    # backend dependencies.
    matches = metrics_utils.sparse_categorical_matches(y_true, y_pred)

    # If the shape is (num_samples, 1), squeeze to (num_samples,).
    if matches.shape.ndims > 1 and matches.shape[-1] == 1:
        matches = tf.squeeze(matches, [-1])

    return matches
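

# A minimal sketch of the squeeze above, assuming eager execution: labels of
# shape (num_samples, 1) still yield a flat (num_samples,) result.
#
#   >>> out = tf.keras.metrics.sparse_categorical_accuracy(
#   ...     [[2], [1]], [[0.1, 0.6, 0.3], [0.05, 0.95, 0]])
#   >>> out.shape
#   TensorShape([2])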


@keras_export("keras.metrics.top_k_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def top_k_categorical_accuracy(y_true, y_pred, k=5):
    """Computes how often targets are in the top `K` predictions.

    Standalone usage:
    >>> y_true = [[0, 0, 1], [0, 1, 0]]
    >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
    >>> m = tf.keras.metrics.top_k_categorical_accuracy(y_true, y_pred, k=3)
    >>> assert m.shape == (2,)
    >>> m.numpy()
    array([1., 1.], dtype=float32)

    Args:
        y_true: The ground truth values.
        y_pred: The prediction values.
        k: (Optional) Number of top elements to look at for computing accuracy.
            Defaults to 5.

    Returns:
        Top K categorical accuracy value.
    """
    # Note: wraps metrics_utils.sparse_top_k_categorical_matches (after an
    # argmax on y_true). This separates the public-facing
    # top_k_categorical_accuracy behavior from the vital behavior of the
    # sparse_top_k_categorical_matches method needed in backend dependencies.
    return metrics_utils.sparse_top_k_categorical_matches(
        tf.math.argmax(y_true, axis=-1), y_pred, k
    )


@keras_export("keras.metrics.sparse_top_k_categorical_accuracy")
@tf.__internal__.dispatch.add_dispatch_support
def sparse_top_k_categorical_accuracy(y_true, y_pred, k=5):
    """Computes how often integer targets are in the top `K` predictions.

    Standalone usage:
    >>> y_true = [2, 1]
    >>> y_pred = [[0.1, 0.9, 0.8], [0.05, 0.95, 0]]
    >>> m = tf.keras.metrics.sparse_top_k_categorical_accuracy(
    ...     y_true, y_pred, k=3)
    >>> assert m.shape == (2,)
    >>> m.numpy()
    array([1., 1.], dtype=float32)

    Args:
        y_true: tensor of true targets.
        y_pred: tensor of predicted targets.
        k: (Optional) Number of top elements to look at for computing accuracy.
            Defaults to 5.

    Returns:
        Sparse top K categorical accuracy value.
    """
    # Note: wraps metrics_utils.sparse_top_k_categorical_matches. This
    # separates the public-facing sparse_top_k_categorical_accuracy behavior
    # from the vital behavior of the sparse_top_k_categorical_matches method
    # needed in backend dependencies.
    return metrics_utils.sparse_top_k_categorical_matches(y_true, y_pred, k)
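

# A hedged sketch, assuming eager execution and no tied scores: with k=1 the
# top-K test reduces to an argmax match, so the result agrees with
# sparse_categorical_accuracy.
#
#   >>> tf.keras.metrics.sparse_top_k_categorical_accuracy(
#   ...     [2, 1], [[0.1, 0.9, 0.8], [0.05, 0.95, 0.0]], k=1).numpy()
#   array([0., 1.], dtype=float32)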