Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/keras/src/metrics/probabilistic_metrics.py: 87%
39 statements
« prev ^ index » next coverage.py v7.4.0, created at 2024-01-03 07:57 +0000
1# Copyright 2015 The TensorFlow Authors. All Rights Reserved.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14# ==============================================================================
15"""Probabilistic metrics (based on Entropy)."""
17from typing import Optional
18from typing import Union
20import tensorflow.compat.v2 as tf
22from keras.src.dtensor import utils as dtensor_utils
23from keras.src.losses import binary_crossentropy
24from keras.src.losses import categorical_crossentropy
25from keras.src.losses import kullback_leibler_divergence
26from keras.src.losses import poisson
27from keras.src.losses import sparse_categorical_crossentropy
28from keras.src.metrics import base_metric
30# isort: off
31from tensorflow.python.util.tf_export import keras_export
@keras_export("keras.metrics.Poisson")
class Poisson(base_metric.MeanMetricWrapper):
    """Computes the Poisson score between `y_true` and `y_pred`.

    It is defined as: `poisson_score = y_pred - y_true * log(y_pred)`.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.Poisson()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]])
    >>> m.result().numpy()
    0.49999997

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[1, 1], [0, 0]],
    ...                sample_weight=[1, 0])
    >>> m.result().numpy()
    0.99999994

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[tf.keras.metrics.Poisson()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="poisson", dtype=None):
        # Wrap the functional `poisson` loss; MeanMetricWrapper averages
        # the per-sample loss values to produce the metric result.
        super().__init__(poisson, name, dtype=dtype)
@keras_export("keras.metrics.KLDivergence")
class KLDivergence(base_metric.MeanMetricWrapper):
    """Computes Kullback-Leibler divergence metric between `y_true` and
    `y_pred`.

    `metric = y_true * log(y_true / y_pred)`

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.

    Standalone usage:

    >>> m = tf.keras.metrics.KLDivergence()
    >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
    >>> m.result().numpy()
    0.45814306

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
    ...                sample_weight=[1, 0])
    >>> m.result().numpy()
    0.9162892

    Usage with `compile()` API:

    ```python
    model.compile(optimizer='sgd',
                  loss='mse',
                  metrics=[tf.keras.metrics.KLDivergence()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(self, name="kullback_leibler_divergence", dtype=None):
        # Delegate to the functional KL-divergence loss; the wrapper
        # accumulates a (weighted) mean of the per-sample values.
        super().__init__(kullback_leibler_divergence, name=name, dtype=dtype)
@keras_export("keras.metrics.BinaryCrossentropy")
class BinaryCrossentropy(base_metric.MeanMetricWrapper):
    """Computes the crossentropy metric between the labels and predictions.

    This is the crossentropy metric class to be used when there are only two
    label classes (0 and 1).

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      from_logits: (Optional) Whether output is expected to be a logits
        tensor. By default, we consider that output encodes a probability
        distribution.
      label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are
        smoothed, meaning the confidence on label values are relaxed.
        e.g. `label_smoothing=0.2` means that we will use a value of `0.1`
        for label `0` and `0.9` for label `1`".

    Standalone usage:

    >>> m = tf.keras.metrics.BinaryCrossentropy()
    >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]])
    >>> m.result().numpy()
    0.81492424

    >>> m.reset_state()
    >>> m.update_state([[0, 1], [0, 0]], [[0.6, 0.4], [0.4, 0.6]],
    ...                sample_weight=[1, 0])
    >>> m.result().numpy()
    0.9162905

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.BinaryCrossentropy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(
        self,
        name="binary_crossentropy",
        dtype=None,
        from_logits=False,
        label_smoothing=0,
    ):
        # `from_logits` and `label_smoothing` are forwarded to the wrapped
        # `binary_crossentropy` loss on every `update_state` call.
        super().__init__(
            binary_crossentropy,
            name,
            dtype=dtype,
            from_logits=from_logits,
            label_smoothing=label_smoothing,
        )
@keras_export("keras.metrics.CategoricalCrossentropy")
class CategoricalCrossentropy(base_metric.MeanMetricWrapper):
    """Computes the crossentropy metric between the labels and predictions.

    This is the crossentropy metric class to be used when there are multiple
    label classes (2 or more). Here we assume that labels are given as a
    `one_hot` representation. eg., When labels values are [2, 0, 1],
    `y_true` = [[0, 0, 1], [1, 0, 0], [0, 1, 0]].

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      from_logits: (Optional) Whether output is expected to be a logits
        tensor. By default, we consider that output encodes a probability
        distribution.
      label_smoothing: (Optional) Float in [0, 1]. When > 0, label values are
        smoothed, meaning the confidence on label values are relaxed. e.g.
        `label_smoothing=0.2` means that we will use a value of `0.1` for
        label `0` and `0.9` for label `1`"
      axis: (Optional) Defaults to -1. The dimension along which entropy is
        computed.

    Standalone usage:

    >>> # EPSILON = 1e-7, y = y_true, y` = y_pred
    >>> # y` = clip_ops.clip_by_value(output, EPSILON, 1. - EPSILON)
    >>> # y` = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
    >>> # xent = -sum(y * log(y'), axis = -1)
    >>> #      = -((log 0.95), (log 0.1))
    >>> #      = [0.051, 2.302]
    >>> # Reduced xent = (0.051 + 2.302) / 2
    >>> m = tf.keras.metrics.CategoricalCrossentropy()
    >>> m.update_state([[0, 1, 0], [0, 0, 1]],
    ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    >>> m.result().numpy()
    1.1769392

    >>> m.reset_state()
    >>> m.update_state([[0, 1, 0], [0, 0, 1]],
    ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
    ...                sample_weight=tf.constant([0.3, 0.7]))
    >>> m.result().numpy()
    1.6271976

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.CategoricalCrossentropy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(
        self,
        name="categorical_crossentropy",
        dtype=None,
        from_logits=False,
        label_smoothing=0,
        axis=-1,
    ):
        # All crossentropy options are forwarded verbatim to the wrapped
        # `categorical_crossentropy` loss function.
        super().__init__(
            categorical_crossentropy,
            name,
            dtype=dtype,
            from_logits=from_logits,
            label_smoothing=label_smoothing,
            axis=axis,
        )
@keras_export("keras.metrics.SparseCategoricalCrossentropy")
class SparseCategoricalCrossentropy(base_metric.MeanMetricWrapper):
    """Computes the crossentropy metric between the labels and predictions.

    Use this crossentropy metric when there are two or more label classes.
    We expect labels to be provided as integers. If you want to provide
    labels using `one-hot` representation, please use
    `CategoricalCrossentropy` metric. There should be `# classes` floating
    point values per feature for `y_pred` and a single floating point value
    per feature for `y_true`.

    In the snippet below, there is a single floating point value per example
    for `y_true` and `# classes` floating pointing values per example for
    `y_pred`. The shape of `y_true` is `[batch_size]` and the shape of
    `y_pred` is `[batch_size, num_classes]`.

    Args:
      name: (Optional) string name of the metric instance.
      dtype: (Optional) data type of the metric result.
      from_logits: (Optional) Whether output is expected to be a logits
        tensor. By default, we consider that output encodes a probability
        distribution.
      ignore_class: Optional integer. The ID of a class to be ignored during
        metric computation. This is useful, for example, in segmentation
        problems featuring a "void" class (commonly -1 or 255) in
        segmentation maps. By default (`ignore_class=None`), all classes are
        considered.
      axis: (Optional) Defaults to -1. The dimension along which entropy is
        computed.

    Standalone usage:

    >>> # y_true = one_hot(y_true) = [[0, 1, 0], [0, 0, 1]]
    >>> # logits = log(y_pred)
    >>> # softmax = exp(logits) / sum(exp(logits), axis=-1)
    >>> # softmax = [[0.05, 0.95, EPSILON], [0.1, 0.8, 0.1]]
    >>> # xent = -sum(y * log(softmax), 1)
    >>> # log(softmax) = [[-2.9957, -0.0513, -16.1181],
    >>> #                 [-2.3026, -0.2231, -2.3026]]
    >>> # y_true * log(softmax) = [[0, -0.0513, 0], [0, 0, -2.3026]]
    >>> # xent = [0.0513, 2.3026]
    >>> # Reduced xent = (0.0513 + 2.3026) / 2
    >>> m = tf.keras.metrics.SparseCategoricalCrossentropy()
    >>> m.update_state([1, 2],
    ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]])
    >>> m.result().numpy()
    1.1769392

    >>> m.reset_state()
    >>> m.update_state([1, 2],
    ...                [[0.05, 0.95, 0], [0.1, 0.8, 0.1]],
    ...                sample_weight=tf.constant([0.3, 0.7]))
    >>> m.result().numpy()
    1.6271976

    Usage with `compile()` API:

    ```python
    model.compile(
        optimizer='sgd',
        loss='mse',
        metrics=[tf.keras.metrics.SparseCategoricalCrossentropy()])
    ```
    """

    @dtensor_utils.inject_mesh
    def __init__(
        self,
        name: str = "sparse_categorical_crossentropy",
        dtype: Optional[Union[str, tf.dtypes.DType]] = None,
        from_logits: bool = False,
        ignore_class: Optional[int] = None,
        axis: int = -1,
    ):
        # Forward every option to the wrapped functional loss; integer
        # labels are handled there (no one-hot conversion needed here).
        super().__init__(
            sparse_categorical_crossentropy,
            name,
            dtype=dtype,
            from_logits=from_logits,
            ignore_class=ignore_class,
            axis=axis,
        )
_SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING = """Accumulates metric statistics.

For sparse categorical metrics, the shapes of `y_true` and `y_pred` are
different.

Args:
  y_true: Ground truth label values. shape = `[batch_size, d0, .. dN-1]` or
    shape = `[batch_size, d0, .. dN-1, 1]`.
  y_pred: The predicted probability values. shape = `[batch_size, d0, .. dN]`.
  sample_weight: Optional `sample_weight` acts as a
    coefficient for the metric. If a scalar is provided, then the metric is
    simply scaled by the given value. If `sample_weight` is a tensor of size
    `[batch_size]`, then the metric for each sample of the batch is rescaled
    by the corresponding element in the `sample_weight` vector. If the shape
    of `sample_weight` is `[batch_size, d0, .. dN-1]` (or can be broadcasted
    to this shape), then each metric element of `y_pred` is scaled by the
    corresponding value of `sample_weight`. (Note on `dN-1`: all metric
    functions reduce by 1 dimension, usually the last axis (-1)).

Returns:
  Update op.
"""

# Override the `update_state` docstring inherited from MeanMetricWrapper:
# for sparse categorical metrics `y_true` and `y_pred` have different
# shapes, so the generic wrapper docstring would be misleading.
SparseCategoricalCrossentropy.update_state.__doc__ = (
    _SPARSE_CATEGORICAL_UPDATE_STATE_DOCSTRING
)