Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.9/dist-packages/numpy/lib/histograms.py: 12%
272 statements
coverage.py v7.4.4, created at 2024-04-03 06:39 +0000
1"""
2Histogram-related functions
3"""
4import contextlib
5import functools
6import operator
7import warnings
9import numpy as np
10from numpy.core import overrides
12__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
14array_function_dispatch = functools.partial(
15 overrides.array_function_dispatch, module='numpy')
17# range is a keyword argument to many functions, so save the builtin so they can
18# use it.
19_range = range
22def _ptp(x):
23 """Peak-to-peak value of x.
25 This implementation avoids the problem of signed integer arrays having a
26 peak-to-peak value that cannot be represented with the array's data type.
27 This function returns an unsigned value for signed integer arrays.
28 """
29 return _unsigned_subtract(x.max(), x.min())
32def _hist_bin_sqrt(x, range):
33 """
34 Square root histogram bin estimator.
36 Bin width is inversely proportional to the data size. Used by many
37 programs for its simplicity.
39 Parameters
40 ----------
41 x : array_like
42 Input data that is to be histogrammed, trimmed to range. May not
43 be empty.
45 Returns
46 -------
47 h : An estimate of the optimal bin width for the given data.
48 """
49 del range # unused
50 return _ptp(x) / np.sqrt(x.size)
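

# Illustrative sketch (an editorial addition, not part of the NumPy source):
# how a bin *width* returned by an estimator such as _hist_bin_sqrt above is
# turned into a bin *count*. This mirrors, in simplified form, what
# _get_bin_edges does further below; the name _demo_width_to_bin_count is
# hypothetical.
def _demo_width_to_bin_count(x):
    # e.g. for x = np.arange(100.), _hist_bin_sqrt gives 99 / 10 = 9.9,
    # so ceil(99 / 9.9) = 10 equal-width bins.
    width = _hist_bin_sqrt(x, None)
    if width:
        return int(np.ceil(_ptp(x) / width))
    # a zero width (possible for other estimators) falls back to a single bin
    return 1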


def _hist_bin_sturges(x, range):
    """
    Sturges histogram bin estimator.

    A very simplistic estimator based on the assumption of normality of
    the data. This estimator has poor performance for non-normal data,
    which becomes especially obvious for large data sets. The estimate
    depends only on size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return _ptp(x) / (np.log2(x.size) + 1.0)


def _hist_bin_rice(x, range):
    """
    Rice histogram bin estimator.

    Another simple estimator with no normality assumption. It has better
    performance for large data than Sturges, but tends to overestimate
    the number of bins. The number of bins is proportional to the cube
    root of data size (asymptotically optimal). The estimate depends
    only on size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return _ptp(x) / (2.0 * x.size ** (1.0 / 3))


def _hist_bin_scott(x, range):
    """
    Scott histogram bin estimator.

    The binwidth is proportional to the standard deviation of the data
    and inversely proportional to the cube root of data size
    (asymptotically optimal).

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)


def _hist_bin_stone(x, range):
    """
    Histogram bin estimator based on minimizing the estimated integrated squared error (ISE).

    The number of bins is chosen by minimizing the estimated ISE against the unknown true distribution.
    The ISE is estimated using cross-validation and can be regarded as a generalization of Scott's rule.
    https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule

    This paper by Stone appears to be the origination of this rule.
    http://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.
    range : (float, float)
        The lower and upper range of the bins.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """

    n = x.size
    ptp_x = _ptp(x)
    if n <= 1 or ptp_x == 0:
        return 0

    def jhat(nbins):
        hh = ptp_x / nbins
        p_k = np.histogram(x, bins=nbins, range=range)[0] / n
        return (2 - (n + 1) * p_k.dot(p_k)) / hh

    nbins_upper_bound = max(100, int(np.sqrt(n)))
    nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
    if nbins == nbins_upper_bound:
        warnings.warn("The number of bins estimated may be suboptimal.",
                      RuntimeWarning, stacklevel=3)
    return ptp_x / nbins
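

# Illustrative sketch (an editorial addition, not part of the NumPy source):
# Stone's rule evaluates J(h) = (2 - (n + 1) * sum(p_k**2)) / h for candidate
# bin counts and keeps the minimiser. The helper below recomputes that
# objective for a single candidate so the quantity minimised above is explicit;
# the name _demo_stone_objective is hypothetical.
def _demo_stone_objective(x, nbins, range):
    n = x.size
    h = _ptp(x) / nbins                                      # candidate bin width
    p_k = np.histogram(x, bins=nbins, range=range)[0] / n    # empirical bin probabilities
    return (2 - (n + 1) * p_k.dot(p_k)) / h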


def _hist_bin_doane(x, range):
    """
    Doane's histogram bin estimator.

    Improved version of Sturges' formula which works better for
    non-normal data. See
    stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    if x.size > 2:
        sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
        sigma = np.std(x)
        if sigma > 0.0:
            # These three operations add up to
            # g1 = np.mean(((x - np.mean(x)) / sigma)**3)
            # but use only one temp array instead of three
            temp = x - np.mean(x)
            np.true_divide(temp, sigma, temp)
            np.power(temp, 3, temp)
            g1 = np.mean(temp)
            return _ptp(x) / (1.0 + np.log2(x.size) +
                              np.log2(1.0 + np.absolute(g1) / sg1))
    return 0.0
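

# Illustrative sketch (an editorial addition, not part of the NumPy source):
# the in-place operations in _hist_bin_doane compute the sample skewness g1
# without extra temporaries. The check below, under the hypothetical name
# _demo_doane_skew_equivalence, verifies the equivalence stated in the
# comments above.
def _demo_doane_skew_equivalence(x):
    x = np.asarray(x, dtype=float)
    sigma = np.std(x)
    naive_g1 = np.mean(((x - np.mean(x)) / sigma) ** 3)
    temp = x - np.mean(x)
    np.true_divide(temp, sigma, temp)
    np.power(temp, 3, temp)
    return np.allclose(naive_g1, np.mean(temp))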


def _hist_bin_fd(x, range):
    """
    The Freedman-Diaconis histogram bin estimator.

    The Freedman-Diaconis rule uses interquartile range (IQR) to
    estimate binwidth. It is considered a variation of the Scott rule
    with more robustness as the IQR is less affected by outliers than
    the standard deviation. However, the IQR depends on fewer points
    than the standard deviation, so it is less accurate, especially for
    long tailed distributions.

    If the IQR is 0, this function returns 0 for the bin width.
    Binwidth is inversely proportional to the cube root of data size
    (asymptotically optimal).

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
    iqr = np.subtract(*np.percentile(x, [75, 25]))
    return 2.0 * iqr * x.size ** (-1.0 / 3.0)
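

# Illustrative sketch (an editorial addition, not part of the NumPy source):
# when more than half of the data shares a single value, the IQR is 0 and the
# FD width collapses to 0 -- the case _hist_bin_auto below guards against.
# The name _demo_fd_zero_iqr is hypothetical.
def _demo_fd_zero_iqr():
    x = np.array([1., 1., 1., 1., 1., 1., 7.])   # IQR of this sample is 0
    return _hist_bin_fd(x, None)                 # returns 0.0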


def _hist_bin_auto(x, range):
    """
    Histogram bin estimator that uses the minimum width of the
    Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero.
    If the bin width from the FD estimator is 0, the Sturges estimator is used.

    The FD estimator is usually the most robust method, but its width
    estimate tends to be too large for small `x` and bad for data with limited
    variance. The Sturges estimator is quite good for small (<1000) datasets
    and is the default in the R language. This method gives good off-the-shelf
    behaviour.

    .. versionchanged:: 1.15.0
        If there is limited variance the IQR can be 0, which results in the
        FD bin width being 0 too. This is not a valid bin width, so
        ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
        If the IQR is 0, it's unlikely any variance-based estimators will be of
        use, so we revert to the Sturges estimator, which only uses the size of the
        dataset in its calculation.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.

    See Also
    --------
    _hist_bin_fd, _hist_bin_sturges
    """
    fd_bw = _hist_bin_fd(x, range)
    sturges_bw = _hist_bin_sturges(x, range)
    del range  # unused
    if fd_bw:
        return min(fd_bw, sturges_bw)
    else:
        # limited variance, so we return a len dependent bw estimator
        return sturges_bw
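

# Illustrative sketch (an editorial addition, not part of the NumPy source):
# with the zero-IQR sample from the previous sketch the FD width is 0, so
# 'auto' falls back to the Sturges width; otherwise it takes the smaller of
# the two. The name _demo_auto_fallback is hypothetical.
def _demo_auto_fallback():
    x = np.array([1., 1., 1., 1., 1., 1., 7.])
    fd = _hist_bin_fd(x, None)        # 0.0 here
    auto = _hist_bin_auto(x, None)    # equals _hist_bin_sturges(x, None)
    return fd, auto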


# Private dict initialized at module load time
_hist_bin_selectors = {'stone': _hist_bin_stone,
                       'auto': _hist_bin_auto,
                       'doane': _hist_bin_doane,
                       'fd': _hist_bin_fd,
                       'rice': _hist_bin_rice,
                       'scott': _hist_bin_scott,
                       'sqrt': _hist_bin_sqrt,
                       'sturges': _hist_bin_sturges}
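

# Illustrative sketch (an editorial addition, not part of the NumPy source):
# _get_bin_edges below uses this dict to dispatch on the string passed as
# `bins`, e.g. _hist_bin_selectors['auto'](trimmed_data, (first, last)).
# The name _demo_selector_dispatch is hypothetical and expects a non-empty
# 1-D array.
def _demo_selector_dispatch(x, method='auto'):
    if method not in _hist_bin_selectors:
        raise ValueError("{!r} is not a valid estimator".format(method))
    first_edge, last_edge = _get_outer_edges(x, None)
    return _hist_bin_selectors[method](x, (first_edge, last_edge))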


def _ravel_and_check_weights(a, weights):
    """ Check a and weights have matching shapes, and ravel both """
    a = np.asarray(a)

    # Ensure that the array is a "subtractable" dtype
    if a.dtype == np.bool_:
        warnings.warn("Converting input from {} to {} for compatibility."
                      .format(a.dtype, np.uint8),
                      RuntimeWarning, stacklevel=3)
        a = a.astype(np.uint8)

    if weights is not None:
        weights = np.asarray(weights)
        if weights.shape != a.shape:
            raise ValueError(
                'weights should have the same shape as a.')
        weights = weights.ravel()
    a = a.ravel()
    return a, weights


def _get_outer_edges(a, range):
    """
    Determine the outer bin edges to use, from either the data or the range
    argument
    """
    if range is not None:
        first_edge, last_edge = range
        if first_edge > last_edge:
            raise ValueError(
                'max must be larger than min in range parameter.')
        if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
            raise ValueError(
                "supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
    elif a.size == 0:
        # handle empty arrays. Can't determine range, so use 0-1.
        first_edge, last_edge = 0, 1
    else:
        first_edge, last_edge = a.min(), a.max()
        if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
            raise ValueError(
                "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))

    # expand empty range to avoid divide by zero
    if first_edge == last_edge:
        first_edge = first_edge - 0.5
        last_edge = last_edge + 0.5

    return first_edge, last_edge
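

# Illustrative sketch (an editorial addition, not part of the NumPy source):
# two edge cases handled above. An empty array yields the (0, 1) placeholder
# range, and a constant array is widened by 0.5 on each side to avoid a
# zero-width range. The name _demo_outer_edges is hypothetical.
def _demo_outer_edges():
    empty = _get_outer_edges(np.array([]), None)                 # (0, 1)
    constant = _get_outer_edges(np.array([3., 3., 3.]), None)    # (2.5, 3.5)
    return empty, constant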


def _unsigned_subtract(a, b):
    """
    Subtract two values where a >= b, and produce an unsigned result

    This is needed when finding the difference between the upper and lower
    bound of an int16 histogram
    """
    # coerce to a single type
    signed_to_unsigned = {
        np.byte: np.ubyte,
        np.short: np.ushort,
        np.intc: np.uintc,
        np.int_: np.uint,
        np.longlong: np.ulonglong
    }
    dt = np.result_type(a, b)
    try:
        dt = signed_to_unsigned[dt.type]
    except KeyError:
        return np.subtract(a, b, dtype=dt)
    else:
        # we know the inputs are integers, and we are deliberately casting
        # signed to unsigned
        return np.subtract(a, b, casting='unsafe', dtype=dt)
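

# Illustrative sketch (an editorial addition, not part of the NumPy source):
# why the unsigned cast above matters. For int8 data spanning [-128, 127] the
# true peak-to-peak is 255, which cannot be represented by a plain int8
# subtraction, but fits in the unsigned result. The name
# _demo_unsigned_subtract is hypothetical.
def _demo_unsigned_subtract():
    lo = np.int8(-128)
    hi = np.int8(127)
    return _unsigned_subtract(hi, lo)   # uint8(255), not an overflowed int8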


def _get_bin_edges(a, bins, range, weights):
    """
    Computes the bins used internally by `histogram`.

    Parameters
    ==========
    a : ndarray
        Ravelled data array
    bins, range
        Forwarded arguments from `histogram`.
    weights : ndarray, optional
        Ravelled weights array, or None

    Returns
    =======
    bin_edges : ndarray
        Array of bin edges
    uniform_bins : (Number, Number, int):
        The lower bound, upper bound, and number of bins, used in the
        optimized implementation of `histogram` that works on uniform bins.
    """
    # parse the overloaded bins argument
    n_equal_bins = None
    bin_edges = None

    if isinstance(bins, str):
        bin_name = bins
        # if `bins` is a string for an automatic method,
        # this will replace it with the number of bins calculated
        if bin_name not in _hist_bin_selectors:
            raise ValueError(
                "{!r} is not a valid estimator for `bins`".format(bin_name))
        if weights is not None:
            raise TypeError("Automated estimation of the number of "
                            "bins is not supported for weighted data")

        first_edge, last_edge = _get_outer_edges(a, range)

        # truncate the range if needed
        if range is not None:
            keep = (a >= first_edge)
            keep &= (a <= last_edge)
            if not np.logical_and.reduce(keep):
                a = a[keep]

        if a.size == 0:
            n_equal_bins = 1
        else:
            # Do not call selectors on empty arrays
            width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
            if width:
                n_equal_bins = int(np.ceil(_unsigned_subtract(last_edge, first_edge) / width))
            else:
                # Width can be zero for some estimators, e.g. FD when
                # the IQR of the data is zero.
                n_equal_bins = 1

    elif np.ndim(bins) == 0:
        try:
            n_equal_bins = operator.index(bins)
        except TypeError as e:
            raise TypeError(
                '`bins` must be an integer, a string, or an array') from e
        if n_equal_bins < 1:
            raise ValueError('`bins` must be positive, when an integer')

        first_edge, last_edge = _get_outer_edges(a, range)

    elif np.ndim(bins) == 1:
        bin_edges = np.asarray(bins)
        if np.any(bin_edges[:-1] > bin_edges[1:]):
            raise ValueError(
                '`bins` must increase monotonically, when an array')

    else:
        raise ValueError('`bins` must be 1d, when an array')

    if n_equal_bins is not None:
        # gh-10322 means that type resolution rules are dependent on array
        # shapes. To avoid this causing problems, we pick a type now and stick
        # with it throughout.
        bin_type = np.result_type(first_edge, last_edge, a)
        if np.issubdtype(bin_type, np.integer):
            bin_type = np.result_type(bin_type, float)

        # bin edges must be computed
        bin_edges = np.linspace(
            first_edge, last_edge, n_equal_bins + 1,
            endpoint=True, dtype=bin_type)
        return bin_edges, (first_edge, last_edge, n_equal_bins)
    else:
        return bin_edges, None
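

# Illustrative sketch (an editorial addition, not part of the NumPy source):
# the second return value is only populated on the uniform-bin path, which is
# what lets `histogram` below choose its bincount-based fast path. The name
# _demo_bin_edges_paths is hypothetical.
def _demo_bin_edges_paths():
    a = np.arange(10.)
    _, uniform = _get_bin_edges(a, 5, None, None)              # (0.0, 9.0, 5)
    _, explicit = _get_bin_edges(a, [0., 2., 9.], None, None)  # None
    return uniform, explicit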


def _search_sorted_inclusive(a, v):
    """
    Like `searchsorted`, but where the last item in `v` is placed on the right.

    In the context of a histogram, this makes the last bin edge inclusive
    """
    return np.concatenate((
        a.searchsorted(v[:-1], 'left'),
        a.searchsorted(v[-1:], 'right')
    ))
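

# Illustrative sketch (an editorial addition, not part of the NumPy source):
# with sorted data [1, 2, 3, 4] and edges [1, 2, 3, 4], a plain 'left' search
# would leave a value equal to the last edge out of the final bin; the
# inclusive variant above counts it. The name _demo_inclusive_edge is
# hypothetical.
def _demo_inclusive_edge():
    sa = np.array([1, 2, 3, 4])
    edges = np.array([1, 2, 3, 4])
    cum = _search_sorted_inclusive(sa, edges)   # array([0, 1, 2, 4])
    return np.diff(cum)                         # per-bin counts: [1, 1, 2]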


def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
    return (a, bins, weights)


@array_function_dispatch(_histogram_bin_edges_dispatcher)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
    r"""
    Function to calculate only the edges of the bins used by the `histogram`
    function.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a
        sequence, it defines the bin edges, including the rightmost
        edge, allowing for non-uniform bin widths.

        If `bins` is a string from the list below, `histogram_bin_edges` will use
        the method chosen to calculate the optimal bin width and
        consequently the number of bins (see `Notes` for more detail on
        the estimators) from the data that falls within the requested
        range. While the bin width will be optimal for the actual data
        in the range, the number of bins will be computed to fill the
        entire range, including the empty portions. For visualisation,
        using the 'auto' option is suggested. Weighted data is not
        supported for automated bin size selection.

        'auto'
            Maximum of the 'sturges' and 'fd' estimators. Provides good
            all around performance.

        'fd' (Freedman Diaconis Estimator)
            Robust (resilient to outliers) estimator that takes into
            account data variability and data size.

        'doane'
            An improved version of Sturges' estimator that works better
            with non-normal datasets.

        'scott'
            Less robust estimator that takes into account data variability
            and data size.

        'stone'
            Estimator based on leave-one-out cross-validation estimate of
            the integrated squared error. Can be regarded as a generalization
            of Scott's rule.

        'rice'
            Estimator does not take variability into account, only data
            size. Commonly overestimates number of bins required.

        'sturges'
            R's default method, only accounts for data size. Only
            optimal for gaussian data and underestimates number of bins
            for large non-gaussian datasets.

        'sqrt'
            Square root (of data size) estimator, used by Excel and
            other programs for its speed and simplicity.

    range : (float, float), optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(a.min(), a.max())``. Values outside the range are
        ignored. The first element of the range must be less than or
        equal to the second. `range` affects the automatic bin
        computation as well. While bin width is computed to be optimal
        based on the actual data within `range`, the bin count will fill
        the entire range including portions containing no data.

    weights : array_like, optional
        An array of weights, of the same shape as `a`. Each value in
        `a` only contributes its associated weight towards the bin count
        (instead of 1). This is currently not used by any of the bin estimators,
        but may be in the future.

    Returns
    -------
    bin_edges : array of dtype float
        The edges to pass into `histogram`

    See Also
    --------
    histogram

    Notes
    -----
    The methods to estimate the optimal number of bins are well founded
    in literature, and are inspired by the choices R provides for
    histogram visualisation. Note that having the number of bins
    proportional to :math:`n^{1/3}` is asymptotically optimal, which is
    why it appears in most estimators. These are simply plug-in methods
    that give good starting points for number of bins. In the equations
    below, :math:`h` is the binwidth and :math:`n_h` is the number of
    bins. All estimators that compute bin counts are recast to bin width
    using the `ptp` of the data. The final bin count is obtained from
    ``np.round(np.ceil(range / h))``. The final bin width is often less
    than what is returned by the estimators below.

    'auto' (maximum of the 'sturges' and 'fd' estimators)
        A compromise to get a good value. For small datasets the Sturges
        value will usually be chosen, while larger datasets will usually
        default to FD. Avoids the overly conservative behaviour of FD
        and Sturges for small and large datasets respectively.
        Switchover point is usually :math:`a.size \approx 1000`.

    'fd' (Freedman Diaconis Estimator)
        .. math:: h = 2 \frac{IQR}{n^{1/3}}

        The binwidth is proportional to the interquartile range (IQR)
        and inversely proportional to cube root of a.size. Can be too
        conservative for small datasets, but is quite good for large
        datasets. The IQR is very robust to outliers.

    'scott'
        .. math:: h = \sigma \sqrt[3]{\frac{24 \sqrt{\pi}}{n}}

        The binwidth is proportional to the standard deviation of the
        data and inversely proportional to cube root of ``x.size``. Can
        be too conservative for small datasets, but is quite good for
        large datasets. The standard deviation is not very robust to
        outliers. Values are very similar to the Freedman-Diaconis
        estimator in the absence of outliers.

    'rice'
        .. math:: n_h = 2n^{1/3}

        The number of bins is only proportional to cube root of
        ``a.size``. It tends to overestimate the number of bins and it
        does not take into account data variability.

    'sturges'
        .. math:: n_h = \log _{2}(n) + 1

        The number of bins is the base 2 log of ``a.size``. This
        estimator assumes normality of data and is too conservative for
        larger, non-normal datasets. This is the default method in R's
        ``hist`` method.

    'doane'
        .. math:: n_h = 1 + \log_{2}(n) +
                        \log_{2}\left(1 + \frac{|g_1|}{\sigma_{g_1}}\right)

            g_1 = mean\left[\left(\frac{x - \mu}{\sigma}\right)^3\right]

            \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}

        An improved version of Sturges' formula that produces better
        estimates for non-normal datasets. This estimator attempts to
        account for the skew of the data.

    'sqrt'
        .. math:: n_h = \sqrt n

        The simplest and fastest estimator. Only takes into account the
        data size.

    Examples
    --------
    >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
    >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
    array([0. , 0.25, 0.5 , 0.75, 1. ])
    >>> np.histogram_bin_edges(arr, bins=2)
    array([0. , 2.5, 5. ])

    For consistency with histogram, an array of pre-computed bins is
    passed through unmodified:

    >>> np.histogram_bin_edges(arr, [1, 2])
    array([1, 2])

    This function allows one set of bins to be computed, and reused across
    multiple histograms:

    >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
    >>> shared_bins
    array([0., 1., 2., 3., 4., 5.])

    >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
    >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
    >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)

    >>> hist_0; hist_1
    array([1, 1, 0, 1, 0])
    array([2, 0, 1, 1, 2])

    Which gives more easily comparable results than using separate bins for
    each histogram:

    >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
    >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
    >>> hist_0; hist_1
    array([1, 1, 1])
    array([2, 1, 1, 2])
    >>> bins_0; bins_1
    array([0., 1., 2., 3.])
    array([0. , 1.25, 2.5 , 3.75, 5. ])

    """
    a, weights = _ravel_and_check_weights(a, weights)
    bin_edges, _ = _get_bin_edges(a, bins, range, weights)
    return bin_edges


def _histogram_dispatcher(
        a, bins=None, range=None, density=None, weights=None):
    return (a, bins, weights)


@array_function_dispatch(_histogram_dispatcher)
def histogram(a, bins=10, range=None, density=None, weights=None):
    r"""
    Compute the histogram of a dataset.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a
        sequence, it defines a monotonically increasing array of bin edges,
        including the rightmost edge, allowing for non-uniform bin widths.

        .. versionadded:: 1.11.0

        If `bins` is a string, it defines the method used to calculate the
        optimal bin width, as defined by `histogram_bin_edges`.

    range : (float, float), optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(a.min(), a.max())``. Values outside the range are
        ignored. The first element of the range must be less than or
        equal to the second. `range` affects the automatic bin
        computation as well. While bin width is computed to be optimal
        based on the actual data within `range`, the bin count will fill
        the entire range including portions containing no data.
    weights : array_like, optional
        An array of weights, of the same shape as `a`. Each value in
        `a` only contributes its associated weight towards the bin count
        (instead of 1). If `density` is True, the weights are
        normalized, so that the integral of the density over the range
        remains 1.
    density : bool, optional
        If ``False``, the result will contain the number of samples in
        each bin. If ``True``, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.

    Returns
    -------
    hist : array
        The values of the histogram. See `density` and `weights` for a
        description of the possible semantics.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.


    See Also
    --------
    histogramdd, bincount, searchsorted, digitize, histogram_bin_edges

    Notes
    -----
    All but the last (righthand-most) bin is half-open. In other words,
    if `bins` is::

      [1, 2, 3, 4]

    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
    the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
    *includes* 4.


    Examples
    --------
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
    (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))

    >>> a = np.arange(5)
    >>> hist, bin_edges = np.histogram(a, density=True)
    >>> hist
    array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
    >>> hist.sum()
    2.4999999999999996
    >>> np.sum(hist * np.diff(bin_edges))
    1.0

    .. versionadded:: 1.11.0

    Automated Bin Selection Methods example, using 2 peak random data
    with 2000 points:

    >>> import matplotlib.pyplot as plt
    >>> rng = np.random.RandomState(10)  # deterministic random data
    >>> a = np.hstack((rng.normal(size=1000),
    ...                rng.normal(loc=5, scale=2, size=1000)))
    >>> _ = plt.hist(a, bins='auto')  # arguments are passed to np.histogram
    >>> plt.title("Histogram with 'auto' bins")
    Text(0.5, 1.0, "Histogram with 'auto' bins")
    >>> plt.show()

    """
    a, weights = _ravel_and_check_weights(a, weights)

    bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)

    # Histogram is an integer or a float array depending on the weights.
    if weights is None:
        ntype = np.dtype(np.intp)
    else:
        ntype = weights.dtype

    # We set a block size, as this allows us to iterate over chunks when
    # computing histograms, to minimize memory usage.
    BLOCK = 65536

    # The fast path uses bincount, but that only works for certain types
    # of weight
    simple_weights = (
        weights is None or
        np.can_cast(weights.dtype, np.double) or
        np.can_cast(weights.dtype, complex)
    )

    if uniform_bins is not None and simple_weights:
        # Fast algorithm for equal bins
        # We now convert values of a to bin indices, under the assumption of
        # equal bin widths (which is valid here).
        first_edge, last_edge, n_equal_bins = uniform_bins

        # Initialize empty histogram
        n = np.zeros(n_equal_bins, ntype)

        # Pre-compute histogram scaling factor
        norm_numerator = n_equal_bins
        norm_denom = _unsigned_subtract(last_edge, first_edge)

        # We iterate over blocks here for two reasons: the first is that for
        # large arrays, it is actually faster (for example for a 10^8 array it
        # is 2x as fast) and it results in a memory footprint 3x lower in the
        # limit of large arrays.
        for i in _range(0, len(a), BLOCK):
            tmp_a = a[i:i+BLOCK]
            if weights is None:
                tmp_w = None
            else:
                tmp_w = weights[i:i + BLOCK]

            # Only include values in the right range
            keep = (tmp_a >= first_edge)
            keep &= (tmp_a <= last_edge)
            if not np.logical_and.reduce(keep):
                tmp_a = tmp_a[keep]
                if tmp_w is not None:
                    tmp_w = tmp_w[keep]

            # This cast ensures no type promotions occur below, which gh-10322
            # make unpredictable. Getting it wrong leads to precision errors
            # like gh-8123.
            tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)

            # Compute the bin indices, and for values that lie exactly on
            # last_edge we need to subtract one
            f_indices = ((_unsigned_subtract(tmp_a, first_edge) / norm_denom)
                         * norm_numerator)
            indices = f_indices.astype(np.intp)
            indices[indices == n_equal_bins] -= 1

            # The index computation is not guaranteed to give exactly
            # consistent results within ~1 ULP of the bin edges.
            decrement = tmp_a < bin_edges[indices]
            indices[decrement] -= 1
            # The last bin includes the right edge. The other bins do not.
            increment = ((tmp_a >= bin_edges[indices + 1])
                         & (indices != n_equal_bins - 1))
            indices[increment] += 1

            # We now compute the histogram using bincount
            if ntype.kind == 'c':
                n.real += np.bincount(indices, weights=tmp_w.real,
                                      minlength=n_equal_bins)
                n.imag += np.bincount(indices, weights=tmp_w.imag,
                                      minlength=n_equal_bins)
            else:
                n += np.bincount(indices, weights=tmp_w,
                                 minlength=n_equal_bins).astype(ntype)
    else:
        # Compute via cumulative histogram
        cum_n = np.zeros(bin_edges.shape, ntype)
        if weights is None:
            for i in _range(0, len(a), BLOCK):
                sa = np.sort(a[i:i+BLOCK])
                cum_n += _search_sorted_inclusive(sa, bin_edges)
        else:
            zero = np.zeros(1, dtype=ntype)
            for i in _range(0, len(a), BLOCK):
                tmp_a = a[i:i+BLOCK]
                tmp_w = weights[i:i+BLOCK]
                sorting_index = np.argsort(tmp_a)
                sa = tmp_a[sorting_index]
                sw = tmp_w[sorting_index]
                cw = np.concatenate((zero, sw.cumsum()))
                bin_index = _search_sorted_inclusive(sa, bin_edges)
                cum_n += cw[bin_index]

        n = np.diff(cum_n)

    if density:
        db = np.array(np.diff(bin_edges), float)
        return n/db/n.sum(), bin_edges

    return n, bin_edges


def _histogramdd_dispatcher(sample, bins=None, range=None, density=None,
                            weights=None):
    if hasattr(sample, 'shape'):  # same condition as used in histogramdd
        yield sample
    else:
        yield from sample
    with contextlib.suppress(TypeError):
        yield from bins
    yield weights


@array_function_dispatch(_histogramdd_dispatcher)
def histogramdd(sample, bins=10, range=None, density=None, weights=None):
    """
    Compute the multidimensional histogram of some data.

    Parameters
    ----------
    sample : (N, D) array, or (N, D) array_like
        The data to be histogrammed.

        Note the unusual interpretation of sample when an array_like:

        * When an array, each row is a coordinate in a D-dimensional space -
          such as ``histogramdd(np.array([p1, p2, p3]))``.
        * When an array_like, each element is the list of values for single
          coordinate - such as ``histogramdd((X, Y, Z))``.

        The first form should be preferred.

    bins : sequence or int, optional
        The bin specification:

        * A sequence of arrays describing the monotonically increasing bin
          edges along each dimension.
        * The number of bins for each dimension (nx, ny, ... =bins)
        * The number of bins for all dimensions (nx=ny=...=bins).

    range : sequence, optional
        A sequence of length D, each an optional (lower, upper) tuple giving
        the outer bin edges to be used if the edges are not given explicitly in
        `bins`.
        An entry of None in the sequence results in the minimum and maximum
        values being used for the corresponding dimension.
        The default, None, is equivalent to passing a tuple of D None values.
    density : bool, optional
        If False, the default, returns the number of samples in each bin.
        If True, returns the probability *density* function at the bin,
        ``bin_count / sample_count / bin_volume``.
    weights : (N,) array_like, optional
        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
        Weights are normalized to 1 if density is True. If density is False,
        the values of the returned histogram are equal to the sum of the
        weights belonging to the samples falling into each bin.

    Returns
    -------
    H : ndarray
        The multidimensional histogram of sample x. See density and weights
        for the different possible semantics.
    edges : list
        A list of D arrays describing the bin edges for each dimension.

    See Also
    --------
    histogram: 1-D histogram
    histogram2d: 2-D histogram

    Examples
    --------
    >>> r = np.random.randn(100,3)
    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
    ((5, 8, 4), 6, 9, 5)

    """

    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = np.atleast_2d(sample).T
        N, D = sample.shape

    nbin = np.empty(D, np.intp)
    edges = D*[None]
    dedges = D*[None]
    if weights is not None:
        weights = np.asarray(weights)

    try:
        M = len(bins)
        if M != D:
            raise ValueError(
                'The dimension of bins must be equal to the dimension of the '
                'sample x.')
    except TypeError:
        # bins is an integer
        bins = D*[bins]

    # normalize the range argument
    if range is None:
        range = (None,) * D
    elif len(range) != D:
        raise ValueError('range argument must have one entry per dimension')

    # Create edge arrays
    for i in _range(D):
        if np.ndim(bins[i]) == 0:
            if bins[i] < 1:
                raise ValueError(
                    '`bins[{}]` must be positive, when an integer'.format(i))
            smin, smax = _get_outer_edges(sample[:,i], range[i])
            try:
                n = operator.index(bins[i])

            except TypeError as e:
                raise TypeError(
                    "`bins[{}]` must be an integer, when a scalar".format(i)
                ) from e

            edges[i] = np.linspace(smin, smax, n + 1)
        elif np.ndim(bins[i]) == 1:
            edges[i] = np.asarray(bins[i])
            if np.any(edges[i][:-1] > edges[i][1:]):
                raise ValueError(
                    '`bins[{}]` must be monotonically increasing, when an array'
                    .format(i))
        else:
            raise ValueError(
                '`bins[{}]` must be a scalar or 1d array'.format(i))

        nbin[i] = len(edges[i]) + 1  # includes an outlier on each end
        dedges[i] = np.diff(edges[i])

    # Compute the bin number each sample falls into.
    Ncount = tuple(
        # avoid np.digitize to work around gh-11022
        np.searchsorted(edges[i], sample[:, i], side='right')
        for i in _range(D)
    )

    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right edge to be
    # counted in the last bin, and not as an outlier.
    for i in _range(D):
        # Find which points are on the rightmost edge.
        on_edge = (sample[:, i] == edges[i][-1])
        # Shift these points one bin to the left.
        Ncount[i][on_edge] -= 1

    # Compute the sample indices in the flattened histogram matrix.
    # This raises an error if the array is too large.
    xy = np.ravel_multi_index(Ncount, nbin)

    # Compute the number of repetitions in xy and assign it to the
    # flattened histmat.
    hist = np.bincount(xy, weights, minlength=nbin.prod())

    # Shape into a proper matrix
    hist = hist.reshape(nbin)

    # This preserves the (bad) behavior observed in gh-7845, for now.
    hist = hist.astype(float, casting='safe')

    # Remove outliers (indices 0 and -1 for each dimension).
    core = D*(slice(1, -1),)
    hist = hist[core]

    if density:
        # calculate the probability density function
        s = hist.sum()
        for i in _range(D):
            shape = np.ones(D, int)
            shape[i] = nbin[i] - 2
            hist = hist / dedges[i].reshape(shape)
        hist /= s

    if (hist.shape != nbin - 2).any():
        raise RuntimeError(
            "Internal Shape Error")
    return hist, edges
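

# Illustrative sketch (an editorial addition, not part of the NumPy source):
# with density=True the D-dimensional histogram above integrates to 1, i.e.
# summing bin_value * bin_volume over all bins recovers 1 (up to floating
# point error). The name _demo_histogramdd_density is hypothetical.
def _demo_histogramdd_density():
    rng = np.random.RandomState(0)
    sample = rng.normal(size=(1000, 2))
    hist, edges = histogramdd(sample, bins=(5, 4), density=True)
    widths_x = np.diff(edges[0]).reshape(-1, 1)
    widths_y = np.diff(edges[1]).reshape(1, -1)
    return np.sum(hist * widths_x * widths_y)   # ~1.0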