1"""
2Histogram-related functions
3"""
4import contextlib
5import functools
6import operator
7import warnings
8
9import numpy as np
10from numpy._core import overrides
11
12__all__ = ['histogram', 'histogramdd', 'histogram_bin_edges']
13
14array_function_dispatch = functools.partial(
15 overrides.array_function_dispatch, module='numpy')
16
17# range is a keyword argument to many functions, so save the builtin so they can
18# use it.
19_range = range
20
21
22def _ptp(x):
23 """Peak-to-peak value of x.
24
25 This implementation avoids the problem of signed integer arrays having a
26 peak-to-peak value that cannot be represented with the array's data type.
27 This function returns an unsigned value for signed integer arrays.
28 """
    return _unsigned_subtract(x.max(), x.min())


def _hist_bin_sqrt(x, range):
    """
    Square root histogram bin estimator.

    Bin width is inversely proportional to the square root of the data size.
    Used by many programs for its simplicity.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
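    # sqrt: n_h = sqrt(n) bins, recast to a width via the data's range.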
    return _ptp(x) / np.sqrt(x.size)


def _hist_bin_sturges(x, range):
    """
    Sturges histogram bin estimator.

    A very simplistic estimator based on the assumption of normality of
    the data. This estimator has poor performance for non-normal data,
    which becomes especially obvious for large data sets. The estimate
    depends only on the size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
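    # Sturges: n_h = log2(n) + 1 bins, recast to a width via the data's range.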
    return _ptp(x) / (np.log2(x.size) + 1.0)


def _hist_bin_rice(x, range):
    """
    Rice histogram bin estimator.

    Another simple estimator with no normality assumption. It has better
    performance for large data than Sturges, but tends to overestimate
    the number of bins. The number of bins is proportional to the cube
    root of data size (asymptotically optimal). The estimate depends
    only on the size of the data.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
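    # Rice: n_h = 2 * n**(1/3) bins, recast to a width via the data's range.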
    return _ptp(x) / (2.0 * x.size ** (1.0 / 3))


def _hist_bin_scott(x, range):
    """
    Scott histogram bin estimator.

    The binwidth is proportional to the standard deviation of the data
    and inversely proportional to the cube root of data size
    (asymptotically optimal).

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
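    # Scott: h = std(x) * (24 * sqrt(pi) / n)**(1/3).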
    return (24.0 * np.pi**0.5 / x.size)**(1.0 / 3.0) * np.std(x)


def _hist_bin_stone(x, range):
    """
    Histogram bin estimator based on minimizing the estimated integrated
    squared error (ISE).

    The number of bins is chosen by minimizing the estimated ISE against the
    unknown true distribution. The ISE is estimated using cross-validation and
    can be regarded as a generalization of Scott's rule.
    https://en.wikipedia.org/wiki/Histogram#Scott.27s_normal_reference_rule

    This paper by Stone appears to be the origin of this rule.
    https://digitalassets.lib.berkeley.edu/sdtr/ucb/text/34.pdf

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.
    range : (float, float)
        The lower and upper range of the bins.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """

    n = x.size
    ptp_x = _ptp(x)
    if n <= 1 or ptp_x == 0:
        return 0

    def jhat(nbins):
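        # Stone's cross-validation estimate of the ISE for `nbins` equal-width
        # bins; the value of nbins minimizing this objective is chosen below.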
        hh = ptp_x / nbins
        p_k = np.histogram(x, bins=nbins, range=range)[0] / n
        return (2 - (n + 1) * p_k.dot(p_k)) / hh

    nbins_upper_bound = max(100, int(np.sqrt(n)))
    nbins = min(_range(1, nbins_upper_bound + 1), key=jhat)
    if nbins == nbins_upper_bound:
        warnings.warn("The number of bins estimated may be suboptimal.",
                      RuntimeWarning, stacklevel=3)
    return ptp_x / nbins


def _hist_bin_doane(x, range):
    """
    Doane's histogram bin estimator.

    Improved version of Sturges' formula which works better for
    non-normal data. See
    stats.stackexchange.com/questions/55134/doanes-formula-for-histogram-binning

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
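    # Doane: n_h = 1 + log2(n) + log2(1 + |g1| / sg1), where g1 is the sample
    # skewness and sg1 its standard deviation under the normality assumption.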
    if x.size > 2:
        sg1 = np.sqrt(6.0 * (x.size - 2) / ((x.size + 1.0) * (x.size + 3)))
        sigma = np.std(x)
        if sigma > 0.0:
            # These three operations add up to
            # g1 = np.mean(((x - np.mean(x)) / sigma)**3)
            # but use only one temp array instead of three
            temp = x - np.mean(x)
            np.true_divide(temp, sigma, temp)
            np.power(temp, 3, temp)
            g1 = np.mean(temp)
            return _ptp(x) / (1.0 + np.log2(x.size) +
                              np.log2(1.0 + np.absolute(g1) / sg1))
    return 0.0


def _hist_bin_fd(x, range):
    """
    The Freedman-Diaconis histogram bin estimator.

    The Freedman-Diaconis rule uses interquartile range (IQR) to
    estimate binwidth. It is considered a variation of the Scott rule
    with more robustness as the IQR is less affected by outliers than
    the standard deviation. However, the IQR depends on fewer points
    than the standard deviation, so it is less accurate, especially for
    long tailed distributions.

    If the IQR is 0, this function returns 0 for the bin width.
    Binwidth is inversely proportional to the cube root of data size
    (asymptotically optimal).

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.
    """
    del range  # unused
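    # Freedman-Diaconis: h = 2 * IQR / n**(1/3).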
    iqr = np.subtract(*np.percentile(x, [75, 25]))
    return 2.0 * iqr * x.size ** (-1.0 / 3.0)


def _hist_bin_auto(x, range):
    """
    Histogram bin estimator that uses the minimum width of the
    Freedman-Diaconis and Sturges estimators if the FD bin width is non-zero.
    If the bin width from the FD estimator is 0, the Sturges estimator is used.

    The FD estimator is usually the most robust method, but its width
    estimate tends to be too large for small `x` and bad for data with limited
    variance. The Sturges estimator is quite good for small (<1000) datasets
    and is the default in the R language. This method gives good off-the-shelf
    behaviour.

    If there is limited variance the IQR can be 0, which results in the
    FD bin width being 0 too. This is not a valid bin width, so
    ``np.histogram_bin_edges`` chooses 1 bin instead, which may not be optimal.
    If the IQR is 0, it's unlikely any variance-based estimators will be of
    use, so we revert to the Sturges estimator, which only uses the size of the
    dataset in its calculation.

    Parameters
    ----------
    x : array_like
        Input data that is to be histogrammed, trimmed to range. May not
        be empty.

    Returns
    -------
    h : An estimate of the optimal bin width for the given data.

    See Also
    --------
    _hist_bin_fd, _hist_bin_sturges
    """
    fd_bw = _hist_bin_fd(x, range)
    sturges_bw = _hist_bin_sturges(x, range)
    del range  # unused
    if fd_bw:
        return min(fd_bw, sturges_bw)
    else:
        # limited variance, so we return a len dependent bw estimator
        return sturges_bw


# Private dict initialized at module load time
_hist_bin_selectors = {'stone': _hist_bin_stone,
                       'auto': _hist_bin_auto,
                       'doane': _hist_bin_doane,
                       'fd': _hist_bin_fd,
                       'rice': _hist_bin_rice,
                       'scott': _hist_bin_scott,
                       'sqrt': _hist_bin_sqrt,
                       'sturges': _hist_bin_sturges}


def _ravel_and_check_weights(a, weights):
    """ Check a and weights have matching shapes, and ravel both """
    a = np.asarray(a)

    # Ensure that the array is a "subtractable" dtype
    if a.dtype == np.bool:
        warnings.warn("Converting input from {} to {} for compatibility."
                      .format(a.dtype, np.uint8),
                      RuntimeWarning, stacklevel=3)
        a = a.astype(np.uint8)

    if weights is not None:
        weights = np.asarray(weights)
        if weights.shape != a.shape:
            raise ValueError(
                'weights should have the same shape as a.')
        weights = weights.ravel()
    a = a.ravel()
    return a, weights


def _get_outer_edges(a, range):
    """
    Determine the outer bin edges to use, from either the data or the range
    argument
    """
    if range is not None:
        first_edge, last_edge = range
        if first_edge > last_edge:
            raise ValueError(
                'max must be larger than min in range parameter.')
        if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
            raise ValueError(
                "supplied range of [{}, {}] is not finite".format(first_edge, last_edge))
    elif a.size == 0:
        # handle empty arrays. Can't determine range, so use 0-1.
        first_edge, last_edge = 0, 1
    else:
        first_edge, last_edge = a.min(), a.max()
        if not (np.isfinite(first_edge) and np.isfinite(last_edge)):
            raise ValueError(
                "autodetected range of [{}, {}] is not finite".format(first_edge, last_edge))

    # expand empty range to avoid divide by zero
    if first_edge == last_edge:
        first_edge = first_edge - 0.5
        last_edge = last_edge + 0.5

    return first_edge, last_edge


def _unsigned_subtract(a, b):
    """
    Subtract two values where a >= b, and produce an unsigned result

    This is needed when finding the difference between the upper and lower
    bound of an int16 histogram
    """
    # coerce to a single type
    signed_to_unsigned = {
        np.byte: np.ubyte,
        np.short: np.ushort,
        np.intc: np.uintc,
        np.int_: np.uint,
        np.longlong: np.ulonglong
    }
    dt = np.result_type(a, b)
    try:
        unsigned_dt = signed_to_unsigned[dt.type]
    except KeyError:
        return np.subtract(a, b, dtype=dt)
    else:
        # we know the inputs are integers, and we are deliberately casting
        # signed to unsigned. The input may be negative python integers so
        # ensure we pass in arrays with the initial dtype (related to NEP 50).
        return np.subtract(np.asarray(a, dtype=dt), np.asarray(b, dtype=dt),
                           casting='unsafe', dtype=unsigned_dt)


def _get_bin_edges(a, bins, range, weights):
    """
    Computes the bins used internally by `histogram`.

    Parameters
    ==========
    a : ndarray
        Ravelled data array
    bins, range
        Forwarded arguments from `histogram`.
    weights : ndarray, optional
        Ravelled weights array, or None

    Returns
    =======
    bin_edges : ndarray
        Array of bin edges
    uniform_bins : (Number, Number, int):
        The lower bound, upper bound, and number of bins, used in the
        optimized implementation of `histogram` that works on uniform bins.
    """
    # parse the overloaded bins argument
    n_equal_bins = None
    bin_edges = None

    if isinstance(bins, str):
        bin_name = bins
        # if `bins` is a string for an automatic method,
        # this will replace it with the number of bins calculated
        if bin_name not in _hist_bin_selectors:
            raise ValueError(
                "{!r} is not a valid estimator for `bins`".format(bin_name))
        if weights is not None:
            raise TypeError("Automated estimation of the number of "
                            "bins is not supported for weighted data")

        first_edge, last_edge = _get_outer_edges(a, range)

        # truncate the range if needed
        if range is not None:
            keep = (a >= first_edge)
            keep &= (a <= last_edge)
            if not np.logical_and.reduce(keep):
                a = a[keep]

        if a.size == 0:
            n_equal_bins = 1
        else:
            # Do not call selectors on empty arrays
            width = _hist_bin_selectors[bin_name](a, (first_edge, last_edge))
            if width:
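                # Integer data never gets a bin width below 1 (see the
                # `histogram_bin_edges` docstring).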
                if np.issubdtype(a.dtype, np.integer) and width < 1:
                    width = 1
                n_equal_bins = int(np.ceil(
                    _unsigned_subtract(last_edge, first_edge) / width))
            else:
                # Width can be zero for some estimators, e.g. FD when
                # the IQR of the data is zero.
                n_equal_bins = 1

    elif np.ndim(bins) == 0:
        try:
            n_equal_bins = operator.index(bins)
        except TypeError as e:
            raise TypeError(
                '`bins` must be an integer, a string, or an array') from e
        if n_equal_bins < 1:
            raise ValueError('`bins` must be positive, when an integer')

        first_edge, last_edge = _get_outer_edges(a, range)

    elif np.ndim(bins) == 1:
        bin_edges = np.asarray(bins)
        if np.any(bin_edges[:-1] > bin_edges[1:]):
            raise ValueError(
                '`bins` must increase monotonically, when an array')

    else:
        raise ValueError('`bins` must be 1d, when an array')

    if n_equal_bins is not None:
        # gh-10322 means that type resolution rules are dependent on array
        # shapes. To avoid this causing problems, we pick a type now and stick
        # with it throughout.
        bin_type = np.result_type(first_edge, last_edge, a)
        if np.issubdtype(bin_type, np.integer):
            bin_type = np.result_type(bin_type, float)

        # bin edges must be computed
        bin_edges = np.linspace(
            first_edge, last_edge, n_equal_bins + 1,
            endpoint=True, dtype=bin_type)
        if np.any(bin_edges[:-1] >= bin_edges[1:]):
            raise ValueError(
                f'Too many bins for data range. Cannot create {n_equal_bins} '
                f'finite-sized bins.')
        return bin_edges, (first_edge, last_edge, n_equal_bins)
    else:
        return bin_edges, None


def _search_sorted_inclusive(a, v):
    """
    Like `searchsorted`, but where the last item in `v` is placed on the right.

    In the context of a histogram, this makes the last bin edge inclusive
    """
    return np.concatenate((
        a.searchsorted(v[:-1], 'left'),
        a.searchsorted(v[-1:], 'right')
    ))


def _histogram_bin_edges_dispatcher(a, bins=None, range=None, weights=None):
    return (a, bins, weights)


@array_function_dispatch(_histogram_bin_edges_dispatcher)
def histogram_bin_edges(a, bins=10, range=None, weights=None):
    r"""
    Function to calculate only the edges of the bins used by the `histogram`
    function.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a
        sequence, it defines the bin edges, including the rightmost
        edge, allowing for non-uniform bin widths.

        If `bins` is a string from the list below, `histogram_bin_edges` will
        use the method chosen to calculate the optimal bin width and
        consequently the number of bins (see the Notes section for more detail
        on the estimators) from the data that falls within the requested range.
        While the bin width will be optimal for the actual data
        in the range, the number of bins will be computed to fill the
        entire range, including the empty portions. For visualisation,
        using the 'auto' option is suggested. Weighted data is not
        supported for automated bin size selection.

        'auto'
            Minimum bin width between the 'sturges' and 'fd' estimators.
            Provides good all-around performance.

        'fd' (Freedman Diaconis Estimator)
            Robust (resilient to outliers) estimator that takes into
            account data variability and data size.

        'doane'
            An improved version of Sturges' estimator that works better
            with non-normal datasets.

        'scott'
            Less robust estimator that takes into account data variability
            and data size.

        'stone'
            Estimator based on leave-one-out cross-validation estimate of
            the integrated squared error. Can be regarded as a generalization
            of Scott's rule.

        'rice'
            Estimator does not take variability into account, only data
            size. Commonly overestimates number of bins required.

        'sturges'
            R's default method, only accounts for data size. Only
            optimal for gaussian data and underestimates number of bins
            for large non-gaussian datasets.

        'sqrt'
            Square root (of data size) estimator, used by Excel and
            other programs for its speed and simplicity.

    range : (float, float), optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(a.min(), a.max())``. Values outside the range are
        ignored. The first element of the range must be less than or
        equal to the second. `range` affects the automatic bin
        computation as well. While bin width is computed to be optimal
        based on the actual data within `range`, the bin count will fill
        the entire range including portions containing no data.

    weights : array_like, optional
        An array of weights, of the same shape as `a`. Each value in
        `a` only contributes its associated weight towards the bin count
        (instead of 1). This is currently not used by any of the bin estimators,
        but may be in the future.

    Returns
    -------
    bin_edges : array of dtype float
        The edges to pass into `histogram`

    See Also
    --------
    histogram

    Notes
    -----
    The methods to estimate the optimal number of bins are well founded
    in literature, and are inspired by the choices R provides for
    histogram visualisation. Note that having the number of bins
    proportional to :math:`n^{1/3}` is asymptotically optimal, which is
    why it appears in most estimators. These are simply plug-in methods
    that give good starting points for number of bins. In the equations
    below, :math:`h` is the binwidth and :math:`n_h` is the number of
    bins. All estimators that compute bin counts are recast to bin width
    using the `ptp` of the data. The final bin count is obtained from
    ``np.round(np.ceil(range / h))``. The final bin width is often less
    than what is returned by the estimators below.

    'auto' (minimum bin width of the 'sturges' and 'fd' estimators)
        A compromise to get a good value. For small datasets the Sturges
        value will usually be chosen, while larger datasets will usually
        default to FD. Avoids the overly conservative behaviour of FD
        and Sturges for small and large datasets respectively.
        Switchover point is usually :math:`a.size \approx 1000`.

    'fd' (Freedman Diaconis Estimator)
        .. math:: h = 2 \frac{IQR}{n^{1/3}}

        The binwidth is proportional to the interquartile range (IQR)
        and inversely proportional to cube root of a.size. Can be too
        conservative for small datasets, but is quite good for large
        datasets. The IQR is very robust to outliers.

    'scott'
        .. math:: h = \sigma \sqrt[3]{\frac{24 \sqrt{\pi}}{n}}

        The binwidth is proportional to the standard deviation of the
        data and inversely proportional to cube root of ``x.size``. Can
        be too conservative for small datasets, but is quite good for
        large datasets. The standard deviation is not very robust to
        outliers. Values are very similar to the Freedman-Diaconis
        estimator in the absence of outliers.

    'rice'
        .. math:: n_h = 2n^{1/3}

        The number of bins is only proportional to cube root of
        ``a.size``. It tends to overestimate the number of bins and it
        does not take into account data variability.

    'sturges'
        .. math:: n_h = \log _{2}(n) + 1

        The number of bins is the base 2 log of ``a.size``. This
        estimator assumes normality of data and is too conservative for
        larger, non-normal datasets. This is the default method in R's
        ``hist`` method.

    'doane'
        .. math:: n_h = 1 + \log_{2}(n) +
                        \log_{2}\left(1 + \frac{|g_1|}{\sigma_{g_1}}\right)

            g_1 = mean\left[\left(\frac{x - \mu}{\sigma}\right)^3\right]

            \sigma_{g_1} = \sqrt{\frac{6(n - 2)}{(n + 1)(n + 3)}}

        An improved version of Sturges' formula that produces better
        estimates for non-normal datasets. This estimator attempts to
        account for the skew of the data.

    'sqrt'
        .. math:: n_h = \sqrt n

        The simplest and fastest estimator. Only takes into account the
        data size.

    Additionally, if the data is of integer dtype, then the binwidth will never
    be less than 1.

    Examples
    --------
    >>> import numpy as np
    >>> arr = np.array([0, 0, 0, 1, 2, 3, 3, 4, 5])
    >>> np.histogram_bin_edges(arr, bins='auto', range=(0, 1))
    array([0.  , 0.25, 0.5 , 0.75, 1.  ])
    >>> np.histogram_bin_edges(arr, bins=2)
    array([0. , 2.5, 5. ])

    For consistency with histogram, an array of pre-computed bins is
    passed through unmodified:

    >>> np.histogram_bin_edges(arr, [1, 2])
    array([1, 2])

    This function allows one set of bins to be computed, and reused across
    multiple histograms:

    >>> shared_bins = np.histogram_bin_edges(arr, bins='auto')
    >>> shared_bins
    array([0., 1., 2., 3., 4., 5.])

    >>> group_id = np.array([0, 1, 1, 0, 1, 1, 0, 1, 1])
    >>> hist_0, _ = np.histogram(arr[group_id == 0], bins=shared_bins)
    >>> hist_1, _ = np.histogram(arr[group_id == 1], bins=shared_bins)

    >>> hist_0; hist_1
    array([1, 1, 0, 1, 0])
    array([2, 0, 1, 1, 2])

    Which gives more easily comparable results than using separate bins for
    each histogram:

    >>> hist_0, bins_0 = np.histogram(arr[group_id == 0], bins='auto')
    >>> hist_1, bins_1 = np.histogram(arr[group_id == 1], bins='auto')
    >>> hist_0; hist_1
    array([1, 1, 1])
    array([2, 1, 1, 2])
    >>> bins_0; bins_1
    array([0., 1., 2., 3.])
    array([0.  , 1.25, 2.5 , 3.75, 5.  ])

    """
    a, weights = _ravel_and_check_weights(a, weights)
    bin_edges, _ = _get_bin_edges(a, bins, range, weights)
    return bin_edges


def _histogram_dispatcher(
        a, bins=None, range=None, density=None, weights=None):
    return (a, bins, weights)


@array_function_dispatch(_histogram_dispatcher)
def histogram(a, bins=10, range=None, density=None, weights=None):
    r"""
    Compute the histogram of a dataset.

    Parameters
    ----------
    a : array_like
        Input data. The histogram is computed over the flattened array.
    bins : int or sequence of scalars or str, optional
        If `bins` is an int, it defines the number of equal-width
        bins in the given range (10, by default). If `bins` is a
        sequence, it defines a monotonically increasing array of bin edges,
        including the rightmost edge, allowing for non-uniform bin widths.

        If `bins` is a string, it defines the method used to calculate the
        optimal bin width, as defined by `histogram_bin_edges`.

    range : (float, float), optional
        The lower and upper range of the bins. If not provided, range
        is simply ``(a.min(), a.max())``. Values outside the range are
        ignored. The first element of the range must be less than or
        equal to the second. `range` affects the automatic bin
        computation as well. While bin width is computed to be optimal
        based on the actual data within `range`, the bin count will fill
        the entire range including portions containing no data.
    weights : array_like, optional
        An array of weights, of the same shape as `a`. Each value in
        `a` only contributes its associated weight towards the bin count
        (instead of 1). If `density` is True, the weights are
        normalized, so that the integral of the density over the range
        remains 1.
        Please note that the ``dtype`` of `weights` will also become the
        ``dtype`` of the returned accumulator (`hist`), so it must be
        large enough to hold accumulated values as well.
    density : bool, optional
        If ``False``, the result will contain the number of samples in
        each bin. If ``True``, the result is the value of the
        probability *density* function at the bin, normalized such that
        the *integral* over the range is 1. Note that the sum of the
        histogram values will not be equal to 1 unless bins of unity
        width are chosen; it is not a probability *mass* function.

    Returns
    -------
    hist : array
        The values of the histogram. See `density` and `weights` for a
        description of the possible semantics. If `weights` are given,
        ``hist.dtype`` will be taken from `weights`.
    bin_edges : array of dtype float
        Return the bin edges ``(length(hist)+1)``.


    See Also
    --------
    histogramdd, bincount, searchsorted, digitize, histogram_bin_edges

    Notes
    -----
    All but the last (righthand-most) bin is half-open. In other words,
    if `bins` is::

        [1, 2, 3, 4]

    then the first bin is ``[1, 2)`` (including 1, but excluding 2) and
    the second ``[2, 3)``. The last bin, however, is ``[3, 4]``, which
    *includes* 4.


    Examples
    --------
    >>> import numpy as np
    >>> np.histogram([1, 2, 1], bins=[0, 1, 2, 3])
    (array([0, 2, 1]), array([0, 1, 2, 3]))
    >>> np.histogram(np.arange(4), bins=np.arange(5), density=True)
    (array([0.25, 0.25, 0.25, 0.25]), array([0, 1, 2, 3, 4]))
    >>> np.histogram([[1, 2, 1], [1, 0, 1]], bins=[0,1,2,3])
    (array([1, 4, 1]), array([0, 1, 2, 3]))

    >>> a = np.arange(5)
    >>> hist, bin_edges = np.histogram(a, density=True)
    >>> hist
    array([0.5, 0. , 0.5, 0. , 0. , 0.5, 0. , 0.5, 0. , 0.5])
    >>> hist.sum()
    2.4999999999999996
    >>> np.sum(hist * np.diff(bin_edges))
    1.0

    Automated Bin Selection Methods example, using 2 peak random data
    with 2000 points.

    .. plot::
        :include-source:

        import matplotlib.pyplot as plt
        import numpy as np

        rng = np.random.RandomState(10)  # deterministic random data
        a = np.hstack((rng.normal(size=1000),
                       rng.normal(loc=5, scale=2, size=1000)))
        plt.hist(a, bins='auto')  # arguments are passed to np.histogram
        plt.title("Histogram with 'auto' bins")
        plt.show()

    """
    a, weights = _ravel_and_check_weights(a, weights)

    bin_edges, uniform_bins = _get_bin_edges(a, bins, range, weights)

    # Histogram is an integer or a float array depending on the weights.
    if weights is None:
        ntype = np.dtype(np.intp)
    else:
        ntype = weights.dtype

    # We set a block size, as this allows us to iterate over chunks when
    # computing histograms, to minimize memory usage.
    BLOCK = 65536

    # The fast path uses bincount, but that only works for certain types
    # of weight
    simple_weights = (
        weights is None or
        np.can_cast(weights.dtype, np.double) or
        np.can_cast(weights.dtype, complex)
    )

    if uniform_bins is not None and simple_weights:
        # Fast algorithm for equal bins
        # We now convert values of a to bin indices, under the assumption of
        # equal bin widths (which is valid here).
        first_edge, last_edge, n_equal_bins = uniform_bins

        # Initialize empty histogram
        n = np.zeros(n_equal_bins, ntype)

        # Pre-compute histogram scaling factor
        norm_numerator = n_equal_bins
        norm_denom = _unsigned_subtract(last_edge, first_edge)
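        # A value x is mapped to the fractional bin index
        # _unsigned_subtract(x, first_edge) / norm_denom * norm_numerator,
        # then truncated to an integer; edge cases are corrected below.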

        # We iterate over blocks here for two reasons: the first is that for
        # large arrays, it is actually faster (for example for a 10^8 array it
        # is 2x as fast) and it results in a memory footprint 3x lower in the
        # limit of large arrays.
        for i in _range(0, len(a), BLOCK):
            tmp_a = a[i:i+BLOCK]
            if weights is None:
                tmp_w = None
            else:
                tmp_w = weights[i:i + BLOCK]

            # Only include values in the right range
            keep = (tmp_a >= first_edge)
            keep &= (tmp_a <= last_edge)
            if not np.logical_and.reduce(keep):
                tmp_a = tmp_a[keep]
                if tmp_w is not None:
                    tmp_w = tmp_w[keep]

            # This cast ensures no type promotions occur below, which gh-10322
            # makes unpredictable. Getting it wrong leads to precision errors
            # like gh-8123.
            tmp_a = tmp_a.astype(bin_edges.dtype, copy=False)

            # Compute the bin indices, and for values that lie exactly on
            # last_edge we need to subtract one
            f_indices = ((_unsigned_subtract(tmp_a, first_edge) / norm_denom)
                         * norm_numerator)
            indices = f_indices.astype(np.intp)
            indices[indices == n_equal_bins] -= 1

            # The index computation is not guaranteed to give exactly
            # consistent results within ~1 ULP of the bin edges.
            decrement = tmp_a < bin_edges[indices]
            indices[decrement] -= 1
            # The last bin includes the right edge. The other bins do not.
            increment = ((tmp_a >= bin_edges[indices + 1])
                         & (indices != n_equal_bins - 1))
            indices[increment] += 1

            # We now compute the histogram using bincount
            if ntype.kind == 'c':
                n.real += np.bincount(indices, weights=tmp_w.real,
                                      minlength=n_equal_bins)
                n.imag += np.bincount(indices, weights=tmp_w.imag,
                                      minlength=n_equal_bins)
            else:
                n += np.bincount(indices, weights=tmp_w,
                                 minlength=n_equal_bins).astype(ntype)
    else:
        # Compute via cumulative histogram
        cum_n = np.zeros(bin_edges.shape, ntype)
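        # cum_n[k] will accumulate the (weighted) count of samples below
        # bin_edges[k] (the final edge is inclusive); adjacent differences
        # then give the per-bin counts.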
        if weights is None:
            for i in _range(0, len(a), BLOCK):
                sa = np.sort(a[i:i+BLOCK])
                cum_n += _search_sorted_inclusive(sa, bin_edges)
        else:
            zero = np.zeros(1, dtype=ntype)
            for i in _range(0, len(a), BLOCK):
                tmp_a = a[i:i+BLOCK]
                tmp_w = weights[i:i+BLOCK]
                sorting_index = np.argsort(tmp_a)
                sa = tmp_a[sorting_index]
                sw = tmp_w[sorting_index]
                cw = np.concatenate((zero, sw.cumsum()))
                bin_index = _search_sorted_inclusive(sa, bin_edges)
                cum_n += cw[bin_index]

        n = np.diff(cum_n)

    if density:
        db = np.array(np.diff(bin_edges), float)
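        # Normalize by bin width and total count so that the result
        # integrates to 1 over the histogrammed range.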
        return n/db/n.sum(), bin_edges

    return n, bin_edges


def _histogramdd_dispatcher(sample, bins=None, range=None, density=None,
                            weights=None):
    if hasattr(sample, 'shape'):  # same condition as used in histogramdd
        yield sample
    else:
        yield from sample
    with contextlib.suppress(TypeError):
        yield from bins
    yield weights


@array_function_dispatch(_histogramdd_dispatcher)
def histogramdd(sample, bins=10, range=None, density=None, weights=None):
    """
    Compute the multidimensional histogram of some data.

    Parameters
    ----------
    sample : (N, D) array, or (N, D) array_like
        The data to be histogrammed.

        Note the unusual interpretation of sample when an array_like:

        * When an array, each row is a coordinate in a D-dimensional space -
          such as ``histogramdd(np.array([p1, p2, p3]))``.
        * When an array_like, each element is the list of values for single
          coordinate - such as ``histogramdd((X, Y, Z))``.

        The first form should be preferred.

    bins : sequence or int, optional
        The bin specification:

        * A sequence of arrays describing the monotonically increasing bin
          edges along each dimension.
        * The number of bins for each dimension (nx, ny, ... =bins)
        * The number of bins for all dimensions (nx=ny=...=bins).

    range : sequence, optional
        A sequence of length D, each an optional (lower, upper) tuple giving
        the outer bin edges to be used if the edges are not given explicitly in
        `bins`.
        An entry of None in the sequence results in the minimum and maximum
        values being used for the corresponding dimension.
        The default, None, is equivalent to passing a tuple of D None values.
    density : bool, optional
        If False, the default, returns the number of samples in each bin.
        If True, returns the probability *density* function at the bin,
        ``bin_count / sample_count / bin_volume``.
    weights : (N,) array_like, optional
        An array of values `w_i` weighing each sample `(x_i, y_i, z_i, ...)`.
        Weights are normalized to 1 if density is True. If density is False,
        the values of the returned histogram are equal to the sum of the
        weights belonging to the samples falling into each bin.

    Returns
    -------
    H : ndarray
        The multidimensional histogram of sample x. See density and weights
        for the different possible semantics.
    edges : tuple of ndarrays
        A tuple of D arrays describing the bin edges for each dimension.

    See Also
    --------
    histogram: 1-D histogram
    histogram2d: 2-D histogram

    Examples
    --------
    >>> import numpy as np
    >>> rng = np.random.default_rng()
    >>> r = rng.normal(size=(100,3))
    >>> H, edges = np.histogramdd(r, bins = (5, 8, 4))
    >>> H.shape, edges[0].size, edges[1].size, edges[2].size
    ((5, 8, 4), 6, 9, 5)

    """

    try:
        # Sample is an ND-array.
        N, D = sample.shape
    except (AttributeError, ValueError):
        # Sample is a sequence of 1D arrays.
        sample = np.atleast_2d(sample).T
        N, D = sample.shape

    nbin = np.empty(D, np.intp)
    edges = D*[None]
    dedges = D*[None]
    if weights is not None:
        weights = np.asarray(weights)

    try:
        M = len(bins)
        if M != D:
            raise ValueError(
                'The dimension of bins must be equal to the dimension of the '
                'sample x.')
    except TypeError:
        # bins is an integer
        bins = D*[bins]

    # normalize the range argument
    if range is None:
        range = (None,) * D
    elif len(range) != D:
        raise ValueError('range argument must have one entry per dimension')

    # Create edge arrays
    for i in _range(D):
        if np.ndim(bins[i]) == 0:
            if bins[i] < 1:
                raise ValueError(
                    '`bins[{}]` must be positive, when an integer'.format(i))
            smin, smax = _get_outer_edges(sample[:,i], range[i])
            try:
                n = operator.index(bins[i])

            except TypeError as e:
                raise TypeError(
                    "`bins[{}]` must be an integer, when a scalar".format(i)
                ) from e

            edges[i] = np.linspace(smin, smax, n + 1)
        elif np.ndim(bins[i]) == 1:
            edges[i] = np.asarray(bins[i])
            if np.any(edges[i][:-1] > edges[i][1:]):
                raise ValueError(
                    '`bins[{}]` must be monotonically increasing, when an array'
                    .format(i))
        else:
            raise ValueError(
                '`bins[{}]` must be a scalar or 1d array'.format(i))

        nbin[i] = len(edges[i]) + 1  # includes an outlier on each end
        dedges[i] = np.diff(edges[i])

    # Compute the bin number each sample falls into.
    Ncount = tuple(
        # avoid np.digitize to work around gh-11022
        np.searchsorted(edges[i], sample[:, i], side='right')
        for i in _range(D)
    )

    # Using digitize, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right edge to be
    # counted in the last bin, and not as an outlier.
    for i in _range(D):
        # Find which points are on the rightmost edge.
        on_edge = (sample[:, i] == edges[i][-1])
        # Shift these points one bin to the left.
        Ncount[i][on_edge] -= 1

    # Compute the sample indices in the flattened histogram matrix.
    # This raises an error if the array is too large.
    xy = np.ravel_multi_index(Ncount, nbin)

    # Compute the number of repetitions in xy and assign it to the
    # flattened histmat.
    hist = np.bincount(xy, weights, minlength=nbin.prod())

    # Shape into a proper matrix
    hist = hist.reshape(nbin)

    # This preserves the (bad) behavior observed in gh-7845, for now.
    hist = hist.astype(float, casting='safe')

    # Remove outliers (indices 0 and -1 for each dimension).
    core = D*(slice(1, -1),)
    hist = hist[core]

    if density:
        # calculate the probability density function
        s = hist.sum()
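        # Divide by the bin widths along each dimension (broadcast via
        # reshape) and by the total count so the result integrates to 1.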
        for i in _range(D):
            shape = np.ones(D, int)
            shape[i] = nbin[i] - 2
            hist = hist / dedges[i].reshape(shape)
        hist /= s

    if (hist.shape != nbin - 2).any():
        raise RuntimeError(
            "Internal Shape Error")
    return hist, edges