Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.10/site-packages/numpy/lib/_function_base_impl.py: 14%

Shortcuts on this page

r m x   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1311 statements  

1import builtins 

2import collections.abc 

3import functools 

4import re 

5import sys 

6import warnings 

7 

8import numpy as np 

9import numpy._core.numeric as _nx 

10from numpy._core import transpose, overrides 

11from numpy._core.numeric import ( 

12 ones, zeros_like, arange, concatenate, array, asarray, asanyarray, empty, 

13 ndarray, take, dot, where, intp, integer, isscalar, absolute 

14 ) 

15from numpy._core.umath import ( 

16 pi, add, arctan2, frompyfunc, cos, less_equal, sqrt, sin, 

17 mod, exp, not_equal, subtract, minimum 

18 ) 

19from numpy._core.fromnumeric import ( 

20 ravel, nonzero, partition, mean, any, sum 

21 ) 

22from numpy._core.numerictypes import typecodes 

23from numpy.lib._twodim_base_impl import diag 

24from numpy._core.multiarray import ( 

25 _place, bincount, normalize_axis_index, _monotonicity, 

26 interp as compiled_interp, interp_complex as compiled_interp_complex 

27 ) 

28from numpy._core._multiarray_umath import _array_converter 

29from numpy._utils import set_module 

30 

31# needed in this module for compatibility 

32from numpy.lib._histograms_impl import histogram, histogramdd # noqa: F401 

33 

34 

# Dispatch decorator used throughout this module: identical to
# ``overrides.array_function_dispatch`` but with ``module='numpy'``
# pre-bound so the public functions report ``numpy`` as their module.
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')


# Names exported from this module into the public numpy namespace.
__all__ = [
    'select', 'piecewise', 'trim_zeros', 'copy', 'iterable', 'percentile',
    'diff', 'gradient', 'angle', 'unwrap', 'sort_complex', 'flip',
    'rot90', 'extract', 'place', 'vectorize', 'asarray_chkfinite', 'average',
    'bincount', 'digitize', 'cov', 'corrcoef',
    'median', 'sinc', 'hamming', 'hanning', 'bartlett',
    'blackman', 'kaiser', 'trapezoid', 'trapz', 'i0',
    'meshgrid', 'delete', 'insert', 'append', 'interp',
    'quantile'
    ]

49 

50# _QuantileMethods is a dictionary listing all the supported methods to 

51# compute quantile/percentile. 

52# 

53# Below virtual_index refers to the index of the element where the percentile 

54# would be found in the sorted sample. 

55# When the sample contains exactly the percentile wanted, the virtual_index is 

56# an integer to the index of this element. 

57# When the percentile wanted is in between two elements, the virtual_index 

58# is made of a integer part (a.k.a 'i' or 'left') and a fractional part 

59# (a.k.a 'g' or 'gamma') 

60# 

61# Each method in _QuantileMethods has two properties 

62# get_virtual_index : Callable 

63# The function used to compute the virtual_index. 

64# fix_gamma : Callable 

65# A function used for discrete methods to force the index to a specific value. 

# Maps each supported quantile method name to its two callables (see the
# block comment above for the 'virtual_index' / 'gamma' terminology).
_QuantileMethods = {
    # --- HYNDMAN and FAN METHODS
    # Discrete methods
    'inverted_cdf': {
        'get_virtual_index': lambda n, quantiles: _inverted_cdf(n, quantiles),
        'fix_gamma': None,  # should never be called
    },
    'averaged_inverted_cdf': {
        'get_virtual_index': lambda n, quantiles: (n * quantiles) - 1,
        'fix_gamma': lambda gamma, _: _get_gamma_mask(
            shape=gamma.shape,
            default_value=1.,
            conditioned_value=0.5,
            where=gamma == 0),
    },
    'closest_observation': {
        'get_virtual_index': lambda n, quantiles: _closest_observation(
            n, quantiles),
        'fix_gamma': None,  # should never be called
    },
    # Continuous methods
    'interpolated_inverted_cdf': {
        'get_virtual_index': lambda n, quantiles:
            _compute_virtual_index(n, quantiles, 0, 1),
        'fix_gamma': lambda gamma, _: gamma,
    },
    'hazen': {
        'get_virtual_index': lambda n, quantiles:
            _compute_virtual_index(n, quantiles, 0.5, 0.5),
        'fix_gamma': lambda gamma, _: gamma,
    },
    'weibull': {
        'get_virtual_index': lambda n, quantiles:
            _compute_virtual_index(n, quantiles, 0, 0),
        'fix_gamma': lambda gamma, _: gamma,
    },
    # Default method.  ``(n - 1) * quantiles`` is mathematically equivalent
    # to ``_compute_virtual_index(n, quantiles, 1, 1)`` but avoids some
    # rounding issues.
    'linear': {
        'get_virtual_index': lambda n, quantiles: (n - 1) * quantiles,
        'fix_gamma': lambda gamma, _: gamma,
    },
    'median_unbiased': {
        'get_virtual_index': lambda n, quantiles:
            _compute_virtual_index(n, quantiles, 1 / 3.0, 1 / 3.0),
        'fix_gamma': lambda gamma, _: gamma,
    },
    'normal_unbiased': {
        'get_virtual_index': lambda n, quantiles:
            _compute_virtual_index(n, quantiles, 3 / 8.0, 3 / 8.0),
        'fix_gamma': lambda gamma, _: gamma,
    },
    # --- OTHER METHODS
    'lower': {
        'get_virtual_index': lambda n, quantiles: np.floor(
            (n - 1) * quantiles).astype(np.intp),
        'fix_gamma': None,  # should never be called, index dtype is int
    },
    'higher': {
        'get_virtual_index': lambda n, quantiles: np.ceil(
            (n - 1) * quantiles).astype(np.intp),
        'fix_gamma': None,  # should never be called, index dtype is int
    },
    'midpoint': {
        'get_virtual_index': lambda n, quantiles: 0.5 * (
            np.floor((n - 1) * quantiles)
            + np.ceil((n - 1) * quantiles)),
        'fix_gamma': lambda gamma, index: _get_gamma_mask(
            shape=gamma.shape,
            default_value=0.5,
            conditioned_value=0.,
            where=index % 1 == 0),
    },
    'nearest': {
        'get_virtual_index': lambda n, quantiles: np.around(
            (n - 1) * quantiles).astype(np.intp),
        'fix_gamma': None,  # should never be called, index dtype is int
    },
}

147 

148 

def _rot90_dispatcher(m, k=None, axes=None):
    """Yield the single array argument relevant for __array_function__."""
    return (m,)

151 

152 

@array_function_dispatch(_rot90_dispatcher)
def rot90(m, k=1, axes=(0, 1)):
    """
    Rotate an array by 90 degrees in the plane specified by axes.

    Rotation direction is from the first towards the second axis, so a
    2-D array with the default `k` and `axes` is rotated
    counterclockwise.

    Parameters
    ----------
    m : array_like
        Array of two or more dimensions.
    k : integer
        Number of times the array is rotated by 90 degrees.
    axes : (2,) array_like
        The array is rotated in the plane defined by the axes.
        Axes must be different.

    Returns
    -------
    y : ndarray
        A rotated view of `m`.

    See Also
    --------
    flip : Reverse the order of elements in an array along the given axis.
    fliplr : Flip an array horizontally.
    flipud : Flip an array vertically.

    Notes
    -----
    ``rot90(m, k=1, axes=(1,0))`` reverses ``rot90(m, k=1, axes=(0,1))``
    and is equivalent to ``rot90(m, k=-1, axes=(0,1))``.

    Examples
    --------
    >>> import numpy as np
    >>> np.rot90(np.array([[1, 2], [3, 4]]))
    array([[2, 4],
           [1, 3]])
    """
    axes = tuple(axes)
    if len(axes) != 2:
        raise ValueError("len(axes) must be 2.")

    m = asanyarray(m)

    # The two plane axes must name distinct dimensions, also when one is
    # spelled positive and the other negative.
    if axes[0] == axes[1] or absolute(axes[0] - axes[1]) == m.ndim:
        raise ValueError("Axes must be different.")

    if (axes[0] >= m.ndim or axes[0] < -m.ndim
            or axes[1] >= m.ndim or axes[1] < -m.ndim):
        raise ValueError("Axes={} out of range for array of ndim={}."
                         .format(axes, m.ndim))

    k %= 4
    if k == 0:
        # Whole number of turns: just hand back a view.
        return m[:]
    if k == 2:
        # Half turn: reverse both plane axes.
        return flip(flip(m, axes[0]), axes[1])

    # Quarter turns combine one flip with swapping the two plane axes.
    perm = arange(0, m.ndim)
    perm[axes[0]], perm[axes[1]] = perm[axes[1]], perm[axes[0]]

    if k == 1:
        return transpose(flip(m, axes[1]), perm)
    # k == 3
    return flip(transpose(m, perm), axes[1])

242 

243 

def _flip_dispatcher(m, axis=None):
    """Yield the single array argument relevant for __array_function__."""
    return (m,)

246 

247 

@array_function_dispatch(_flip_dispatcher)
def flip(m, axis=None):
    """
    Reverse the order of elements in an array along the given axis.

    The shape of the array is preserved, but the elements are reordered.

    Parameters
    ----------
    m : array_like
        Input array.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to flip over. The default,
        ``axis=None``, flips over all of the axes of the input array.
        Negative values count from the last axis; a tuple of ints flips
        each listed axis.

    Returns
    -------
    out : array_like
        A view of `m` with the entries of axis reversed. Since a view is
        returned, this operation is done in constant time.

    See Also
    --------
    flipud : Flip an array vertically (axis=0).
    fliplr : Flip an array horizontally (axis=1).

    Notes
    -----
    ``flip(m, n)`` corresponds to ``m[...,::-1,...]`` with ``::-1`` at
    position ``n``; ``flip(m)`` reverses every axis and
    ``flip(m, (0, 1))`` reverses axes 0 and 1.

    Examples
    --------
    >>> import numpy as np
    >>> A = np.arange(8).reshape((2, 2, 2))
    >>> np.flip(A, 0)
    array([[[4, 5],
            [6, 7]],
           [[0, 1],
            [2, 3]]])
    """
    if not hasattr(m, 'ndim'):
        m = asarray(m)
    rev = slice(None, None, -1)
    if axis is None:
        # Reverse every axis.
        index = (rev,) * m.ndim
    else:
        to_flip = set(_nx.normalize_axis_tuple(axis, m.ndim))
        index = tuple(rev if ax in to_flip else slice(None)
                      for ax in range(m.ndim))
    return m[index]

337 

338 

339@set_module('numpy') 

340def iterable(y): 

341 """ 

342 Check whether or not an object can be iterated over. 

343 

344 Parameters 

345 ---------- 

346 y : object 

347 Input object. 

348 

349 Returns 

350 ------- 

351 b : bool 

352 Return ``True`` if the object has an iterator method or is a 

353 sequence and ``False`` otherwise. 

354 

355 

356 Examples 

357 -------- 

358 >>> import numpy as np 

359 >>> np.iterable([1, 2, 3]) 

360 True 

361 >>> np.iterable(2) 

362 False 

363 

364 Notes 

365 ----- 

366 In most cases, the results of ``np.iterable(obj)`` are consistent with 

367 ``isinstance(obj, collections.abc.Iterable)``. One notable exception is 

368 the treatment of 0-dimensional arrays:: 

369 

370 >>> from collections.abc import Iterable 

371 >>> a = np.array(1.0) # 0-dimensional numpy array 

372 >>> isinstance(a, Iterable) 

373 True 

374 >>> np.iterable(a) 

375 False 

376 

377 """ 

378 try: 

379 iter(y) 

380 except TypeError: 

381 return False 

382 return True 

383 

384 

def _weights_are_valid(weights, a, axis):
    """Validate `weights` against `a` and return it broadcastable along `axis`.

    Assumes weights is not None.
    """
    wgt = np.asanyarray(weights)

    # Same shape: nothing to check or reshape.
    if a.shape == wgt.shape:
        return wgt

    if axis is None:
        raise TypeError(
            "Axis must be specified when shapes of a and weights "
            "differ.")
    if wgt.shape != tuple(a.shape[ax] for ax in axis):
        raise ValueError(
            "Shape of weights must be consistent with "
            "shape of a along specified axis.")

    # Reorder the weight dimensions to ascending-axis order, then insert
    # size-1 dimensions so wgt broadcasts against a.
    wgt = wgt.transpose(np.argsort(axis))
    new_shape = tuple((dim if ax in axis else 1)
                      for ax, dim in enumerate(a.shape))
    return wgt.reshape(new_shape)

408 

409 

def _average_dispatcher(a, axis=None, weights=None, returned=None, *,
                        keepdims=None):
    # Both the data and the weights may override __array_function__.
    return (a, weights)

413 

414 

@array_function_dispatch(_average_dispatcher)
def average(a, axis=None, weights=None, returned=False, *,
            keepdims=np._NoValue):
    """
    Compute the weighted average along the specified axis.

    Parameters
    ----------
    a : array_like
        Data to be averaged; converted to an array if necessary.
    axis : None or int or tuple of ints, optional
        Axis or axes along which to average `a`. The default,
        ``axis=None``, averages over all elements. Negative values count
        from the last axis; a tuple averages over all listed axes.
    weights : array_like, optional
        Weights associated with the values in `a`. Must have the same
        shape as `a` when no axis is given, otherwise a shape consistent
        with `a` along the specified axis. The computation is::

            avg = sum(a * weights) / sum(weights)

        so ``sum(weights)`` must not be 0. When omitted, every element
        of `a` has weight one.
    returned : bool, optional
        Default is `False`. If `True`, the tuple
        (`average`, `sum_of_weights`) is returned instead of only the
        average; with ``weights=None``, `sum_of_weights` equals the
        number of averaged elements.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as dimensions
        of size one so it broadcasts against `a`. *Note:* `keepdims`
        does not work with instances of `numpy.matrix` or other classes
        whose methods do not support `keepdims`.

        .. versionadded:: 1.23.0

    Returns
    -------
    retval, [sum_of_weights] : array_type or double
        The average along the axis, and — when `returned` is `True` —
        the sum of weights as a second element of the same type.
        Integer or boolean `a` yields at least ``float64``; otherwise
        the dtype follows ordinary promotion of `a` and `weights`.

    Raises
    ------
    ZeroDivisionError
        When all weights along an axis are zero. See `numpy.ma.average`
        for a version robust to this type of error.
    TypeError
        When `weights` does not have the same shape as `a` and
        ``axis=None``.
    ValueError
        When `weights` is not consistent with `a` along the specified
        `axis`.

    See Also
    --------
    mean
    ma.average : average for masked arrays -- useful if your data
                 contains "missing" values
    numpy.result_type : Returns the type that results from applying the
                        numpy type promotion rules to the arguments.

    Examples
    --------
    >>> import numpy as np
    >>> np.average(np.arange(1, 5))
    2.5
    >>> np.average(np.arange(1, 11), weights=np.arange(10, 0, -1))
    4.0
    """
    a = np.asanyarray(a)

    if axis is not None:
        axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis")

    # Forward `keepdims` only when the caller actually supplied it.
    keepdims_kw = {} if keepdims is np._NoValue else {'keepdims': keepdims}

    if weights is None:
        avg = a.mean(axis, **keepdims_kw)
        avg_as_array = np.asanyarray(avg)
        # Implicit weight of one for each averaged element.
        scl = avg_as_array.dtype.type(a.size / avg_as_array.size)
    else:
        wgt = _weights_are_valid(weights=weights, a=a, axis=axis)

        # Integer/boolean data promotes to at least float64.
        if issubclass(a.dtype.type, (np.integer, np.bool)):
            result_dtype = np.result_type(a.dtype, wgt.dtype, 'f8')
        else:
            result_dtype = np.result_type(a.dtype, wgt.dtype)

        scl = wgt.sum(axis=axis, dtype=result_dtype, **keepdims_kw)
        if np.any(scl == 0.0):
            raise ZeroDivisionError(
                "Weights sum to zero, can't be normalized")

        avg_as_array = np.multiply(
            a, wgt, dtype=result_dtype).sum(axis, **keepdims_kw) / scl
        avg = avg_as_array

    if not returned:
        return avg
    if scl.shape != avg_as_array.shape:
        scl = np.broadcast_to(scl, avg_as_array.shape).copy()
    return avg, scl

577 

578 

579@set_module('numpy') 

580def asarray_chkfinite(a, dtype=None, order=None): 

581 """Convert the input to an array, checking for NaNs or Infs. 

582 

583 Parameters 

584 ---------- 

585 a : array_like 

586 Input data, in any form that can be converted to an array. This 

587 includes lists, lists of tuples, tuples, tuples of tuples, tuples 

588 of lists and ndarrays. Success requires no NaNs or Infs. 

589 dtype : data-type, optional 

590 By default, the data-type is inferred from the input data. 

591 order : {'C', 'F', 'A', 'K'}, optional 

592 Memory layout. 'A' and 'K' depend on the order of input array a. 

593 'C' row-major (C-style), 

594 'F' column-major (Fortran-style) memory representation. 

595 'A' (any) means 'F' if `a` is Fortran contiguous, 'C' otherwise 

596 'K' (keep) preserve input order 

597 Defaults to 'C'. 

598 

599 Returns 

600 ------- 

601 out : ndarray 

602 Array interpretation of `a`. No copy is performed if the input 

603 is already an ndarray. If `a` is a subclass of ndarray, a base 

604 class ndarray is returned. 

605 

606 Raises 

607 ------ 

608 ValueError 

609 Raises ValueError if `a` contains NaN (Not a Number) or Inf (Infinity). 

610 

611 See Also 

612 -------- 

613 asarray : Create and array. 

614 asanyarray : Similar function which passes through subclasses. 

615 ascontiguousarray : Convert input to a contiguous array. 

616 asfortranarray : Convert input to an ndarray with column-major 

617 memory order. 

618 fromiter : Create an array from an iterator. 

619 fromfunction : Construct an array by executing a function on grid 

620 positions. 

621 

622 Examples 

623 -------- 

624 >>> import numpy as np 

625 

626 Convert a list into an array. If all elements are finite, then 

627 ``asarray_chkfinite`` is identical to ``asarray``. 

628 

629 >>> a = [1, 2] 

630 >>> np.asarray_chkfinite(a, dtype=float) 

631 array([1., 2.]) 

632 

633 Raises ValueError if array_like contains Nans or Infs. 

634 

635 >>> a = [1, 2, np.inf] 

636 >>> try: 

637 ... np.asarray_chkfinite(a) 

638 ... except ValueError: 

639 ... print('ValueError') 

640 ... 

641 ValueError 

642 

643 """ 

644 a = asarray(a, dtype=dtype, order=order) 

645 if a.dtype.char in typecodes['AllFloat'] and not np.isfinite(a).all(): 

646 raise ValueError( 

647 "array must not contain infs or NaNs") 

648 return a 

649 

650 

def _piecewise_dispatcher(x, condlist, funclist, *args, **kw):
    """Yield the dispatch-relevant arguments of piecewise."""
    yield x
    # condlist may (undocumented behavior) be a bare scalar condition,
    # in which case there is nothing further to yield.
    if np.iterable(condlist):
        for cond in condlist:
            yield cond

656 

657 

@array_function_dispatch(_piecewise_dispatcher)
def piecewise(x, condlist, funclist, *args, **kw):
    """
    Evaluate a piecewise-defined function.

    Given a set of conditions and corresponding functions, evaluate each
    function on the input data wherever its condition is true.

    Parameters
    ----------
    x : ndarray or scalar
        The input domain.
    condlist : list of bool arrays or bool scalars
        Each boolean array selects the piece of `x` on which
        ``funclist[i]`` applies and should therefore have the same shape
        as `x`. When ``len(funclist) == len(condlist) + 1``, the extra
        function is the default value, used wherever all conditions are
        false.
    funclist : list of callables, f(x,*args,**kw), or scalars
        Functions evaluated over the selected pieces of `x`; each should
        accept a 1d array and return a 1d array or scalar. A scalar
        entry stands for the constant function ``lambda x: scalar``.
    args : tuple, optional
        Further positional arguments passed through to every function
        call, e.g. ``piecewise(..., ..., 1, 'a')`` calls
        ``f(x, 1, 'a')``.
    kw : dict, optional
        Keyword arguments passed through to every function call, e.g.
        ``piecewise(..., ..., alpha=1)`` calls ``f(x, alpha=1)``.

    Returns
    -------
    out : ndarray
        Same shape and type as `x`; positions not covered by any
        condition default to 0.

    See Also
    --------
    choose, select, where

    Examples
    --------
    >>> import numpy as np
    >>> x = np.linspace(-2.5, 2.5, 6)
    >>> np.piecewise(x, [x < 0, x >= 0], [-1, 1])
    array([-1., -1., -1.,  1.,  1.,  1.])
    """
    x = asanyarray(x)
    n2 = len(funclist)

    # Undocumented: a bare condition is promoted to a one-element list.
    if isscalar(condlist) or (
            not isinstance(condlist[0], (list, ndarray)) and x.ndim != 0):
        condlist = [condlist]

    condlist = asarray(condlist, dtype=bool)
    n = len(condlist)

    if n == n2 - 1:
        # One extra function: synthesize the "otherwise" condition.
        condelse = ~np.any(condlist, axis=0, keepdims=True)
        condlist = np.concatenate([condlist, condelse], axis=0)
        n += 1
    elif n != n2:
        raise ValueError(
            "with {} condition(s), either {} or {} functions are expected"
            .format(n, n, n+1)
        )

    out = zeros_like(x)
    for cond, func in zip(condlist, funclist):
        if isinstance(func, collections.abc.Callable):
            vals = x[cond]
            # Skip the call entirely when this piece is empty.
            if vals.size > 0:
                out[cond] = func(vals, *args, **kw)
        else:
            # A scalar entry acts as a constant function.
            out[cond] = func

    return out

779 

780 

def _select_dispatcher(condlist, choicelist, default=None):
    """Yield all condition and choice arrays for dispatch."""
    for cond in condlist:
        yield cond
    for choice in choicelist:
        yield choice

784 

785 

@array_function_dispatch(_select_dispatcher)
def select(condlist, choicelist, default=0):
    """
    Return an array drawn from elements in choicelist, depending on conditions.

    Parameters
    ----------
    condlist : list of bool ndarrays
        Conditions determining from which array in `choicelist` each
        output element is taken. When several conditions are satisfied,
        the first one encountered in `condlist` wins.
    choicelist : list of ndarrays
        Arrays from which the output elements are taken; must have the
        same length as `condlist`.
    default : scalar, optional
        The element inserted in `output` when all conditions evaluate
        to False.

    Returns
    -------
    output : ndarray
        The output at position m is the m-th element of the array in
        `choicelist` whose corresponding condition is True at m.

    See Also
    --------
    where : Return elements from one of two arrays depending on condition.
    take, choose, compress, diag, diagonal

    Examples
    --------
    >>> import numpy as np
    >>> x = np.arange(6)
    >>> np.select([x < 3, x > 3], [x, x**2], 42)
    array([ 0,  1,  2, 42, 16, 25])
    """
    if len(condlist) != len(choicelist):
        raise ValueError(
            'list of cases must be same length as list of conditions')

    if len(condlist) == 0:
        raise ValueError("select with an empty condition list is not possible")

    # TODO: Keep bare Python int/float/complex as-is so `result_type`
    # sees them as weakly typed under NEP 50; a better spelling may
    # replace this eventually.
    def _as_choice(value):
        return value if type(value) in (int, float, complex) \
            else np.asarray(value)

    choicelist = [_as_choice(choice) for choice in choicelist]
    choicelist.append(_as_choice(default))

    try:
        dtype = np.result_type(*choicelist)
    except TypeError as e:
        msg = f'Choicelist and default value do not have a common dtype: {e}'
        raise TypeError(msg) from None

    # Broadcast conditions and choices separately; the result shape is
    # needed below and this optimizes the all-scalar-choices case.
    condlist = np.broadcast_arrays(*condlist)
    choicelist = np.broadcast_arrays(*choicelist)

    # Every condition must be a genuine boolean array.
    for i, cond in enumerate(condlist):
        if cond.dtype.type is not np.bool:
            raise TypeError(
                'invalid entry {} in condlist: should be boolean ndarray'.format(i))

    if choicelist[0].ndim == 0:
        # This may be common, so avoid the call.
        result_shape = condlist[0].shape
    else:
        result_shape = np.broadcast_arrays(condlist[0], choicelist[0])[0].shape

    # Start from the default (last entry) and burn each choice onto the
    # result in reverse order so earlier conditions take precedence.
    result = np.full(result_shape, choicelist[-1], dtype)
    for choice, cond in zip(choicelist[-2::-1], condlist[::-1]):
        np.copyto(result, choice, where=cond)

    return result

892 

893 

def _copy_dispatcher(a, order=None, subok=None):
    """Yield the single array argument relevant for __array_function__."""
    return (a,)

896 

897 

@array_function_dispatch(_copy_dispatcher)
def copy(a, order='K', subok=False):
    """
    Return an array copy of the given object.

    Parameters
    ----------
    a : array_like
        Input data.
    order : {'C', 'F', 'A', 'K'}, optional
        Controls the memory layout of the copy. 'C' means C-order, 'F'
        means F-order, 'A' means 'F' if `a` is Fortran contiguous and
        'C' otherwise, and 'K' matches the layout of `a` as closely as
        possible. (This function and :meth:`ndarray.copy` are very
        similar but use different defaults for their order= arguments.)
    subok : bool, optional
        If True, sub-classes are passed through; otherwise the returned
        array is forced to be a base-class array (default False).

    Returns
    -------
    arr : ndarray
        Array interpretation of `a`.

    See Also
    --------
    ndarray.copy : Preferred method for creating an array copy

    Notes
    -----
    This is equivalent to:

    >>> np.array(a, copy=True)  #doctest: +SKIP

    The copy is shallow: for object dtype the new array points to the
    same objects. It also clears a previously set WRITEABLE=False flag.

    Examples
    --------
    >>> import numpy as np
    >>> x = np.array([1, 2, 3])
    >>> z = np.copy(x)
    >>> x[0] = 10
    >>> z[0]
    np.int64(1)
    """
    # Forwarding to `array` with copy=True performs the actual copy.
    return array(a, copy=True, order=order, subok=subok)

967 

968# Basic operations 

969 

970 

def _gradient_dispatcher(f, *varargs, axis=None, edge_order=None):
    """Yield the dispatch-relevant arguments of `gradient`."""
    # Both the sampled array and every spacing argument may carry an
    # __array_function__ implementation.
    yield from (f, *varargs)

974 

975 

@array_function_dispatch(_gradient_dispatcher)
def gradient(f, *varargs, axis=None, edge_order=1):
    """
    Return the gradient of an N-dimensional array.

    The gradient is computed using second order accurate central differences
    in the interior points and either first or second order accurate one-sided
    (forward or backwards) differences at the boundaries.
    The returned gradient hence has the same shape as the input array.

    Parameters
    ----------
    f : array_like
        An N-dimensional array containing samples of a scalar function.
    varargs : list of scalar or array, optional
        Spacing between f values. Default unitary spacing for all dimensions.
        Spacing can be specified using:

        1. single scalar to specify a sample distance for all dimensions.
        2. N scalars to specify a constant sample distance for each dimension.
           i.e. `dx`, `dy`, `dz`, ...
        3. N arrays to specify the coordinates of the values along each
           dimension of F. The length of the array must match the size of
           the corresponding dimension.
        4. Any combination of N scalars/arrays with the meaning of 2. and 3.

        If `axis` is given, the number of varargs must equal the number of
        axes.  Default: 1.
    edge_order : {1, 2}, optional
        Gradient is calculated using N-th order accurate differences
        at the boundaries. Default: 1.
    axis : None or int or tuple of ints, optional
        Gradient is calculated only along the given axis or axes.
        The default (axis = None) is to calculate the gradient for all the
        axes of the input array. axis may be negative, in which case it
        counts from the last to the first axis.

    Returns
    -------
    gradient : ndarray or tuple of ndarray
        A tuple of ndarrays (or a single ndarray if there is only one
        dimension) corresponding to the derivatives of f with respect
        to each dimension. Each derivative has the same shape as f.

    Examples
    --------
    >>> import numpy as np
    >>> f = np.array([1, 2, 4, 7, 11, 16])
    >>> np.gradient(f)
    array([1. , 1.5, 2.5, 3.5, 4.5, 5. ])
    >>> np.gradient(f, 2)
    array([0.5 , 0.75, 1.25, 1.75, 2.25, 2.5 ])

    Spacing can also be given as coordinate arrays, possibly non-uniform:

    >>> x = np.array([0., 1., 1.5, 3.5, 4., 6.])
    >>> np.gradient(f, x)
    array([1. , 3. , 3.5, 6.7, 6.9, 2.5])

    For two dimensional arrays, the return will be two arrays ordered by
    axis:

    >>> np.gradient(np.array([[1, 2, 6], [3, 4, 5]]))
    (array([[ 2.,  2., -1.],
            [ 2.,  2., -1.]]),
     array([[1. , 2.5, 4. ],
            [1. , 1. , 1. ]]))

    Notes
    -----
    For non-uniform spacing the interior estimate is obtained by minimizing
    the consistency error of a three-point stencil built from the Taylor
    expansions of the neighbouring samples; when the spacing is uniform this
    reduces to the standard second order central difference
    ``(f[i+1] - f[i-1]) / (2*h)``.  Similar one-sided formulas are used for
    the boundaries.  See Fornberg (1988), "Generation of Finite Difference
    Formulas on Arbitrarily Spaced Grids", Math. Comp. 51, 699-706.
    """
    f = np.asanyarray(f)
    N = f.ndim  # number of dimensions

    if axis is None:
        axes = tuple(range(N))
    else:
        axes = _nx.normalize_axis_tuple(axis, N)

    len_axes = len(axes)
    n = len(varargs)
    if n == 0:
        # no spacing argument - use 1 in all axes
        dx = [1.0] * len_axes
    elif n == 1 and np.ndim(varargs[0]) == 0:
        # single scalar for all axes
        dx = varargs * len_axes
    elif n == len_axes:
        # scalar or 1d array for each axis
        dx = list(varargs)
        for i, distances in enumerate(dx):
            distances = np.asanyarray(distances)
            if distances.ndim == 0:
                continue
            elif distances.ndim != 1:
                raise ValueError("distances must be either scalars or 1d")
            if len(distances) != f.shape[axes[i]]:
                raise ValueError("when 1d, distances must match "
                                 "the length of the corresponding dimension")
            if np.issubdtype(distances.dtype, np.integer):
                # Convert numpy integer types to float64 to avoid modular
                # arithmetic in np.diff(distances).
                distances = distances.astype(np.float64)
            diffx = np.diff(distances)
            # if distances are constant reduce to the scalar case
            # since it brings a consistent speedup
            if (diffx == diffx[0]).all():
                diffx = diffx[0]
            dx[i] = diffx
    else:
        raise TypeError("invalid number of arguments")

    if edge_order > 2:
        raise ValueError("'edge_order' greater than 2 not supported")

    # use central differences on interior and one-sided differences on the
    # endpoints. This preserves second order-accuracy over the full domain.

    outvals = []

    # create slice objects --- initially all are [:, :, ..., :]
    slice1 = [slice(None)]*N
    slice2 = [slice(None)]*N
    slice3 = [slice(None)]*N
    slice4 = [slice(None)]*N

    otype = f.dtype
    if otype.type is np.datetime64:
        # the timedelta dtype with the same unit information
        otype = np.dtype(otype.name.replace('datetime', 'timedelta'))
        # view as timedelta to allow addition
        f = f.view(otype)
    elif otype.type is np.timedelta64:
        pass
    elif np.issubdtype(otype, np.inexact):
        pass
    else:
        # All other types convert to floating point.
        # First check if f is a numpy integer type; if so, convert f to float64
        # to avoid modular arithmetic when computing the changes in f.
        if np.issubdtype(otype, np.integer):
            f = f.astype(np.float64)
        otype = np.float64

    for axis, ax_dx in zip(axes, dx):
        if f.shape[axis] < edge_order + 1:
            raise ValueError(
                "Shape of array too small to calculate a numerical gradient, "
                "at least (edge_order + 1) elements are required.")
        # result allocation
        out = np.empty_like(f, dtype=otype)

        # spacing for the current axis: scalar spacing uses the cheap
        # uniform formulas, a 1d array the general three-point stencil
        uniform_spacing = np.ndim(ax_dx) == 0

        # Numerical differentiation: 2nd order interior
        slice1[axis] = slice(1, -1)
        slice2[axis] = slice(None, -2)
        slice3[axis] = slice(1, -1)
        slice4[axis] = slice(2, None)

        if uniform_spacing:
            out[tuple(slice1)] = (f[tuple(slice4)] - f[tuple(slice2)]) / (2. * ax_dx)
        else:
            dx1 = ax_dx[0:-1]
            dx2 = ax_dx[1:]
            a = -(dx2)/(dx1 * (dx1 + dx2))
            b = (dx2 - dx1) / (dx1 * dx2)
            c = dx1 / (dx2 * (dx1 + dx2))
            # fix the shape for broadcasting
            shape = np.ones(N, dtype=int)
            shape[axis] = -1
            a.shape = b.shape = c.shape = shape
            # 1D equivalent -- out[1:-1] = a * f[:-2] + b * f[1:-1] + c * f[2:]
            out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]

        # Numerical differentiation: 1st order edges
        if edge_order == 1:
            slice1[axis] = 0
            slice2[axis] = 1
            slice3[axis] = 0
            dx_0 = ax_dx if uniform_spacing else ax_dx[0]
            # 1D equivalent -- out[0] = (f[1] - f[0]) / (x[1] - x[0])
            out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_0

            slice1[axis] = -1
            slice2[axis] = -1
            slice3[axis] = -2
            dx_n = ax_dx if uniform_spacing else ax_dx[-1]
            # 1D equivalent -- out[-1] = (f[-1] - f[-2]) / (x[-1] - x[-2])
            out[tuple(slice1)] = (f[tuple(slice2)] - f[tuple(slice3)]) / dx_n

        # Numerical differentiation: 2nd order edges
        else:
            slice1[axis] = 0
            slice2[axis] = 0
            slice3[axis] = 1
            slice4[axis] = 2
            if uniform_spacing:
                a = -1.5 / ax_dx
                b = 2. / ax_dx
                c = -0.5 / ax_dx
            else:
                dx1 = ax_dx[0]
                dx2 = ax_dx[1]
                a = -(2. * dx1 + dx2)/(dx1 * (dx1 + dx2))
                b = (dx1 + dx2) / (dx1 * dx2)
                c = - dx1 / (dx2 * (dx1 + dx2))
            # 1D equivalent -- out[0] = a * f[0] + b * f[1] + c * f[2]
            out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]

            slice1[axis] = -1
            slice2[axis] = -3
            slice3[axis] = -2
            slice4[axis] = -1
            if uniform_spacing:
                a = 0.5 / ax_dx
                b = -2. / ax_dx
                c = 1.5 / ax_dx
            else:
                dx1 = ax_dx[-2]
                dx2 = ax_dx[-1]
                a = (dx2) / (dx1 * (dx1 + dx2))
                b = - (dx2 + dx1) / (dx1 * dx2)
                c = (2. * dx2 + dx1) / (dx2 * (dx1 + dx2))
            # 1D equivalent -- out[-1] = a * f[-3] + b * f[-2] + c * f[-1]
            out[tuple(slice1)] = a * f[tuple(slice2)] + b * f[tuple(slice3)] + c * f[tuple(slice4)]

        outvals.append(out)

        # reset the slice object in this dimension to ":"
        slice1[axis] = slice(None)
        slice2[axis] = slice(None)
        slice3[axis] = slice(None)
        slice4[axis] = slice(None)

    if len_axes == 1:
        return outvals[0]
    return tuple(outvals)

1363 

1364 

def _diff_dispatcher(a, n=None, axis=None, prepend=None, append=None):
    """Yield the dispatch-relevant arguments of `diff`."""
    # `a`, `prepend` and `append` are array-likes; `n` and `axis` are plain
    # options and are excluded from dispatch.
    return a, prepend, append

1367 

1368 

@array_function_dispatch(_diff_dispatcher)
def diff(a, n=1, axis=-1, prepend=np._NoValue, append=np._NoValue):
    """
    Calculate the n-th discrete difference along the given axis.

    The first difference is given by ``out[i] = a[i+1] - a[i]`` along
    the given axis, higher differences are calculated by using `diff`
    recursively.

    Parameters
    ----------
    a : array_like
        Input array
    n : int, optional
        The number of times values are differenced. If zero, the input
        is returned as-is.
    axis : int, optional
        The axis along which the difference is taken, default is the
        last axis.
    prepend, append : array_like, optional
        Values to prepend or append to `a` along axis prior to
        performing the difference.  Scalar values are expanded to
        arrays with length 1 in the direction of axis and the shape
        of the input array along all other axes.  Otherwise the
        dimension and shape must match `a` except along axis.

    Returns
    -------
    diff : ndarray
        The n-th differences. The shape of the output is the same as `a`
        except along `axis` where the dimension is smaller by `n`. The
        type of the output is the same as the type of the difference
        between any two elements of `a`. This is the same as the type of
        `a` in most cases. A notable exception is `datetime64`, which
        results in a `timedelta64` output array.

    See Also
    --------
    gradient, ediff1d, cumsum

    Notes
    -----
    Type is preserved for boolean arrays, so the result will contain
    `False` when consecutive elements are the same and `True` when they
    differ.

    For unsigned integer arrays, the results will also be unsigned
    (modular arithmetic); cast to a larger signed integer type first if
    that is not desirable.

    Examples
    --------
    >>> import numpy as np
    >>> x = np.array([1, 2, 4, 7, 0])
    >>> np.diff(x)
    array([ 1,  2,  3, -7])
    >>> np.diff(x, n=2)
    array([  1,   1, -10])

    >>> x = np.array([[1, 3, 6, 10], [0, 5, 6, 8]])
    >>> np.diff(x)
    array([[2, 3, 4],
           [5, 1, 2]])
    >>> np.diff(x, axis=0)
    array([[-1,  2,  0, -2]])
    """
    if n == 0:
        # Documented behavior: the input is returned untouched (it is not
        # even converted to an ndarray).
        return a
    if n < 0:
        raise ValueError(
            "order must be non-negative but got " + repr(n))

    a = asanyarray(a)
    nd = a.ndim
    if nd == 0:
        raise ValueError("diff requires input that is at least one dimensional")
    axis = normalize_axis_index(axis, nd)

    # Assemble [prepend, a, append] along `axis`.  Scalar prepend/append
    # values are broadcast to a length-1 slab of the right shape first.
    combined = []
    if prepend is not np._NoValue:
        prepend = np.asanyarray(prepend)
        if prepend.ndim == 0:
            shape = list(a.shape)
            shape[axis] = 1
            prepend = np.broadcast_to(prepend, tuple(shape))
        combined.append(prepend)

    combined.append(a)

    if append is not np._NoValue:
        append = np.asanyarray(append)
        if append.ndim == 0:
            shape = list(a.shape)
            shape[axis] = 1
            append = np.broadcast_to(append, tuple(shape))
        combined.append(append)

    if len(combined) > 1:
        a = np.concatenate(combined, axis)

    # slice1 selects a[..., 1:, ...] and slice2 a[..., :-1, ...] along `axis`.
    slice1 = [slice(None)] * nd
    slice2 = [slice(None)] * nd
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    slice1 = tuple(slice1)
    slice2 = tuple(slice2)

    # Boolean input keeps a boolean result by using != instead of -.
    op = not_equal if a.dtype == np.bool else subtract
    for _ in range(n):
        a = op(a[slice1], a[slice2])

    return a

1499 

1500 

def _interp_dispatcher(x, xp, fp, left=None, right=None, period=None):
    """Yield the dispatch-relevant arguments of `interp`."""
    # The evaluation points and both coordinate sequences participate in
    # dispatch; scalar options do not.
    return x, xp, fp

1503 

1504 

@array_function_dispatch(_interp_dispatcher)
def interp(x, xp, fp, left=None, right=None, period=None):
    """
    One-dimensional linear interpolation for monotonically increasing
    sample points.

    Returns the one-dimensional piecewise linear interpolant to a function
    with given discrete data points (`xp`, `fp`), evaluated at `x`.

    Parameters
    ----------
    x : array_like
        The x-coordinates at which to evaluate the interpolated values.
    xp : 1-D sequence of floats
        The x-coordinates of the data points, must be increasing if
        argument `period` is not specified. Otherwise, `xp` is internally
        sorted after normalizing the periodic boundaries with
        ``xp = xp % period``.
    fp : 1-D sequence of float or complex
        The y-coordinates of the data points, same length as `xp`.
    left : optional float or complex corresponding to fp
        Value to return for `x < xp[0]`, default is `fp[0]`.
    right : optional float or complex corresponding to fp
        Value to return for `x > xp[-1]`, default is `fp[-1]`.
    period : None or float, optional
        A period for the x-coordinates. This parameter allows the proper
        interpolation of angular x-coordinates. Parameters `left` and
        `right` are ignored if `period` is specified.

    Returns
    -------
    y : float or complex (corresponding to fp) or ndarray
        The interpolated values, same shape as `x`.

    Raises
    ------
    ValueError
        If `xp` and `fp` have different length;
        if `xp` or `fp` are not 1-D sequences;
        if ``period == 0``.

    See Also
    --------
    scipy.interpolate

    Warnings
    --------
    The x-coordinate sequence is expected to be increasing, but this is
    not explicitly enforced. If the sequence `xp` is non-increasing,
    interpolation results are meaningless. Since NaN is unsortable, `xp`
    also cannot contain NaNs. A simple check for `xp` being strictly
    increasing is ``np.all(np.diff(xp) > 0)``.

    Examples
    --------
    >>> import numpy as np
    >>> xp = [1, 2, 3]
    >>> fp = [3, 2, 0]
    >>> np.interp(2.5, xp, fp)
    1.0
    >>> np.interp([0, 1, 1.5, 2.72, 3.14], xp, fp)
    array([3.  , 3.  , 2.5 , 0.56, 0.  ])
    >>> np.interp(3.14, xp, fp, right=-99.0)
    -99.0
    """

    # Inspect fp first: a complex-valued fp selects the complex compiled
    # kernel and complex working dtype.
    fp = np.asarray(fp)

    if np.iscomplexobj(fp):
        interp_func = compiled_interp_complex
        input_dtype = np.complex128
    else:
        interp_func = compiled_interp
        input_dtype = np.float64

    if period is not None:
        if period == 0:
            raise ValueError("period must be a non-zero value")
        period = abs(period)
        # `left`/`right` are documented to be ignored in periodic mode.
        left = None
        right = None

        x = np.asarray(x, dtype=np.float64)
        xp = np.asarray(xp, dtype=np.float64)
        fp = np.asarray(fp, dtype=input_dtype)

        if xp.ndim != 1 or fp.ndim != 1:
            raise ValueError("Data points must be 1-D sequences")
        if xp.shape[0] != fp.shape[0]:
            raise ValueError("fp and xp are not of the same length")
        # normalizing periodic boundaries
        x = x % period
        xp = xp % period
        asort_xp = np.argsort(xp)
        xp = xp[asort_xp]
        fp = fp[asort_xp]
        # Pad one wrapped-around sample on each end so the compiled kernel
        # interpolates correctly across the period boundary.
        xp = np.concatenate((xp[-1:]-period, xp, xp[0:1]+period))
        fp = np.concatenate((fp[-1:], fp, fp[0:1]))

    return interp_func(x, xp, fp, left, right)

1643 

1644 

def _angle_dispatcher(z, deg=None):
    """Yield the dispatch-relevant argument of `angle`."""
    # Only the input value participates in dispatch; `deg` is a flag.
    return z,

1647 

1648 

@array_function_dispatch(_angle_dispatcher)
def angle(z, deg=False):
    """
    Return the angle of the complex argument.

    Parameters
    ----------
    z : array_like
        A complex number or sequence of complex numbers.
    deg : bool, optional
        Return angle in degrees if True, radians if False (default).

    Returns
    -------
    angle : ndarray or scalar
        The counterclockwise angle from the positive real axis on the
        complex plane in the range ``(-pi, pi]``, with dtype as
        numpy.float64.

    See Also
    --------
    arctan2
    absolute

    Notes
    -----
    This function passes the imaginary and real parts of the argument to
    `arctan2` to compute the result; consequently, it follows the
    convention of `arctan2` when the magnitude of the argument is zero.

    Examples
    --------
    >>> import numpy as np
    >>> np.angle([1.0, 1.0j, 1+1j])               # in radians
    array([ 0.        ,  1.57079633,  0.78539816])  # may vary
    >>> np.angle(1+1j, deg=True)                  # in degrees
    45.0
    """
    z = asanyarray(z)
    if issubclass(z.dtype.type, _nx.complexfloating):
        imag_part, real_part = z.imag, z.real
    else:
        # Real input: treat it as lying on the real axis.
        imag_part, real_part = 0, z

    result = arctan2(imag_part, real_part)
    if deg:
        result *= 180/pi
    return result

1701 

1702 

def _unwrap_dispatcher(p, discont=None, axis=None, *, period=None):
    """Yield the dispatch-relevant argument of `unwrap`."""
    # Only the phase array participates in dispatch.
    return p,

1705 

1706 

@array_function_dispatch(_unwrap_dispatcher)
def unwrap(p, discont=None, axis=-1, *, period=2*pi):
    r"""
    Unwrap by taking the complement of large deltas with respect to the
    period.

    This unwraps a signal `p` by changing elements which have an absolute
    difference from their predecessor of more than
    ``max(discont, period/2)`` to their `period`-complementary values.

    For the default case where `period` is :math:`2\pi` and `discont` is
    :math:`\pi`, this unwraps a radian phase `p` such that adjacent
    differences are never greater than :math:`\pi` by adding
    :math:`2k\pi` for some integer :math:`k`.

    Parameters
    ----------
    p : array_like
        Input array.
    discont : float, optional
        Maximum discontinuity between values, default is ``period/2``.
        Values below ``period/2`` are treated as if they were
        ``period/2``. To have an effect different from the default,
        `discont` should be larger than ``period/2``.
    axis : int, optional
        Axis along which unwrap will operate, default is the last axis.
    period : float, optional
        Size of the range over which the input wraps. By default, it is
        ``2 pi``.

        .. versionadded:: 1.21.0

    Returns
    -------
    out : ndarray
        Output array.

    See Also
    --------
    rad2deg, deg2rad

    Notes
    -----
    If the discontinuity in `p` is smaller than ``period/2``, but larger
    than `discont`, no unwrapping is done because taking the complement
    would only make the discontinuity larger.

    Examples
    --------
    >>> import numpy as np
    >>> phase = np.linspace(0, np.pi, num=5)
    >>> phase[3:] += np.pi
    >>> np.unwrap(phase)
    array([ 0.        ,  0.78539816,  1.57079633, -0.78539816,  0.        ]) # may vary
    >>> np.unwrap([0, 1, 2, -1, 0], period=4)
    array([0, 1, 2, 3, 4])
    """
    p = asarray(p)
    nd = p.ndim
    dd = diff(p, axis=axis)
    if discont is None:
        discont = period/2
    slice1 = [slice(None, None)]*nd     # full slices
    slice1[axis] = slice(1, None)
    slice1 = tuple(slice1)
    # Work in the common dtype of the differences and the period so that
    # e.g. an integer `p` with a float `period` is promoted consistently.
    dtype = np.result_type(dd, period)
    if _nx.issubdtype(dtype, _nx.integer):
        # For integer dtypes, an odd period has no ambiguous boundary
        # value at exactly +/- period/2.
        interval_high, rem = divmod(period, 2)
        boundary_ambiguous = rem == 0
    else:
        interval_high = period / 2
        boundary_ambiguous = True
    interval_low = -interval_high
    # Map each first difference into [interval_low, interval_low + period).
    ddmod = mod(dd - interval_low, period) + interval_low
    if boundary_ambiguous:
        # for `mask = (abs(dd) == period/2)`, the above line made
        # `ddmod[mask] == -period/2`. correct these such that
        # `ddmod[mask] == sign(dd[mask])*period/2`.
        _nx.copyto(ddmod, interval_high,
                   where=(ddmod == interval_low) & (dd > 0))
    ph_correct = ddmod - dd
    # Differences smaller than `discont` need no correction.
    _nx.copyto(ph_correct, 0, where=abs(dd) < discont)
    up = array(p, copy=True, dtype=dtype)
    # Accumulate corrections onto every sample after the first.
    up[slice1] = p[slice1] + ph_correct.cumsum(axis)
    return up

1802 

1803 

def _sort_complex(a):
    """Yield the dispatch-relevant argument of `sort_complex`."""
    return a,

1806 

1807 

@array_function_dispatch(_sort_complex)
def sort_complex(a):
    """
    Sort a complex array using the real part first, then the imaginary
    part.

    Parameters
    ----------
    a : array_like
        Input array

    Returns
    -------
    out : complex ndarray
        Always returns a sorted complex array.

    Examples
    --------
    >>> import numpy as np
    >>> np.sort_complex([5, 3, 6, 2, 1])
    array([1.+0.j, 2.+0.j, 3.+0.j, 5.+0.j, 6.+0.j])

    >>> np.sort_complex([1 + 2j, 2 - 1j, 3 - 2j, 3 - 3j, 3 + 5j])
    array([1.+2.j, 2.-1.j, 3.-3.j, 3.-2.j, 3.+5.j])
    """
    b = array(a, copy=True)
    b.sort()
    if issubclass(b.dtype.type, _nx.complexfloating):
        # Already complex: the in-place sort above ordered by (real, imag).
        return b
    # Promote real results to a complex dtype of matching precision:
    # small ints -> csingle, long double -> clongdouble, everything
    # else -> cdouble.
    if b.dtype.char in 'bhBH':
        return b.astype('F')
    if b.dtype.char == 'g':
        return b.astype('G')
    return b.astype('D')

1844 

1845 

def _arg_trim_zeros(filt):
    """Return indices of the first and last non-zero element.

    Parameters
    ----------
    filt : array_like
        Input array.

    Returns
    -------
    start, stop : ndarray
        Two arrays containing the indices of the first and last non-zero
        element in each dimension.

    See also
    --------
    trim_zeros

    Examples
    --------
    >>> import numpy as np
    >>> _arg_trim_zeros(np.array([0, 0, 1, 1, 0]))
    (array([2]), array([3]))
    """
    # Historically, `trim_zeros` treats `None` in an object array as
    # non-zero while plain argwhere doesn't, so compare explicitly there.
    if filt.dtype == np.object_:
        nonzero = np.argwhere(filt != 0)
    else:
        nonzero = np.argwhere(filt)
    if nonzero.size == 0:
        # All-zero input: empty index arrays in both positions.
        start = stop = np.array([], dtype=np.intp)
        return start, stop
    return nonzero.min(axis=0), nonzero.max(axis=0)

1883 

1884 

def _trim_zeros(filt, trim=None, axis=None):
    """Yield the dispatch-relevant argument of `trim_zeros`."""
    return filt,

1887 

1888 

@array_function_dispatch(_trim_zeros)
def trim_zeros(filt, trim='fb', axis=None):
    """Remove values along a dimension which are zero along all other.

    Parameters
    ----------
    filt : array_like
        Input array.
    trim : {"fb", "f", "b"}, optional
        A string with 'f' representing trim from front and 'b' to trim from
        back. By default, zeros are trimmed on both sides.
        Front and back refer to the edges of a dimension, with "front"
        referring to the side with the lowest index 0, and "back" referring
        to the highest index (or index -1).
    axis : int or sequence, optional
        If None, `filt` is cropped such that the smallest bounding box is
        returned that still contains all values which are not zero.
        If an axis is specified, `filt` will be sliced in that dimension only
        on the sides specified by `trim`. The remaining area will be the
        smallest that still contains all values which are not zero.

    Returns
    -------
    trimmed : ndarray or sequence
        The result of trimming the input. The number of dimensions and the
        input data type are preserved.

    Notes
    -----
    For all-zero arrays, the first axis is trimmed first.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array((0, 0, 0, 1, 2, 3, 0, 2, 1, 0))
    >>> np.trim_zeros(a)
    array([1, 2, 3, 0, 2, 1])

    >>> np.trim_zeros(a, trim='b')
    array([0, 0, 0, ..., 0, 2, 1])

    Multiple dimensions are supported.

    >>> b = np.array([[0, 0, 2, 3, 0, 0],
    ...               [0, 1, 0, 3, 0, 0],
    ...               [0, 0, 0, 0, 0, 0]])
    >>> np.trim_zeros(b)
    array([[0, 2, 3],
           [1, 0, 3]])

    >>> np.trim_zeros(b, axis=-1)
    array([[0, 2, 3],
           [1, 0, 3],
           [0, 0, 0]])

    The input data type is preserved, list/tuple in means list/tuple out.

    >>> np.trim_zeros([0, 1, 2, 0])
    [1, 2]

    """
    # `filt_` is only used for index computation; the final slicing is done
    # on the original `filt` so list/tuple inputs keep their type.
    filt_ = np.asarray(filt)

    trim = trim.lower()
    if trim not in {"fb", "bf", "f", "b"}:
        raise ValueError(f"unexpected character(s) in `trim`: {trim!r}")

    start, stop = _arg_trim_zeros(filt_)
    stop += 1  # Adjust for slicing

    if start.size == 0:
        # filt is all-zero -> assign same values to start and stop so that
        # resulting slice will be empty
        start = stop = np.zeros(filt_.ndim, dtype=np.intp)
    else:
        # Disable trimming on a side by replacing the per-axis indices with
        # None (an open slice bound); start/stop become tuples in that case.
        if 'f' not in trim:
            start = (None,) * filt_.ndim
        if 'b' not in trim:
            stop = (None,) * filt_.ndim

    if len(start) == 1:
        # filt is 1D -> don't use multi-dimensional slicing to preserve
        # non-array input types
        sl = slice(start[0], stop[0])
    elif axis is None:
        # trim all axes
        sl = tuple(slice(*x) for x in zip(start, stop))
    else:
        # only trim single axis
        axis = normalize_axis_index(axis, filt_.ndim)
        sl = (slice(None),) * axis + (slice(start[axis], stop[axis]),) + (...,)

    trimmed = filt[sl]
    return trimmed

1983 

1984 

1985 

1986def _extract_dispatcher(condition, arr): 

1987 return (condition, arr) 

1988 

1989 

1990@array_function_dispatch(_extract_dispatcher) 

1991def extract(condition, arr): 

1992 """ 

1993 Return the elements of an array that satisfy some condition. 

1994 

1995 This is equivalent to ``np.compress(ravel(condition), ravel(arr))``. If 

1996 `condition` is boolean ``np.extract`` is equivalent to ``arr[condition]``. 

1997 

1998 Note that `place` does the exact opposite of `extract`. 

1999 

2000 Parameters 

2001 ---------- 

2002 condition : array_like 

2003 An array whose nonzero or True entries indicate the elements of `arr` 

2004 to extract. 

2005 arr : array_like 

2006 Input array of the same size as `condition`. 

2007 

2008 Returns 

2009 ------- 

2010 extract : ndarray 

2011 Rank 1 array of values from `arr` where `condition` is True. 

2012 

2013 See Also 

2014 -------- 

2015 take, put, copyto, compress, place 

2016 

2017 Examples 

2018 -------- 

2019 >>> import numpy as np 

2020 >>> arr = np.arange(12).reshape((3, 4)) 

2021 >>> arr 

2022 array([[ 0, 1, 2, 3], 

2023 [ 4, 5, 6, 7], 

2024 [ 8, 9, 10, 11]]) 

2025 >>> condition = np.mod(arr, 3)==0 

2026 >>> condition 

2027 array([[ True, False, False, True], 

2028 [False, False, True, False], 

2029 [False, True, False, False]]) 

2030 >>> np.extract(condition, arr) 

2031 array([0, 3, 6, 9]) 

2032 

2033 

2034 If `condition` is boolean: 

2035 

2036 >>> arr[condition] 

2037 array([0, 3, 6, 9]) 

2038 

2039 """ 

2040 return _nx.take(ravel(arr), nonzero(ravel(condition))[0]) 

2041 

2042 

2043def _place_dispatcher(arr, mask, vals): 

2044 return (arr, mask, vals) 

2045 

2046 

2047@array_function_dispatch(_place_dispatcher) 

2048def place(arr, mask, vals): 

2049 """ 

2050 Change elements of an array based on conditional and input values. 

2051 

2052 Similar to ``np.copyto(arr, vals, where=mask)``, the difference is that 

2053 `place` uses the first N elements of `vals`, where N is the number of 

2054 True values in `mask`, while `copyto` uses the elements where `mask` 

2055 is True. 

2056 

2057 Note that `extract` does the exact opposite of `place`. 

2058 

2059 Parameters 

2060 ---------- 

2061 arr : ndarray 

2062 Array to put data into. 

2063 mask : array_like 

2064 Boolean mask array. Must have the same size as `a`. 

2065 vals : 1-D sequence 

2066 Values to put into `a`. Only the first N elements are used, where 

2067 N is the number of True values in `mask`. If `vals` is smaller 

2068 than N, it will be repeated, and if elements of `a` are to be masked, 

2069 this sequence must be non-empty. 

2070 

2071 See Also 

2072 -------- 

2073 copyto, put, take, extract 

2074 

2075 Examples 

2076 -------- 

2077 >>> import numpy as np 

2078 >>> arr = np.arange(6).reshape(2, 3) 

2079 >>> np.place(arr, arr>2, [44, 55]) 

2080 >>> arr 

2081 array([[ 0, 1, 2], 

2082 [44, 55, 44]]) 

2083 

2084 """ 

2085 return _place(arr, mask, vals) 

2086 

2087 

def disp(mesg, device=None, linefeed=True):
    """
    Display a message on a device.

    .. deprecated:: 2.0
        Use your own printing function instead.

    Parameters
    ----------
    mesg : str
        Message to display.
    device : object
        Device to write message. If None, defaults to ``sys.stdout`` which is
        very similar to ``print``. `device` needs to have ``write()`` and
        ``flush()`` methods.
    linefeed : bool, optional
        Option whether to print a line feed or not. Defaults to True.

    Raises
    ------
    AttributeError
        If `device` does not have a ``write()`` or ``flush()`` method.

    Examples
    --------
    >>> import numpy as np
    >>> from io import StringIO
    >>> buf = StringIO()
    >>> np.disp('"Display" in a file', device=buf)
    >>> buf.getvalue()
    '"Display" in a file\\n'

    """
    # Deprecated in NumPy 2.0, 2023-07-11
    warnings.warn(
        "`disp` is deprecated, "
        "use your own printing function instead. "
        "(deprecated in NumPy 2.0)",
        DeprecationWarning,
        stacklevel=2
    )

    out = sys.stdout if device is None else device
    out.write('%s\n' % mesg if linefeed else '%s' % mesg)
    out.flush()

2143 

2144 

# See https://docs.scipy.org/doc/numpy/reference/c-api.generalized-ufuncs.html
# Regular-expression fragments used to validate and tokenize gufunc
# signatures such as "(m,n),(n,p)->(m,p)".
_DIMENSION_NAME = r'\w+'  # one core-dimension name, e.g. "m"
# A possibly-empty comma-separated run of dimension names, e.g. "m,n".
_CORE_DIMENSION_LIST = '(?:{0:}(?:,{0:})*)?'.format(_DIMENSION_NAME)
# A single parenthesized argument, e.g. "(m,n)" or "()".
_ARGUMENT = r'\({}\)'.format(_CORE_DIMENSION_LIST)
# One or more comma-separated arguments, e.g. "(m,n),(n,p)".
_ARGUMENT_LIST = '{0:}(?:,{0:})*'.format(_ARGUMENT)
# A full anchored signature: input argument list, "->", output argument list.
_SIGNATURE = '^{0:}->{0:}$'.format(_ARGUMENT_LIST)

2151 

2152 

2153def _parse_gufunc_signature(signature): 

2154 """ 

2155 Parse string signatures for a generalized universal function. 

2156 

2157 Arguments 

2158 --------- 

2159 signature : string 

2160 Generalized universal function signature, e.g., ``(m,n),(n,p)->(m,p)`` 

2161 for ``np.matmul``. 

2162 

2163 Returns 

2164 ------- 

2165 Tuple of input and output core dimensions parsed from the signature, each 

2166 of the form List[Tuple[str, ...]]. 

2167 """ 

2168 signature = re.sub(r'\s+', '', signature) 

2169 

2170 if not re.match(_SIGNATURE, signature): 

2171 raise ValueError( 

2172 'not a valid gufunc signature: {}'.format(signature)) 

2173 return tuple([tuple(re.findall(_DIMENSION_NAME, arg)) 

2174 for arg in re.findall(_ARGUMENT, arg_list)] 

2175 for arg_list in signature.split('->')) 

2176 

2177 

2178def _update_dim_sizes(dim_sizes, arg, core_dims): 

2179 """ 

2180 Incrementally check and update core dimension sizes for a single argument. 

2181 

2182 Arguments 

2183 --------- 

2184 dim_sizes : Dict[str, int] 

2185 Sizes of existing core dimensions. Will be updated in-place. 

2186 arg : ndarray 

2187 Argument to examine. 

2188 core_dims : Tuple[str, ...] 

2189 Core dimensions for this argument. 

2190 """ 

2191 if not core_dims: 

2192 return 

2193 

2194 num_core_dims = len(core_dims) 

2195 if arg.ndim < num_core_dims: 

2196 raise ValueError( 

2197 '%d-dimensional argument does not have enough ' 

2198 'dimensions for all core dimensions %r' 

2199 % (arg.ndim, core_dims)) 

2200 

2201 core_shape = arg.shape[-num_core_dims:] 

2202 for dim, size in zip(core_dims, core_shape): 

2203 if dim in dim_sizes: 

2204 if size != dim_sizes[dim]: 

2205 raise ValueError( 

2206 'inconsistent size for core dimension %r: %r vs %r' 

2207 % (dim, size, dim_sizes[dim])) 

2208 else: 

2209 dim_sizes[dim] = size 

2210 

2211 

2212def _parse_input_dimensions(args, input_core_dims): 

2213 """ 

2214 Parse broadcast and core dimensions for vectorize with a signature. 

2215 

2216 Arguments 

2217 --------- 

2218 args : Tuple[ndarray, ...] 

2219 Tuple of input arguments to examine. 

2220 input_core_dims : List[Tuple[str, ...]] 

2221 List of core dimensions corresponding to each input. 

2222 

2223 Returns 

2224 ------- 

2225 broadcast_shape : Tuple[int, ...] 

2226 Common shape to broadcast all non-core dimensions to. 

2227 dim_sizes : Dict[str, int] 

2228 Common sizes for named core dimensions. 

2229 """ 

2230 broadcast_args = [] 

2231 dim_sizes = {} 

2232 for arg, core_dims in zip(args, input_core_dims): 

2233 _update_dim_sizes(dim_sizes, arg, core_dims) 

2234 ndim = arg.ndim - len(core_dims) 

2235 dummy_array = np.lib.stride_tricks.as_strided(0, arg.shape[:ndim]) 

2236 broadcast_args.append(dummy_array) 

2237 broadcast_shape = np.lib._stride_tricks_impl._broadcast_shape( 

2238 *broadcast_args 

2239 ) 

2240 return broadcast_shape, dim_sizes 

2241 

2242 

2243def _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims): 

2244 """Helper for calculating broadcast shapes with core dimensions.""" 

2245 return [broadcast_shape + tuple(dim_sizes[dim] for dim in core_dims) 

2246 for core_dims in list_of_core_dims] 

2247 

2248 

2249def _create_arrays(broadcast_shape, dim_sizes, list_of_core_dims, dtypes, 

2250 results=None): 

2251 """Helper for creating output arrays in vectorize.""" 

2252 shapes = _calculate_shapes(broadcast_shape, dim_sizes, list_of_core_dims) 

2253 if dtypes is None: 

2254 dtypes = [None] * len(shapes) 

2255 if results is None: 

2256 arrays = tuple(np.empty(shape=shape, dtype=dtype) 

2257 for shape, dtype in zip(shapes, dtypes)) 

2258 else: 

2259 arrays = tuple(np.empty_like(result, shape=shape, dtype=dtype) 

2260 for result, shape, dtype 

2261 in zip(results, shapes, dtypes)) 

2262 return arrays 

2263 

2264 

2265def _get_vectorize_dtype(dtype): 

2266 if dtype.char in "SU": 

2267 return dtype.char 

2268 return dtype 

2269 

2270 

@set_module('numpy')
class vectorize:
    """
    vectorize(pyfunc=np._NoValue, otypes=None, doc=None, excluded=None,
              cache=False, signature=None)

    Returns an object that acts like pyfunc, but takes arrays as input.

    Define a vectorized function which takes a nested sequence of objects or
    numpy arrays as inputs and returns a single numpy array or a tuple of numpy
    arrays. The vectorized function evaluates `pyfunc` over successive tuples
    of the input arrays like the python map function, except it uses the
    broadcasting rules of numpy.

    The data type of the output of `vectorized` is determined by calling
    the function with the first element of the input. This can be avoided
    by specifying the `otypes` argument.

    Parameters
    ----------
    pyfunc : callable, optional
        A python function or method.
        Can be omitted to produce a decorator with keyword arguments.
    otypes : str or list of dtypes, optional
        The output data type. It must be specified as either a string of
        typecode characters or a list of data type specifiers. There should
        be one data type specifier for each output.
    doc : str, optional
        The docstring for the function. If None, the docstring will be the
        ``pyfunc.__doc__``.
    excluded : set, optional
        Set of strings or integers representing the positional or keyword
        arguments for which the function will not be vectorized. These will be
        passed directly to `pyfunc` unmodified.

    cache : bool, optional
        If `True`, then cache the first function call that determines the
        number of outputs if `otypes` is not provided.

    signature : string, optional
        Generalized universal function signature, e.g., ``(m,n),(n)->(m)`` for
        vectorized matrix-vector multiplication. If provided, ``pyfunc`` will
        be called with (and expected to return) arrays with shapes given by the
        size of corresponding core dimensions. By default, ``pyfunc`` is
        assumed to take scalars as input and output.

    Returns
    -------
    out : callable
        A vectorized function if ``pyfunc`` was provided,
        a decorator otherwise.

    See Also
    --------
    frompyfunc : Takes an arbitrary Python function and returns a ufunc

    Notes
    -----
    The `vectorize` function is provided primarily for convenience, not for
    performance. The implementation is essentially a for loop.

    If `otypes` is not specified, then a call to the function with the
    first argument will be used to determine the number of outputs. The
    results of this call will be cached if `cache` is `True` to prevent
    calling the function twice. However, to implement the cache, the
    original function must be wrapped which will slow down subsequent
    calls, so only do this if your function is expensive.

    The new keyword argument interface and `excluded` argument support
    further degrades performance.

    References
    ----------
    .. [1] :doc:`/reference/c-api/generalized-ufuncs`

    Examples
    --------
    >>> import numpy as np
    >>> def myfunc(a, b):
    ...     "Return a-b if a>b, otherwise return a+b"
    ...     if a > b:
    ...         return a - b
    ...     else:
    ...         return a + b

    >>> vfunc = np.vectorize(myfunc)
    >>> vfunc([1, 2, 3, 4], 2)
    array([3, 4, 1, 2])

    The docstring is taken from the input function to `vectorize` unless it
    is specified:

    >>> vfunc.__doc__
    'Return a-b if a>b, otherwise return a+b'
    >>> vfunc = np.vectorize(myfunc, doc='Vectorized `myfunc`')
    >>> vfunc.__doc__
    'Vectorized `myfunc`'

    The output type is determined by evaluating the first element of the input,
    unless it is specified:

    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <class 'numpy.int64'>
    >>> vfunc = np.vectorize(myfunc, otypes=[float])
    >>> out = vfunc([1, 2, 3, 4], 2)
    >>> type(out[0])
    <class 'numpy.float64'>

    The `excluded` argument can be used to prevent vectorizing over certain
    arguments. This can be useful for array-like arguments of a fixed length
    such as the coefficients for a polynomial as in `polyval`:

    >>> def mypolyval(p, x):
    ...     _p = list(p)
    ...     res = _p.pop(0)
    ...     while _p:
    ...         res = res*x + _p.pop(0)
    ...     return res

    Here, we exclude the zeroth argument from vectorization whether it is
    passed by position or keyword.

    >>> vpolyval = np.vectorize(mypolyval, excluded={0, 'p'})
    >>> vpolyval([1, 2, 3], x=[0, 1])
    array([3, 6])
    >>> vpolyval(p=[1, 2, 3], x=[0, 1])
    array([3, 6])

    The `signature` argument allows for vectorizing functions that act on
    non-scalar arrays of fixed length. For example, you can use it for a
    vectorized calculation of Pearson correlation coefficient and its p-value:

    >>> import scipy.stats
    >>> pearsonr = np.vectorize(scipy.stats.pearsonr,
    ...                         signature='(n),(n)->(),()')
    >>> pearsonr([[0, 1, 2, 3]], [[1, 2, 3, 4], [4, 3, 2, 1]])
    (array([ 1., -1.]), array([ 0.,  0.]))

    Or for a vectorized convolution:

    >>> convolve = np.vectorize(np.convolve, signature='(n),(m)->(k)')
    >>> convolve(np.eye(4), [1, 2, 1])
    array([[1., 2., 1., 0., 0., 0.],
           [0., 1., 2., 1., 0., 0.],
           [0., 0., 1., 2., 1., 0.],
           [0., 0., 0., 1., 2., 1.]])

    Decorator syntax is supported. The decorator can be called as
    a function to provide keyword arguments:

    >>> @np.vectorize
    ... def identity(x):
    ...     return x
    ...
    >>> identity([0, 1, 2])
    array([0, 1, 2])
    >>> @np.vectorize(otypes=[float])
    ... def as_float(x):
    ...     return x
    ...
    >>> as_float([0, 1, 2])
    array([0., 1., 2.])
    """
    def __init__(self, pyfunc=np._NoValue, otypes=None, doc=None,
                 excluded=None, cache=False, signature=None):

        # `pyfunc` stays at the `np._NoValue` sentinel when `vectorize` is
        # used as a decorator factory (``@np.vectorize(otypes=...)``); the
        # decorated function then arrives via __call__ -> _init_stage_2.
        if (pyfunc != np._NoValue) and (not callable(pyfunc)):
            # Splitting the error message to keep
            # the length below 79 characters.
            part1 = "When used as a decorator, "
            part2 = "only accepts keyword arguments."
            raise TypeError(part1 + part2)

        self.pyfunc = pyfunc
        self.cache = cache
        self.signature = signature
        if pyfunc != np._NoValue and hasattr(pyfunc, '__name__'):
            self.__name__ = pyfunc.__name__

        self._ufunc = {}    # Caching to improve default performance
        # `_doc` remembers an explicit `doc` argument so _init_stage_2 can
        # apply it after the decorated function is supplied.
        self._doc = None
        self.__doc__ = doc
        if doc is None and hasattr(pyfunc, '__doc__'):
            self.__doc__ = pyfunc.__doc__
        else:
            self._doc = doc

        if isinstance(otypes, str):
            # A typecode string: validate each character against the known
            # typecodes but keep the string form.
            for char in otypes:
                if char not in typecodes['All']:
                    raise ValueError("Invalid otype specified: %s" % (char,))
        elif iterable(otypes):
            # A sequence of dtype specifiers: normalize each one.
            otypes = [_get_vectorize_dtype(_nx.dtype(x)) for x in otypes]
        elif otypes is not None:
            raise ValueError("Invalid otype specification")
        self.otypes = otypes

        # Excluded variable support
        if excluded is None:
            excluded = set()
        self.excluded = set(excluded)

        if signature is not None:
            self._in_and_out_core_dims = _parse_gufunc_signature(signature)
        else:
            self._in_and_out_core_dims = None

    def _init_stage_2(self, pyfunc, *args, **kwargs):
        # Second half of construction for the decorator-factory form: bind
        # the decorated function and finalize name/docstring. Extra
        # positional/keyword arguments are accepted but ignored.
        self.__name__ = pyfunc.__name__
        self.pyfunc = pyfunc
        if self._doc is None:
            self.__doc__ = pyfunc.__doc__
        else:
            self.__doc__ = self._doc

    def _call_as_normal(self, *args, **kwargs):
        """
        Return arrays with the results of `pyfunc` broadcast (vectorized) over
        `args` and `kwargs` not in `excluded`.
        """
        excluded = self.excluded
        if not kwargs and not excluded:
            # Fast path: call `pyfunc` directly with positional arguments.
            func = self.pyfunc
            vargs = args
        else:
            # The wrapper accepts only positional arguments: we use `names` and
            # `inds` to mutate `the_args` and `kwargs` to pass to the original
            # function.
            nargs = len(args)

            names = [_n for _n in kwargs if _n not in excluded]
            inds = [_i for _i in range(nargs) if _i not in excluded]
            the_args = list(args)

            def func(*vargs):
                for _n, _i in enumerate(inds):
                    the_args[_i] = vargs[_n]
                kwargs.update(zip(names, vargs[len(inds):]))
                return self.pyfunc(*the_args, **kwargs)

            vargs = [args[_i] for _i in inds]
            vargs.extend([kwargs[_n] for _n in names])

        return self._vectorize_call(func=func, args=vargs)

    def __call__(self, *args, **kwargs):
        # When `pyfunc` is still the sentinel, this call is the decorator
        # application: the first argument is the function being decorated.
        if self.pyfunc is np._NoValue:
            self._init_stage_2(*args, **kwargs)
            return self

        return self._call_as_normal(*args, **kwargs)

    def _get_ufunc_and_otypes(self, func, args):
        """Return (ufunc, otypes)."""
        # frompyfunc will fail if args is empty
        if not args:
            raise ValueError('args can not be empty')

        if self.otypes is not None:
            otypes = self.otypes

            # self._ufunc is a dictionary whose keys are the number of
            # arguments (i.e. len(args)) and whose values are ufuncs created
            # by frompyfunc. len(args) can be different for different calls if
            # self.pyfunc has parameters with default values. We only use the
            # cache when func is self.pyfunc, which occurs when the call uses
            # only positional arguments and no arguments are excluded.

            nin = len(args)
            nout = len(self.otypes)
            if func is not self.pyfunc or nin not in self._ufunc:
                ufunc = frompyfunc(func, nin, nout)
            else:
                ufunc = None  # We'll get it from self._ufunc
            if func is self.pyfunc:
                ufunc = self._ufunc.setdefault(nin, ufunc)
        else:
            # Get number of outputs and output types by calling the function on
            # the first entries of args. We also cache the result to prevent
            # the subsequent call when the ufunc is evaluated.
            # Assumes that ufunc first evaluates the 0th elements in the input
            # arrays (the input values are not checked to ensure this)
            args = [asarray(arg) for arg in args]
            if builtins.any(arg.size == 0 for arg in args):
                raise ValueError('cannot call `vectorize` on size 0 inputs '
                                 'unless `otypes` is set')

            inputs = [arg.flat[0] for arg in args]
            outputs = func(*inputs)

            # Performance note: profiling indicates that -- for simple
            # functions at least -- this wrapping can almost double the
            # execution time.
            # Hence we make it optional.
            if self.cache:
                _cache = [outputs]

                def _func(*vargs):
                    if _cache:
                        return _cache.pop()
                    else:
                        return func(*vargs)
            else:
                _func = func

            if isinstance(outputs, tuple):
                nout = len(outputs)
            else:
                nout = 1
                outputs = (outputs,)

            otypes = ''.join([asarray(outputs[_k]).dtype.char
                              for _k in range(nout)])

            # Performance note: profiling indicates that creating the ufunc is
            # not a significant cost compared with wrapping so it seems not
            # worth trying to cache this.
            ufunc = frompyfunc(_func, len(args), nout)

        return ufunc, otypes

    def _vectorize_call(self, func, args):
        """Vectorized call to `func` over positional `args`."""
        if self.signature is not None:
            res = self._vectorize_call_with_signature(func, args)
        elif not args:
            res = func()
        else:
            ufunc, otypes = self._get_ufunc_and_otypes(func=func, args=args)

            # Convert args to object arrays first
            inputs = [asanyarray(a, dtype=object) for a in args]

            outputs = ufunc(*inputs)

            # Cast the object-dtype ufunc results to the determined otypes.
            if ufunc.nout == 1:
                res = asanyarray(outputs, dtype=otypes[0])
            else:
                res = tuple([asanyarray(x, dtype=t)
                             for x, t in zip(outputs, otypes)])
        return res

    def _vectorize_call_with_signature(self, func, args):
        """Vectorized call over positional arguments with a signature."""
        input_core_dims, output_core_dims = self._in_and_out_core_dims

        if len(args) != len(input_core_dims):
            raise TypeError('wrong number of positional arguments: '
                            'expected %r, got %r'
                            % (len(input_core_dims), len(args)))
        args = tuple(asanyarray(arg) for arg in args)

        broadcast_shape, dim_sizes = _parse_input_dimensions(
            args, input_core_dims)
        input_shapes = _calculate_shapes(broadcast_shape, dim_sizes,
                                         input_core_dims)
        args = [np.broadcast_to(arg, shape, subok=True)
                for arg, shape in zip(args, input_shapes)]

        outputs = None
        otypes = self.otypes
        nout = len(output_core_dims)

        # Loop over every index of the broadcast (non-core) shape, calling
        # `func` once per position with the corresponding core-shaped slices.
        for index in np.ndindex(*broadcast_shape):
            results = func(*(arg[index] for arg in args))

            n_results = len(results) if isinstance(results, tuple) else 1

            if nout != n_results:
                raise ValueError(
                    'wrong number of outputs from pyfunc: expected %r, got %r'
                    % (nout, n_results))

            if nout == 1:
                results = (results,)

            if outputs is None:
                # First call: output core-dimension sizes are now known, so
                # the output arrays can be allocated.
                for result, core_dims in zip(results, output_core_dims):
                    _update_dim_sizes(dim_sizes, result, core_dims)

                outputs = _create_arrays(broadcast_shape, dim_sizes,
                                         output_core_dims, otypes, results)

            for output, result in zip(outputs, results):
                output[index] = result

        if outputs is None:
            # did not call the function even once
            if otypes is None:
                raise ValueError('cannot call `vectorize` on size 0 inputs '
                                 'unless `otypes` is set')
            if builtins.any(dim not in dim_sizes
                            for dims in output_core_dims
                            for dim in dims):
                raise ValueError('cannot call `vectorize` with a signature '
                                 'including new output dimensions on size 0 '
                                 'inputs')
            outputs = _create_arrays(broadcast_shape, dim_sizes,
                                     output_core_dims, otypes)

        return outputs[0] if nout == 1 else outputs

2673 

2674 

2675def _cov_dispatcher(m, y=None, rowvar=None, bias=None, ddof=None, 

2676 fweights=None, aweights=None, *, dtype=None): 

2677 return (m, y, fweights, aweights) 

2678 

2679 

@array_function_dispatch(_cov_dispatcher)
def cov(m, y=None, rowvar=True, bias=False, ddof=None, fweights=None,
        aweights=None, *, dtype=None):
    """
    Estimate a covariance matrix, given data and weights.

    Covariance indicates the level to which two variables vary together.
    If we examine N-dimensional samples, :math:`X = [x_1, x_2, ... x_N]^T`,
    then the covariance matrix element :math:`C_{ij}` is the covariance of
    :math:`x_i` and :math:`x_j`. The element :math:`C_{ii}` is the variance
    of :math:`x_i`.

    See the notes for an outline of the algorithm.

    Parameters
    ----------
    m : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `m` represents a variable, and each column a single
        observation of all those variables. Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations. `y` has the same form
        as that of `m`.
    rowvar : bool, optional
        If `rowvar` is True (default), then each row represents a
        variable, with observations in the columns. Otherwise, the relationship
        is transposed: each column represents a variable, while the rows
        contain observations.
    bias : bool, optional
        Default normalization (False) is by ``(N - 1)``, where ``N`` is the
        number of observations given (unbiased estimate). If `bias` is True,
        then normalization is by ``N``. These values can be overridden by using
        the keyword ``ddof`` in numpy versions >= 1.5.
    ddof : int, optional
        If not ``None`` the default value implied by `bias` is overridden.
        Note that ``ddof=1`` will return the unbiased estimate, even if both
        `fweights` and `aweights` are specified, and ``ddof=0`` will return
        the simple average. See the notes for the details. The default value
        is ``None``.
    fweights : array_like, int, optional
        1-D array of integer frequency weights; the number of times each
        observation vector should be repeated.
    aweights : array_like, optional
        1-D array of observation vector weights. These relative weights are
        typically large for observations considered "important" and smaller for
        observations considered less "important". If ``ddof=0`` the array of
        weights can be used to assign probabilities to observation vectors.
    dtype : data-type, optional
        Data-type of the result. By default, the return data-type will have
        at least `numpy.float64` precision.

        .. versionadded:: 1.20

    Returns
    -------
    out : ndarray
        The covariance matrix of the variables.

    See Also
    --------
    corrcoef : Normalized covariance matrix

    Notes
    -----
    Assume that the observations are in the columns of the observation
    array `m` and let ``f = fweights`` and ``a = aweights`` for brevity. The
    steps to compute the weighted covariance are as follows::

        >>> m = np.arange(10, dtype=np.float64)
        >>> f = np.arange(10) * 2
        >>> a = np.arange(10) ** 2.
        >>> ddof = 1
        >>> w = f * a
        >>> v1 = np.sum(w)
        >>> v2 = np.sum(w * a)
        >>> m -= np.sum(m * w, axis=None, keepdims=True) / v1
        >>> cov = np.dot(m * w, m.T) * v1 / (v1**2 - ddof * v2)

    Note that when ``a == 1``, the normalization factor
    ``v1 / (v1**2 - ddof * v2)`` goes over to ``1 / (np.sum(f) - ddof)``
    as it should.

    Examples
    --------
    >>> import numpy as np

    Consider two variables, :math:`x_0` and :math:`x_1`, which
    correlate perfectly, but in opposite directions:

    >>> x = np.array([[0, 2], [1, 1], [2, 0]]).T
    >>> x
    array([[0, 1, 2],
           [2, 1, 0]])

    Note how :math:`x_0` increases while :math:`x_1` decreases. The covariance
    matrix shows this clearly:

    >>> np.cov(x)
    array([[ 1., -1.],
           [-1.,  1.]])

    Note that element :math:`C_{0,1}`, which shows the correlation between
    :math:`x_0` and :math:`x_1`, is negative.

    Further, note how `x` and `y` are combined:

    >>> x = [-2.1, -1,  4.3]
    >>> y = [3,  1.1,  0.12]
    >>> X = np.stack((x, y), axis=0)
    >>> np.cov(X)
    array([[11.71      , -4.286     ], # may vary
           [-4.286     ,  2.144133]])
    >>> np.cov(x, y)
    array([[11.71      , -4.286     ], # may vary
           [-4.286     ,  2.144133]])
    >>> np.cov(x)
    array(11.71)

    """
    # Check inputs
    if ddof is not None and ddof != int(ddof):
        raise ValueError(
            "ddof must be integer")

    # Handles complex arrays too
    m = np.asarray(m)
    if m.ndim > 2:
        raise ValueError("m has more than 2 dimensions")

    if y is not None:
        y = np.asarray(y)
        if y.ndim > 2:
            raise ValueError("y has more than 2 dimensions")

    if dtype is None:
        # Result has at least float64 precision; promote with `y` if given.
        if y is None:
            dtype = np.result_type(m, np.float64)
        else:
            dtype = np.result_type(m, y, np.float64)

    X = array(m, ndmin=2, dtype=dtype)
    if not rowvar and m.ndim != 1:
        # Observations are in rows: transpose so variables are rows.
        X = X.T
    if X.shape[0] == 0:
        # No variables at all: empty 0x0 covariance matrix.
        return np.array([]).reshape(0, 0)
    if y is not None:
        y = array(y, copy=None, ndmin=2, dtype=dtype)
        if not rowvar and y.shape[0] != 1:
            y = y.T
        # Stack `y`'s variables below `m`'s before computing covariance.
        X = np.concatenate((X, y), axis=0)

    if ddof is None:
        # bias=False -> ddof=1 (unbiased); bias=True -> ddof=0.
        if bias == 0:
            ddof = 1
        else:
            ddof = 0

    # Get the product of frequencies and weights
    w = None
    if fweights is not None:
        fweights = np.asarray(fweights, dtype=float)
        if not np.all(fweights == np.around(fweights)):
            raise TypeError(
                "fweights must be integer")
        if fweights.ndim > 1:
            raise RuntimeError(
                "cannot handle multidimensional fweights")
        if fweights.shape[0] != X.shape[1]:
            raise RuntimeError(
                "incompatible numbers of samples and fweights")
        if any(fweights < 0):
            raise ValueError(
                "fweights cannot be negative")
        w = fweights
    if aweights is not None:
        aweights = np.asarray(aweights, dtype=float)
        if aweights.ndim > 1:
            raise RuntimeError(
                "cannot handle multidimensional aweights")
        if aweights.shape[0] != X.shape[1]:
            raise RuntimeError(
                "incompatible numbers of samples and aweights")
        if any(aweights < 0):
            raise ValueError(
                "aweights cannot be negative")
        if w is None:
            w = aweights
        else:
            w *= aweights

    # Weighted mean of each variable; w_sum is the total weight.
    avg, w_sum = average(X, axis=1, weights=w, returned=True)
    w_sum = w_sum[0]

    # Determine the normalization
    if w is None:
        fact = X.shape[1] - ddof
    elif ddof == 0:
        fact = w_sum
    elif aweights is None:
        fact = w_sum - ddof
    else:
        # General weighted case: see the formula in the Notes section.
        fact = w_sum - ddof*sum(w*aweights)/w_sum

    if fact <= 0:
        warnings.warn("Degrees of freedom <= 0 for slice",
                      RuntimeWarning, stacklevel=2)
        fact = 0.0

    # Center the data, then form the (weighted) outer product X @ X^H.
    X -= avg[:, None]
    if w is None:
        X_T = X.T
    else:
        X_T = (X*w).T
    c = dot(X, X_T.conj())
    c *= np.true_divide(1, fact)
    return c.squeeze()

2896 

2897 

2898def _corrcoef_dispatcher(x, y=None, rowvar=None, bias=None, ddof=None, *, 

2899 dtype=None): 

2900 return (x, y) 

2901 

2902 

@array_function_dispatch(_corrcoef_dispatcher)
def corrcoef(x, y=None, rowvar=True, bias=np._NoValue, ddof=np._NoValue, *,
             dtype=None):
    """
    Return Pearson product-moment correlation coefficients.

    The correlation matrix `R` is derived from the covariance matrix `C`
    (see `cov` for how `x`, `y` and `rowvar` are interpreted) as

    .. math:: R_{ij} = \\frac{ C_{ij} } { \\sqrt{ C_{ii} C_{jj} } }

    The values of `R` are between -1 and 1, inclusive.

    Parameters
    ----------
    x : array_like
        A 1-D or 2-D array containing multiple variables and observations.
        Each row of `x` represents a variable, and each column a single
        observation of all those variables.  Also see `rowvar` below.
    y : array_like, optional
        An additional set of variables and observations, combined with `x`
        exactly as in `cov`.
    rowvar : bool, optional
        If `rowvar` is True (default), then each row represents a
        variable, with observations in the columns.  Otherwise, each
        column represents a variable, while the rows contain observations.
    bias : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.10.0
    ddof : _NoValue, optional
        Has no effect, do not use.

        .. deprecated:: 1.10.0
    dtype : data-type, optional
        Data-type of the result.  By default, the return data-type will
        have at least `numpy.float64` precision.

        .. versionadded:: 1.20

    Returns
    -------
    R : ndarray
        The correlation coefficient matrix of the variables.

    See Also
    --------
    cov : Covariance matrix

    Notes
    -----
    Due to floating point rounding the resulting array may not be
    Hermitian, the diagonal elements may not be 1, and the elements may
    not satisfy ``abs(a) <= 1``.  The real and imaginary parts are clipped
    to the interval [-1, 1] to mitigate this; for complex input the
    magnitude may still slightly exceed 1.

    `bias` and `ddof` are accepted but discarded for backwards
    compatibility; they never affected the return value.
    """
    if bias is not np._NoValue or ddof is not np._NoValue:
        # Deprecated 2015-03-15, NumPy 1.10.
        warnings.warn('bias and ddof have no effect and are deprecated',
                      DeprecationWarning, stacklevel=2)
    c = cov(x, y, rowvar, dtype=dtype)
    try:
        d = diag(c)
    except ValueError:
        # 0-d covariance: dividing by itself yields 1, or nan when the
        # variance is invalid (nan, inf, 0).
        return c / c
    stddev = sqrt(d.real)
    c = c / stddev[:, None]
    c = c / stddev[None, :]

    # Rounding can push entries slightly outside [-1, 1]; clip both parts.
    # This does not guarantee abs(c[i, j]) <= 1 for complex arrays, but it
    # is the best cheap correction available.
    np.clip(c.real, -1, 1, out=c.real)
    if np.iscomplexobj(c):
        np.clip(c.imag, -1, 1, out=c.imag)

    return c

3056 

3057 

3058@set_module('numpy') 

3059def blackman(M): 

3060 """ 

3061 Return the Blackman window. 

3062 

3063 The Blackman window is a taper formed by using the first three 

3064 terms of a summation of cosines. It was designed to have close to the 

3065 minimal leakage possible. It is close to optimal, only slightly worse 

3066 than a Kaiser window. 

3067 

3068 Parameters 

3069 ---------- 

3070 M : int 

3071 Number of points in the output window. If zero or less, an empty 

3072 array is returned. 

3073 

3074 Returns 

3075 ------- 

3076 out : ndarray 

3077 The window, with the maximum value normalized to one (the value one 

3078 appears only if the number of samples is odd). 

3079 

3080 See Also 

3081 -------- 

3082 bartlett, hamming, hanning, kaiser 

3083 

3084 Notes 

3085 ----- 

3086 The Blackman window is defined as 

3087 

3088 .. math:: w(n) = 0.42 - 0.5 \\cos(2\\pi n/M) + 0.08 \\cos(4\\pi n/M) 

3089 

3090 Most references to the Blackman window come from the signal processing 

3091 literature, where it is used as one of many windowing functions for 

3092 smoothing values. It is also known as an apodization (which means 

3093 "removing the foot", i.e. smoothing discontinuities at the beginning 

3094 and end of the sampled signal) or tapering function. It is known as a 

3095 "near optimal" tapering function, almost as good (by some measures) 

3096 as the kaiser window. 

3097 

3098 References 

3099 ---------- 

3100 Blackman, R.B. and Tukey, J.W., (1958) The measurement of power spectra, 

3101 Dover Publications, New York. 

3102 

3103 Oppenheim, A.V., and R.W. Schafer. Discrete-Time Signal Processing. 

3104 Upper Saddle River, NJ: Prentice-Hall, 1999, pp. 468-471. 

3105 

3106 Examples 

3107 -------- 

3108 >>> import numpy as np 

3109 >>> import matplotlib.pyplot as plt 

3110 >>> np.blackman(12) 

3111 array([-1.38777878e-17, 3.26064346e-02, 1.59903635e-01, # may vary 

3112 4.14397981e-01, 7.36045180e-01, 9.67046769e-01, 

3113 9.67046769e-01, 7.36045180e-01, 4.14397981e-01, 

3114 1.59903635e-01, 3.26064346e-02, -1.38777878e-17]) 

3115 

3116 Plot the window and the frequency response. 

3117 

3118 .. plot:: 

3119 :include-source: 

3120 

3121 import matplotlib.pyplot as plt 

3122 from numpy.fft import fft, fftshift 

3123 window = np.blackman(51) 

3124 plt.plot(window) 

3125 plt.title("Blackman window") 

3126 plt.ylabel("Amplitude") 

3127 plt.xlabel("Sample") 

3128 plt.show() # doctest: +SKIP 

3129 

3130 plt.figure() 

3131 A = fft(window, 2048) / 25.5 

3132 mag = np.abs(fftshift(A)) 

3133 freq = np.linspace(-0.5, 0.5, len(A)) 

3134 with np.errstate(divide='ignore', invalid='ignore'): 

3135 response = 20 * np.log10(mag) 

3136 response = np.clip(response, -100, 100) 

3137 plt.plot(freq, response) 

3138 plt.title("Frequency response of Blackman window") 

3139 plt.ylabel("Magnitude [dB]") 

3140 plt.xlabel("Normalized frequency [cycles per sample]") 

3141 plt.axis('tight') 

3142 plt.show() 

3143 

3144 """ 

3145 # Ensures at least float64 via 0.0. M should be an integer, but conversion 

3146 # to double is safe for a range. 

3147 values = np.array([0.0, M]) 

3148 M = values[1] 

3149 

3150 if M < 1: 

3151 return array([], dtype=values.dtype) 

3152 if M == 1: 

3153 return ones(1, dtype=values.dtype) 

3154 n = arange(1-M, M, 2) 

3155 return 0.42 + 0.5*cos(pi*n/(M-1)) + 0.08*cos(2.0*pi*n/(M-1)) 

3156 

3157 

3158@set_module('numpy') 

3159def bartlett(M): 

3160 """ 

3161 Return the Bartlett window. 

3162 

3163 The Bartlett window is very similar to a triangular window, except 

3164 that the end points are at zero. It is often used in signal 

3165 processing for tapering a signal, without generating too much 

3166 ripple in the frequency domain. 

3167 

3168 Parameters 

3169 ---------- 

3170 M : int 

3171 Number of points in the output window. If zero or less, an 

3172 empty array is returned. 

3173 

3174 Returns 

3175 ------- 

3176 out : array 

3177 The triangular window, with the maximum value normalized to one 

3178 (the value one appears only if the number of samples is odd), with 

3179 the first and last samples equal to zero. 

3180 

3181 See Also 

3182 -------- 

3183 blackman, hamming, hanning, kaiser 

3184 

3185 Notes 

3186 ----- 

3187 The Bartlett window is defined as 

3188 

3189 .. math:: w(n) = \\frac{2}{M-1} \\left( 

3190 \\frac{M-1}{2} - \\left|n - \\frac{M-1}{2}\\right| 

3191 \\right) 

3192 

3193 Most references to the Bartlett window come from the signal processing 

3194 literature, where it is used as one of many windowing functions for 

3195 smoothing values. Note that convolution with this window produces linear 

3196 interpolation. It is also known as an apodization (which means "removing 

3197 the foot", i.e. smoothing discontinuities at the beginning and end of the 

3198 sampled signal) or tapering function. The Fourier transform of the 

3199 Bartlett window is the product of two sinc functions. Note the excellent 

3200 discussion in Kanasewich [2]_. 

3201 

3202 References 

3203 ---------- 

3204 .. [1] M.S. Bartlett, "Periodogram Analysis and Continuous Spectra", 

3205 Biometrika 37, 1-16, 1950. 

3206 .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", 

3207 The University of Alberta Press, 1975, pp. 109-110. 

3208 .. [3] A.V. Oppenheim and R.W. Schafer, "Discrete-Time Signal 

3209 Processing", Prentice-Hall, 1999, pp. 468-471. 

3210 .. [4] Wikipedia, "Window function", 

3211 https://en.wikipedia.org/wiki/Window_function 

3212 .. [5] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, 

3213 "Numerical Recipes", Cambridge University Press, 1986, page 429. 

3214 

3215 Examples 

3216 -------- 

3217 >>> import numpy as np 

3218 >>> import matplotlib.pyplot as plt 

3219 >>> np.bartlett(12) 

3220 array([ 0. , 0.18181818, 0.36363636, 0.54545455, 0.72727273, # may vary 

3221 0.90909091, 0.90909091, 0.72727273, 0.54545455, 0.36363636, 

3222 0.18181818, 0. ]) 

3223 

3224 Plot the window and its frequency response (requires SciPy and matplotlib). 

3225 

3226 .. plot:: 

3227 :include-source: 

3228 

3229 import matplotlib.pyplot as plt 

3230 from numpy.fft import fft, fftshift 

3231 window = np.bartlett(51) 

3232 plt.plot(window) 

3233 plt.title("Bartlett window") 

3234 plt.ylabel("Amplitude") 

3235 plt.xlabel("Sample") 

3236 plt.show() 

3237 plt.figure() 

3238 A = fft(window, 2048) / 25.5 

3239 mag = np.abs(fftshift(A)) 

3240 freq = np.linspace(-0.5, 0.5, len(A)) 

3241 with np.errstate(divide='ignore', invalid='ignore'): 

3242 response = 20 * np.log10(mag) 

3243 response = np.clip(response, -100, 100) 

3244 plt.plot(freq, response) 

3245 plt.title("Frequency response of Bartlett window") 

3246 plt.ylabel("Magnitude [dB]") 

3247 plt.xlabel("Normalized frequency [cycles per sample]") 

3248 plt.axis('tight') 

3249 plt.show() 

3250 

3251 """ 

3252 # Ensures at least float64 via 0.0. M should be an integer, but conversion 

3253 # to double is safe for a range. 

3254 values = np.array([0.0, M]) 

3255 M = values[1] 

3256 

3257 if M < 1: 

3258 return array([], dtype=values.dtype) 

3259 if M == 1: 

3260 return ones(1, dtype=values.dtype) 

3261 n = arange(1-M, M, 2) 

3262 return where(less_equal(n, 0), 1 + n/(M-1), 1 - n/(M-1)) 

3263 

3264 

3265@set_module('numpy') 

3266def hanning(M): 

3267 """ 

3268 Return the Hanning window. 

3269 

3270 The Hanning window is a taper formed by using a weighted cosine. 

3271 

3272 Parameters 

3273 ---------- 

3274 M : int 

3275 Number of points in the output window. If zero or less, an 

3276 empty array is returned. 

3277 

3278 Returns 

3279 ------- 

3280 out : ndarray, shape(M,) 

3281 The window, with the maximum value normalized to one (the value 

3282 one appears only if `M` is odd). 

3283 

3284 See Also 

3285 -------- 

3286 bartlett, blackman, hamming, kaiser 

3287 

3288 Notes 

3289 ----- 

3290 The Hanning window is defined as 

3291 

3292 .. math:: w(n) = 0.5 - 0.5\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) 

3293 \\qquad 0 \\leq n \\leq M-1 

3294 

3295 The Hanning was named for Julius von Hann, an Austrian meteorologist. 

3296 It is also known as the Cosine Bell. Some authors prefer that it be 

3297 called a Hann window, to help avoid confusion with the very similar 

3298 Hamming window. 

3299 

3300 Most references to the Hanning window come from the signal processing 

3301 literature, where it is used as one of many windowing functions for 

3302 smoothing values. It is also known as an apodization (which means 

3303 "removing the foot", i.e. smoothing discontinuities at the beginning 

3304 and end of the sampled signal) or tapering function. 

3305 

3306 References 

3307 ---------- 

3308 .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power 

3309 spectra, Dover Publications, New York. 

3310 .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", 

3311 The University of Alberta Press, 1975, pp. 106-108. 

3312 .. [3] Wikipedia, "Window function", 

3313 https://en.wikipedia.org/wiki/Window_function 

3314 .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, 

3315 "Numerical Recipes", Cambridge University Press, 1986, page 425. 

3316 

3317 Examples 

3318 -------- 

3319 >>> import numpy as np 

3320 >>> np.hanning(12) 

3321 array([0. , 0.07937323, 0.29229249, 0.57115742, 0.82743037, 

3322 0.97974649, 0.97974649, 0.82743037, 0.57115742, 0.29229249, 

3323 0.07937323, 0. ]) 

3324 

3325 Plot the window and its frequency response. 

3326 

3327 .. plot:: 

3328 :include-source: 

3329 

3330 import matplotlib.pyplot as plt 

3331 from numpy.fft import fft, fftshift 

3332 window = np.hanning(51) 

3333 plt.plot(window) 

3334 plt.title("Hann window") 

3335 plt.ylabel("Amplitude") 

3336 plt.xlabel("Sample") 

3337 plt.show() 

3338 

3339 plt.figure() 

3340 A = fft(window, 2048) / 25.5 

3341 mag = np.abs(fftshift(A)) 

3342 freq = np.linspace(-0.5, 0.5, len(A)) 

3343 with np.errstate(divide='ignore', invalid='ignore'): 

3344 response = 20 * np.log10(mag) 

3345 response = np.clip(response, -100, 100) 

3346 plt.plot(freq, response) 

3347 plt.title("Frequency response of the Hann window") 

3348 plt.ylabel("Magnitude [dB]") 

3349 plt.xlabel("Normalized frequency [cycles per sample]") 

3350 plt.axis('tight') 

3351 plt.show() 

3352 

3353 """ 

3354 # Ensures at least float64 via 0.0. M should be an integer, but conversion 

3355 # to double is safe for a range. 

3356 values = np.array([0.0, M]) 

3357 M = values[1] 

3358 

3359 if M < 1: 

3360 return array([], dtype=values.dtype) 

3361 if M == 1: 

3362 return ones(1, dtype=values.dtype) 

3363 n = arange(1-M, M, 2) 

3364 return 0.5 + 0.5*cos(pi*n/(M-1)) 

3365 

3366 

3367@set_module('numpy') 

3368def hamming(M): 

3369 """ 

3370 Return the Hamming window. 

3371 

3372 The Hamming window is a taper formed by using a weighted cosine. 

3373 

3374 Parameters 

3375 ---------- 

3376 M : int 

3377 Number of points in the output window. If zero or less, an 

3378 empty array is returned. 

3379 

3380 Returns 

3381 ------- 

3382 out : ndarray 

3383 The window, with the maximum value normalized to one (the value 

3384 one appears only if the number of samples is odd). 

3385 

3386 See Also 

3387 -------- 

3388 bartlett, blackman, hanning, kaiser 

3389 

3390 Notes 

3391 ----- 

3392 The Hamming window is defined as 

3393 

3394 .. math:: w(n) = 0.54 - 0.46\\cos\\left(\\frac{2\\pi{n}}{M-1}\\right) 

3395 \\qquad 0 \\leq n \\leq M-1 

3396 

3397 The Hamming was named for R. W. Hamming, an associate of J. W. Tukey 

3398 and is described in Blackman and Tukey. It was recommended for 

3399 smoothing the truncated autocovariance function in the time domain. 

3400 Most references to the Hamming window come from the signal processing 

3401 literature, where it is used as one of many windowing functions for 

3402 smoothing values. It is also known as an apodization (which means 

3403 "removing the foot", i.e. smoothing discontinuities at the beginning 

3404 and end of the sampled signal) or tapering function. 

3405 

3406 References 

3407 ---------- 

3408 .. [1] Blackman, R.B. and Tukey, J.W., (1958) The measurement of power 

3409 spectra, Dover Publications, New York. 

3410 .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The 

3411 University of Alberta Press, 1975, pp. 109-110. 

3412 .. [3] Wikipedia, "Window function", 

3413 https://en.wikipedia.org/wiki/Window_function 

3414 .. [4] W.H. Press, B.P. Flannery, S.A. Teukolsky, and W.T. Vetterling, 

3415 "Numerical Recipes", Cambridge University Press, 1986, page 425. 

3416 

3417 Examples 

3418 -------- 

3419 >>> import numpy as np 

3420 >>> np.hamming(12) 

3421 array([ 0.08 , 0.15302337, 0.34890909, 0.60546483, 0.84123594, # may vary 

3422 0.98136677, 0.98136677, 0.84123594, 0.60546483, 0.34890909, 

3423 0.15302337, 0.08 ]) 

3424 

3425 Plot the window and the frequency response. 

3426 

3427 .. plot:: 

3428 :include-source: 

3429 

3430 import matplotlib.pyplot as plt 

3431 from numpy.fft import fft, fftshift 

3432 window = np.hamming(51) 

3433 plt.plot(window) 

3434 plt.title("Hamming window") 

3435 plt.ylabel("Amplitude") 

3436 plt.xlabel("Sample") 

3437 plt.show() 

3438 

3439 plt.figure() 

3440 A = fft(window, 2048) / 25.5 

3441 mag = np.abs(fftshift(A)) 

3442 freq = np.linspace(-0.5, 0.5, len(A)) 

3443 response = 20 * np.log10(mag) 

3444 response = np.clip(response, -100, 100) 

3445 plt.plot(freq, response) 

3446 plt.title("Frequency response of Hamming window") 

3447 plt.ylabel("Magnitude [dB]") 

3448 plt.xlabel("Normalized frequency [cycles per sample]") 

3449 plt.axis('tight') 

3450 plt.show() 

3451 

3452 """ 

3453 # Ensures at least float64 via 0.0. M should be an integer, but conversion 

3454 # to double is safe for a range. 

3455 values = np.array([0.0, M]) 

3456 M = values[1] 

3457 

3458 if M < 1: 

3459 return array([], dtype=values.dtype) 

3460 if M == 1: 

3461 return ones(1, dtype=values.dtype) 

3462 n = arange(1-M, M, 2) 

3463 return 0.54 + 0.46*cos(pi*n/(M-1)) 

3464 

3465 

3466## Code from cephes for i0 

3467 

# Chebyshev series coefficients (highest-order term first, as consumed by
# _chbevl) for the small-argument branch of i0: _i0_1 evaluates this
# series in the transformed variable x/2 - 2 for |x| <= 8.
# Taken from the cephes library (see note above).
_i0A = [
    -4.41534164647933937950E-18,
    3.33079451882223809783E-17,
    -2.43127984654795469359E-16,
    1.71539128555513303061E-15,
    -1.16853328779934516808E-14,
    7.67618549860493561688E-14,
    -4.85644678311192946090E-13,
    2.95505266312963983461E-12,
    -1.72682629144155570723E-11,
    9.67580903537323691224E-11,
    -5.18979560163526290666E-10,
    2.65982372468238665035E-9,
    -1.30002500998624804212E-8,
    6.04699502254191894932E-8,
    -2.67079385394061173391E-7,
    1.11738753912010371815E-6,
    -4.41673835845875056359E-6,
    1.64484480707288970893E-5,
    -5.75419501008210370398E-5,
    1.88502885095841655729E-4,
    -5.76375574538582365885E-4,
    1.63947561694133579842E-3,
    -4.32430999505057594430E-3,
    1.05464603945949983183E-2,
    -2.37374148058994688156E-2,
    4.93052842396707084878E-2,
    -9.49010970480476444210E-2,
    1.71620901522208775349E-1,
    -3.04682672343198398683E-1,
    6.76795274409476084995E-1
    ]

# Chebyshev series coefficients for the large-argument branch of i0:
# _i0_2 evaluates this series in the transformed variable 32/x - 2 for
# x > 8.  Also from cephes.
_i0B = [
    -7.23318048787475395456E-18,
    -4.83050448594418207126E-18,
    4.46562142029675999901E-17,
    3.46122286769746109310E-17,
    -2.82762398051658348494E-16,
    -3.42548561967721913462E-16,
    1.77256013305652638360E-15,
    3.81168066935262242075E-15,
    -9.55484669882830764870E-15,
    -4.15056934728722208663E-14,
    1.54008621752140982691E-14,
    3.85277838274214270114E-13,
    7.18012445138366623367E-13,
    -1.79417853150680611778E-12,
    -1.32158118404477131188E-11,
    -3.14991652796324136454E-11,
    1.18891471078464383424E-11,
    4.94060238822496958910E-10,
    3.39623202570838634515E-9,
    2.26666899049817806459E-8,
    2.04891858946906374183E-7,
    2.89137052083475648297E-6,
    6.88975834691682398426E-5,
    3.36911647825569408990E-3,
    8.04490411014108831608E-1
    ]

3528 

3529 

3530def _chbevl(x, vals): 

3531 b0 = vals[0] 

3532 b1 = 0.0 

3533 

3534 for i in range(1, len(vals)): 

3535 b2 = b1 

3536 b1 = b0 

3537 b0 = x*b1 - b2 + vals[i] 

3538 

3539 return 0.5*(b0 - b2) 

3540 

3541 

def _i0_1(x):
    # Small-argument branch of i0 (used for x <= 8): Chebyshev series in
    # x/2 - 2, rescaled by exp(x).
    arg = x/2.0 - 2
    return exp(x) * _chbevl(arg, _i0A)

3544 

3545 

def _i0_2(x):
    # Large-argument branch of i0 (used for x > 8): Chebyshev series in
    # 32/x - 2, rescaled by exp(x)/sqrt(x).
    series = _chbevl(32.0/x - 2.0, _i0B)
    return exp(x) * series / sqrt(x)

3548 

3549 

3550def _i0_dispatcher(x): 

3551 return (x,) 

3552 

3553 

@array_function_dispatch(_i0_dispatcher)
def i0(x):
    """
    Modified Bessel function of the first kind, order 0.

    Usually denoted :math:`I_0`.

    Parameters
    ----------
    x : array_like of float
        Argument of the Bessel function.

    Returns
    -------
    out : ndarray, shape = x.shape, dtype = float
        The modified Bessel function evaluated at each of the elements
        of `x`.

    See Also
    --------
    scipy.special.i0, scipy.special.iv, scipy.special.ive

    Notes
    -----
    The scipy implementation is recommended over this function: it is a
    proper ufunc written in C, and more than an order of magnitude
    faster.

    This uses the algorithm published by Clenshaw [1]_ and referenced by
    Abramowitz and Stegun [2]_: the domain is split into [0, 8] and
    (8, inf) with a Chebyshev polynomial expansion on each interval.

    References
    ----------
    .. [1] C. W. Clenshaw, "Chebyshev series for mathematical functions",
           in *National Physical Laboratory Mathematical Tables*, vol. 5,
           London: Her Majesty's Stationery Office, 1962.
    .. [2] M. Abramowitz and I. A. Stegun, *Handbook of Mathematical
           Functions*, 10th printing, New York: Dover, 1964, pp. 379.
           https://personal.math.ubc.ca/~cbm/aands/page_379.htm

    Examples
    --------
    >>> import numpy as np
    >>> np.i0(0.)
    array(1.0)
    >>> np.i0([0, 1, 2, 3])
    array([1.        , 1.26606588, 2.2795853 , 4.88079259])

    """
    x = np.asanyarray(x)
    kind = x.dtype.kind
    if kind == 'c':
        raise TypeError("i0 not supported for complex values")
    if kind != 'f':
        # Promote non-float (integer/bool) input before evaluating.
        x = x.astype(float)
    # I0 is an even function, so only the magnitude matters.
    x = np.abs(x)
    # Evaluate the appropriate Chebyshev fit on each side of |x| == 8.
    return piecewise(x, [x <= 8.0], [_i0_1, _i0_2])

3613 

3614## End of cephes code for i0 

3615 

3616 

3617@set_module('numpy') 

3618def kaiser(M, beta): 

3619 """ 

3620 Return the Kaiser window. 

3621 

3622 The Kaiser window is a taper formed by using a Bessel function. 

3623 

3624 Parameters 

3625 ---------- 

3626 M : int 

3627 Number of points in the output window. If zero or less, an 

3628 empty array is returned. 

3629 beta : float 

3630 Shape parameter for window. 

3631 

3632 Returns 

3633 ------- 

3634 out : array 

3635 The window, with the maximum value normalized to one (the value 

3636 one appears only if the number of samples is odd). 

3637 

3638 See Also 

3639 -------- 

3640 bartlett, blackman, hamming, hanning 

3641 

3642 Notes 

3643 ----- 

3644 The Kaiser window is defined as 

3645 

3646 .. math:: w(n) = I_0\\left( \\beta \\sqrt{1-\\frac{4n^2}{(M-1)^2}} 

3647 \\right)/I_0(\\beta) 

3648 

3649 with 

3650 

3651 .. math:: \\quad -\\frac{M-1}{2} \\leq n \\leq \\frac{M-1}{2}, 

3652 

3653 where :math:`I_0` is the modified zeroth-order Bessel function. 

3654 

3655 The Kaiser was named for Jim Kaiser, who discovered a simple 

3656 approximation to the DPSS window based on Bessel functions. The Kaiser 

3657 window is a very good approximation to the Digital Prolate Spheroidal 

3658 Sequence, or Slepian window, which is the transform which maximizes the 

3659 energy in the main lobe of the window relative to total energy. 

3660 

3661 The Kaiser can approximate many other windows by varying the beta 

3662 parameter. 

3663 

3664 ==== ======================= 

3665 beta Window shape 

3666 ==== ======================= 

3667 0 Rectangular 

3668 5 Similar to a Hamming 

3669 6 Similar to a Hanning 

3670 8.6 Similar to a Blackman 

3671 ==== ======================= 

3672 

3673 A beta value of 14 is probably a good starting point. Note that as beta 

3674 gets large, the window narrows, and so the number of samples needs to be 

3675 large enough to sample the increasingly narrow spike, otherwise NaNs will 

3676 get returned. 

3677 

3678 Most references to the Kaiser window come from the signal processing 

3679 literature, where it is used as one of many windowing functions for 

3680 smoothing values. It is also known as an apodization (which means 

3681 "removing the foot", i.e. smoothing discontinuities at the beginning 

3682 and end of the sampled signal) or tapering function. 

3683 

3684 References 

3685 ---------- 

3686 .. [1] J. F. Kaiser, "Digital Filters" - Ch 7 in "Systems analysis by 

3687 digital computer", Editors: F.F. Kuo and J.F. Kaiser, p 218-285. 

3688 John Wiley and Sons, New York, (1966). 

3689 .. [2] E.R. Kanasewich, "Time Sequence Analysis in Geophysics", The 

3690 University of Alberta Press, 1975, pp. 177-178. 

3691 .. [3] Wikipedia, "Window function", 

3692 https://en.wikipedia.org/wiki/Window_function 

3693 

3694 Examples 

3695 -------- 

3696 >>> import numpy as np 

3697 >>> import matplotlib.pyplot as plt 

3698 >>> np.kaiser(12, 14) 

3699 array([7.72686684e-06, 3.46009194e-03, 4.65200189e-02, # may vary 

3700 2.29737120e-01, 5.99885316e-01, 9.45674898e-01, 

3701 9.45674898e-01, 5.99885316e-01, 2.29737120e-01, 

3702 4.65200189e-02, 3.46009194e-03, 7.72686684e-06]) 

3703 

3704 

3705 Plot the window and the frequency response. 

3706 

3707 .. plot:: 

3708 :include-source: 

3709 

3710 import matplotlib.pyplot as plt 

3711 from numpy.fft import fft, fftshift 

3712 window = np.kaiser(51, 14) 

3713 plt.plot(window) 

3714 plt.title("Kaiser window") 

3715 plt.ylabel("Amplitude") 

3716 plt.xlabel("Sample") 

3717 plt.show() 

3718 

3719 plt.figure() 

3720 A = fft(window, 2048) / 25.5 

3721 mag = np.abs(fftshift(A)) 

3722 freq = np.linspace(-0.5, 0.5, len(A)) 

3723 response = 20 * np.log10(mag) 

3724 response = np.clip(response, -100, 100) 

3725 plt.plot(freq, response) 

3726 plt.title("Frequency response of Kaiser window") 

3727 plt.ylabel("Magnitude [dB]") 

3728 plt.xlabel("Normalized frequency [cycles per sample]") 

3729 plt.axis('tight') 

3730 plt.show() 

3731 

3732 """ 

3733 # Ensures at least float64 via 0.0. M should be an integer, but conversion 

3734 # to double is safe for a range. (Simplified result_type with 0.0 

3735 # strongly typed. result-type is not/less order sensitive, but that mainly 

3736 # matters for integers anyway.) 

3737 values = np.array([0.0, M, beta]) 

3738 M = values[1] 

3739 beta = values[2] 

3740 

3741 if M == 1: 

3742 return np.ones(1, dtype=values.dtype) 

3743 n = arange(0, M) 

3744 alpha = (M-1)/2.0 

3745 return i0(beta * sqrt(1-((n-alpha)/alpha)**2.0))/i0(beta) 

3746 

3747 

3748def _sinc_dispatcher(x): 

3749 return (x,) 

3750 

3751 

3752@array_function_dispatch(_sinc_dispatcher) 

def sinc(x):
    r"""
    Return the normalized sinc function.

    The sinc function is :math:`\sin(\pi x)/(\pi x)` for :math:`x \ne 0`,
    with ``sinc(0)`` taking the limit value 1, so the result is everywhere
    continuous (indeed infinitely differentiable).

    .. note::

        Note the normalization factor of ``pi`` used in the definition.
        This is the most commonly used definition in signal processing.
        Use ``sinc(x / np.pi)`` to obtain the unnormalized sinc function
        :math:`\sin(x)/x` that is more common in mathematics.

    Parameters
    ----------
    x : ndarray
        Array (possibly multi-dimensional) of values for which to
        calculate ``sinc(x)``.

    Returns
    -------
    out : ndarray
        ``sinc(x)``, which has the same shape as the input.

    Notes
    -----
    The name sinc is short for "sine cardinal" or "sinus cardinalis".
    The sinc function appears in anti-aliasing, Lanczos resampling, and
    bandlimited interpolation, where the ideal interpolation kernel is
    proportional to it.

    References
    ----------
    .. [1] Weisstein, Eric W. "Sinc Function." From MathWorld--A Wolfram Web
           Resource. https://mathworld.wolfram.com/SincFunction.html
    .. [2] Wikipedia, "Sinc function",
           https://en.wikipedia.org/wiki/Sinc_function

    Examples
    --------
    >>> np.sinc(0.5)
    0.6366197723675814
    """
    arr = np.asanyarray(x)
    # Replace exact zeros with a tiny value: sin(t)/t rounds to exactly
    # 1.0 there in double precision, giving the limit value without a
    # separate branch or mask write-back.
    scaled = pi * where(arr == 0, 1.0e-20, arr)
    return sin(scaled) / scaled

3832 

3833 

def _ureduce(a, func, keepdims=False, **kwargs):
    """
    Internal Function.
    Call `func` with `a` as first argument swapping the axes to use extended
    axis on functions that don't support it natively.

    Returns result and a.shape with axis dims set to 1.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    func : callable
        Reduction function capable of receiving a single axis argument.
        It is called with `a` as first argument followed by `kwargs`.
    keepdims : bool, optional
        If True, reduced axes are retained with size one in the result
        (and `out`, if given, is expected to already have that shape).
    kwargs : keyword arguments
        additional keyword arguments to pass to `func`.

    Returns
    -------
    result : tuple
        Result of func(a, **kwargs) and a.shape with axis dims set to 1
        which can be used to reshape the result to the same shape a ufunc with
        keepdims=True would produce.

    """
    a = np.asanyarray(a)
    # `axis`/`out` are inspected here but still forwarded to `func`
    # (possibly rewritten below) via kwargs.
    axis = kwargs.get('axis', None)
    out = kwargs.get('out', None)

    if keepdims is np._NoValue:
        keepdims = False

    nd = a.ndim
    if axis is not None:
        axis = _nx.normalize_axis_tuple(axis, nd)

        if keepdims:
            if out is not None:
                # `out` has keepdims shape; index away the size-1 reduced
                # dims so `func` sees an out of the reduced shape.
                index_out = tuple(
                    0 if i in axis else slice(None) for i in range(nd))
                kwargs['out'] = out[(Ellipsis, ) + index_out]

        if len(axis) == 1:
            # Single axis: `func` can handle this natively.
            kwargs['axis'] = axis[0]
        else:
            keep = set(range(nd)) - set(axis)
            nkeep = len(keep)
            # swap axis that should not be reduced to front
            for i, s in enumerate(sorted(keep)):
                a = a.swapaxes(i, s)
            # merge reduced axis
            a = a.reshape(a.shape[:nkeep] + (-1,))
            kwargs['axis'] = -1
    else:
        if keepdims:
            if out is not None:
                # Full reduction with keepdims: out is all size-1 dims.
                index_out = (0, ) * nd
                kwargs['out'] = out[(Ellipsis, ) + index_out]

    r = func(a, **kwargs)

    if out is not None:
        # The caller-supplied out already holds the result (written via
        # the view created above) and has the right shape.
        return out

    if keepdims:
        # Re-insert size-1 dims where the reduction removed them.
        if axis is None:
            index_r = (np.newaxis, ) * nd
        else:
            index_r = tuple(
                np.newaxis if i in axis else slice(None)
                for i in range(nd))
        r = r[(Ellipsis, ) + index_r]

    return r

3909 

3910 

3911def _median_dispatcher( 

3912 a, axis=None, out=None, overwrite_input=None, keepdims=None): 

3913 return (a, out) 

3914 

3915 

3916@array_function_dispatch(_median_dispatcher) 

def median(a, axis=None, out=None, overwrite_input=False, keepdims=False):
    """
    Compute the median along the specified axis.

    Returns the median of the array elements.

    Parameters
    ----------
    a : array_like
        Input array or object that can be converted to an array.
    axis : {int, sequence of int, None}, optional
        Axis or axes along which the medians are computed.  The default,
        ``None``, computes the median of the flattened array.  A sequence
        of axes is flattened together before the reduction.
    out : ndarray, optional
        Alternative output array in which to place the result.  It must
        have the same shape and buffer length as the expected output,
        but the type (of the output) will be cast if necessary.
    overwrite_input : bool, optional
        If True, allow use of the memory of `a` for calculations; the
        input will be modified (partially sorted).  Treat it as
        undefined afterwards.  If True and `a` is not already an
        ndarray, an error is raised.  Default is False.
    keepdims : bool, optional
        If True, the reduced axes are left in the result as dimensions
        with size one, so the result broadcasts against the original
        array.

    Returns
    -------
    median : ndarray
        A new array holding the result, or `out` if it was provided.
        Integer inputs and floats smaller than ``float64`` produce
        ``np.float64`` output.

    See Also
    --------
    mean, percentile

    Notes
    -----
    Given a vector ``V`` of length ``N``, the median is
    ``V_sorted[(N-1)/2]`` when ``N`` is odd, and the average of the two
    middle values of ``V_sorted`` when ``N`` is even.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.median(a)
    np.float64(3.5)
    >>> np.median(a, axis=0)
    array([6.5, 4.5, 2.5])
    >>> np.median(a, axis=1)
    array([7., 2.])
    """
    # _ureduce handles multi-axis flattening, keepdims and `out`
    # plumbing; _median performs the actual single-axis reduction.
    reduction_kwargs = dict(axis=axis, out=out,
                            overwrite_input=overwrite_input)
    return _ureduce(a, func=_median, keepdims=keepdims, **reduction_kwargs)

4003 

4004 

4005def _median(a, axis=None, out=None, overwrite_input=False): 

4006 # can't be reasonably be implemented in terms of percentile as we have to 

4007 # call mean to not break astropy 

4008 a = np.asanyarray(a) 

4009 

4010 # Set the partition indexes 

4011 if axis is None: 

4012 sz = a.size 

4013 else: 

4014 sz = a.shape[axis] 

4015 if sz % 2 == 0: 

4016 szh = sz // 2 

4017 kth = [szh - 1, szh] 

4018 else: 

4019 kth = [(sz - 1) // 2] 

4020 

4021 # We have to check for NaNs (as of writing 'M' doesn't actually work). 

4022 supports_nans = np.issubdtype(a.dtype, np.inexact) or a.dtype.kind in 'Mm' 

4023 if supports_nans: 

4024 kth.append(-1) 

4025 

4026 if overwrite_input: 

4027 if axis is None: 

4028 part = a.ravel() 

4029 part.partition(kth) 

4030 else: 

4031 a.partition(kth, axis=axis) 

4032 part = a 

4033 else: 

4034 part = partition(a, kth, axis=axis) 

4035 

4036 if part.shape == (): 

4037 # make 0-D arrays work 

4038 return part.item() 

4039 if axis is None: 

4040 axis = 0 

4041 

4042 indexer = [slice(None)] * part.ndim 

4043 index = part.shape[axis] // 2 

4044 if part.shape[axis] % 2 == 1: 

4045 # index with slice to allow mean (below) to work 

4046 indexer[axis] = slice(index, index+1) 

4047 else: 

4048 indexer[axis] = slice(index-1, index+1) 

4049 indexer = tuple(indexer) 

4050 

4051 # Use mean in both odd and even case to coerce data type, 

4052 # using out array if needed. 

4053 rout = mean(part[indexer], axis=axis, out=out) 

4054 if supports_nans and sz > 0: 

4055 # If nans are possible, warn and replace by nans like mean would. 

4056 rout = np.lib._utils_impl._median_nancheck(part, rout, axis) 

4057 

4058 return rout 

4059 

4060 

4061def _percentile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, 

4062 method=None, keepdims=None, *, weights=None, 

4063 interpolation=None): 

4064 return (a, q, out, weights) 

4065 

4066 

4067@array_function_dispatch(_percentile_dispatcher) 

def percentile(a,
               q,
               axis=None,
               out=None,
               overwrite_input=False,
               method="linear",
               keepdims=False,
               *,
               weights=None,
               interpolation=None):
    """
    Compute the q-th percentile of the data along the specified axis.

    Returns the q-th percentile(s) of the array elements.

    Parameters
    ----------
    a : array_like of real numbers
        Input array or object that can be converted to an array.
    q : array_like of float
        Percentage or sequence of percentages for the percentiles to
        compute.  Values must be between 0 and 100 inclusive.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the percentiles are computed.  The
        default computes along a flattened version of the array.
    out : ndarray, optional
        Alternative output array in which to place the result.  It must
        have the same shape and buffer length as the expected output,
        but the type will be cast if necessary.
    overwrite_input : bool, optional
        If True, allow the input array `a` to be modified by
        intermediate calculations, to save memory; its contents are
        undefined afterwards.
    method : str, optional
        Estimation method.  The H&F [1]_ options are 'inverted_cdf',
        'averaged_inverted_cdf', 'closest_observation',
        'interpolated_inverted_cdf', 'hazen', 'weibull',
        'linear' (default), 'median_unbiased' and 'normal_unbiased';
        NumPy additionally offers the discontinuous 'lower', 'higher',
        'midpoint' and 'nearest' variants of 'linear'.  See
        `numpy.quantile` for details.

        .. versionchanged:: 1.22.0
            This argument was previously called "interpolation".
    keepdims : bool, optional
        If True, reduced axes are left in the result as size-one
        dimensions so the result broadcasts against `a`.
    weights : array_like, optional
        Weights associated with the values in `a`; either 1-D of the
        length of the reduced axis, or the same shape as `a`.  Only
        ``method="inverted_cdf"`` supports weights.

        .. versionadded:: 2.0.0
    interpolation : str, optional
        Deprecated name for the method keyword argument.

        .. deprecated:: 1.22.0

    Returns
    -------
    percentile : scalar or ndarray
        Scalar for a single percentile with ``axis=None``; otherwise the
        first axis of the result runs over the percentiles.  Integer
        inputs and floats smaller than ``float64`` produce ``float64``
        output.  If `out` is specified, that array is returned.

    See Also
    --------
    mean
    median : equivalent to ``percentile(..., 50)``
    nanpercentile
    quantile : equivalent to percentile, except q in the range [0, 1].

    Notes
    -----
    The behavior of `numpy.percentile` with percentage `q` is that of
    `numpy.quantile` with argument ``q/100``.  For more information,
    please see `numpy.quantile`.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.percentile(a, 50)
    3.5
    >>> np.percentile(a, 50, axis=0)
    array([6.5, 4.5, 2.5])

    References
    ----------
    .. [1] R. J. Hyndman and Y. Fan,
       "Sample quantiles in statistical packages,"
       The American Statistician, 50(4), pp. 361-365, 1996
    """
    if interpolation is not None:
        # Deprecated NumPy 1.22: warn and translate the old keyword.
        method = _check_interpolation_as_method(
            method, interpolation, "percentile")

    a = np.asanyarray(a)
    if a.dtype.kind == "c":
        raise TypeError("a must be an array of real numbers")

    # Divide by 100 in the data's own dtype when it is a float type so
    # e.g. float32 inputs keep float32 percentiles; re-wrap with
    # asanyarray to undo ufunc scalar decay (see gh-13105).
    divisor = a.dtype.type(100) if a.dtype.kind == "f" else 100
    q = asanyarray(np.true_divide(q, divisor))
    if not _quantile_is_valid(q):
        raise ValueError("Percentiles must be in the range [0, 100]")

    if weights is not None:
        if method != "inverted_cdf":
            msg = ("Only method 'inverted_cdf' supports weights. "
                   f"Got: {method}.")
            raise ValueError(msg)
        if axis is not None:
            axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis")
        weights = _weights_are_valid(weights=weights, a=a, axis=axis)
        if np.any(weights < 0):
            raise ValueError("Weights must be non-negative.")

    return _quantile_unchecked(
        a, q, axis, out, overwrite_input, method, keepdims, weights)

4275 

4276 

4277def _quantile_dispatcher(a, q, axis=None, out=None, overwrite_input=None, 

4278 method=None, keepdims=None, *, weights=None, 

4279 interpolation=None): 

4280 return (a, q, out, weights) 

4281 

4282 

4283@array_function_dispatch(_quantile_dispatcher) 

def quantile(a,
             q,
             axis=None,
             out=None,
             overwrite_input=False,
             method="linear",
             keepdims=False,
             *,
             weights=None,
             interpolation=None):
    """
    Compute the q-th quantile of the data along the specified axis.

    Parameters
    ----------
    a : array_like of real numbers
        Input array or object that can be converted to an array.
    q : array_like of float
        Probability or sequence of probabilities of the quantiles to
        compute.  Values must be between 0 and 1 inclusive.
    axis : {int, tuple of int, None}, optional
        Axis or axes along which the quantiles are computed.  The
        default computes along a flattened version of the array.
    out : ndarray, optional
        Alternative output array in which to place the result.  It must
        have the same shape and buffer length as the expected output,
        but the type will be cast if necessary.
    overwrite_input : bool, optional
        If True, allow the input array `a` to be modified by
        intermediate calculations, to save memory; its contents are
        undefined afterwards.
    method : str, optional
        Estimation method.  The recommended options, numbered as they
        appear in [1]_, are:

        1. 'inverted_cdf'
        2. 'averaged_inverted_cdf'
        3. 'closest_observation'
        4. 'interpolated_inverted_cdf'
        5. 'hazen'
        6. 'weibull'
        7. 'linear' (default)
        8. 'median_unbiased'
        9. 'normal_unbiased'

        The first three are discontinuous.  For backward compatibility
        the discontinuous 'lower', 'higher', 'midpoint' and 'nearest'
        variants of 'linear' are also accepted.  See Notes.

        .. versionchanged:: 1.22.0
            This argument was previously called "interpolation".
    keepdims : bool, optional
        If True, reduced axes are left in the result as size-one
        dimensions so the result broadcasts against `a`.
    weights : array_like, optional
        Weights associated with the values in `a`; either 1-D of the
        length of the reduced axis, or the same shape as `a`.  Only
        ``method="inverted_cdf"`` supports weights.

        .. versionadded:: 2.0.0
    interpolation : str, optional
        Deprecated name for the method keyword argument.

        .. deprecated:: 1.22.0

    Returns
    -------
    quantile : scalar or ndarray
        Scalar for a single probability with ``axis=None``; otherwise
        the first axis of the result runs over the quantiles.  Integer
        inputs and floats smaller than ``float64`` produce ``float64``
        output.  If `out` is specified, that array is returned.

    See Also
    --------
    mean
    percentile : equivalent to quantile, but with q in the range [0, 100].
    median : equivalent to ``quantile(..., 0.5)``
    nanquantile

    Notes
    -----
    By default the quantile is computed by linear interpolation between
    adjacent elements of ``y``, a sorted copy of `a`::

        (1-g)*y[j] + g*y[j+1]

    where ``j`` and ``g`` are the integral and fractional components of
    ``q * (n-1)``.  More generally ``j = (q*n + m - 1) // 1`` and
    ``g = (q*n + m - 1) % 1`` with method-dependent ``m`` — see H&F [1]_
    for the full treatment, including the definition of weighted
    quantiles via the coverage conditions of the (weighted) empirical
    distribution function.  Only ``method="inverted_cdf"`` supports
    weights.

    Examples
    --------
    >>> a = np.array([[10, 7, 4], [3, 2, 1]])
    >>> np.quantile(a, 0.5)
    3.5
    >>> np.quantile(a, 0.5, axis=0)
    array([6.5, 4.5, 2.5])

    References
    ----------
    .. [1] R. J. Hyndman and Y. Fan,
       "Sample quantiles in statistical packages,"
       The American Statistician, 50(4), pp. 361-365, 1996
    """
    if interpolation is not None:
        # Deprecated NumPy 1.22: warn and translate the old keyword.
        method = _check_interpolation_as_method(
            method, interpolation, "quantile")

    a = np.asanyarray(a)
    if a.dtype.kind == "c":
        raise TypeError("a must be an array of real numbers")

    # A scalar python q adopts the array's float dtype so that e.g.
    # float32 data yields float32 quantiles.
    if isinstance(q, (int, float)) and a.dtype.kind == "f":
        q = np.asanyarray(q, dtype=a.dtype)
    else:
        q = np.asanyarray(q)

    if not _quantile_is_valid(q):
        raise ValueError("Quantiles must be in the range [0, 1]")

    if weights is not None:
        if method != "inverted_cdf":
            msg = ("Only method 'inverted_cdf' supports weights. "
                   f"Got: {method}.")
            raise ValueError(msg)
        if axis is not None:
            axis = _nx.normalize_axis_tuple(axis, a.ndim, argname="axis")
        weights = _weights_are_valid(weights=weights, a=a, axis=axis)
        if np.any(weights < 0):
            raise ValueError("Weights must be non-negative.")

    return _quantile_unchecked(
        a, q, axis, out, overwrite_input, method, keepdims, weights)

4539 

4540 

def _quantile_unchecked(a, q, axis=None, out=None, overwrite_input=False,
                        method="linear", keepdims=False, weights=None):
    """Assumes that q is in [0, 1], and is an ndarray"""
    # Validation already happened in quantile/percentile; just forward
    # everything through the extended-axis machinery.
    return _ureduce(
        a,
        func=_quantile_ureduce_func,
        q=q,
        weights=weights,
        keepdims=keepdims,
        axis=axis,
        out=out,
        overwrite_input=overwrite_input,
        method=method,
    )

4559 

4560 

4561def _quantile_is_valid(q): 

4562 # avoid expensive reductions, relevant for arrays with < O(1000) elements 

4563 if q.ndim == 1 and q.size < 10: 

4564 for i in range(q.size): 

4565 if not (0.0 <= q[i] <= 1.0): 

4566 return False 

4567 else: 

4568 if not (q.min() >= 0 and q.max() <= 1): 

4569 return False 

4570 return True 

4571 

4572 

4573def _check_interpolation_as_method(method, interpolation, fname): 

4574 # Deprecated NumPy 1.22, 2021-11-08 

4575 warnings.warn( 

4576 f"the `interpolation=` argument to {fname} was renamed to " 

4577 "`method=`, which has additional options.\n" 

4578 "Users of the modes 'nearest', 'lower', 'higher', or " 

4579 "'midpoint' are encouraged to review the method they used. " 

4580 "(Deprecated NumPy 1.22)", 

4581 DeprecationWarning, stacklevel=4) 

4582 if method != "linear": 

4583 # sanity check, we assume this basically never happens 

4584 raise TypeError( 

4585 "You shall not pass both `method` and `interpolation`!\n" 

4586 "(`interpolation` is Deprecated in favor of `method`)") 

4587 return interpolation 

4588 

4589 

4590def _compute_virtual_index(n, quantiles, alpha: float, beta: float): 

4591 """ 

4592 Compute the floating point indexes of an array for the linear 

4593 interpolation of quantiles. 

4594 n : array_like 

4595 The sample sizes. 

4596 quantiles : array_like 

4597 The quantiles values. 

4598 alpha : float 

4599 A constant used to correct the index computed. 

4600 beta : float 

4601 A constant used to correct the index computed. 

4602 

4603 alpha and beta values depend on the chosen method 

4604 (see quantile documentation) 

4605 

4606 Reference: 

4607 Hyndman&Fan paper "Sample Quantiles in Statistical Packages", 

4608 DOI: 10.1080/00031305.1996.10473566 

4609 """ 

4610 return n * quantiles + ( 

4611 alpha + quantiles * (1 - alpha - beta) 

4612 ) - 1 

4613 

4614 

4615def _get_gamma(virtual_indexes, previous_indexes, method): 

4616 """ 

4617 Compute gamma (a.k.a 'm' or 'weight') for the linear interpolation 

4618 of quantiles. 

4619 

4620 virtual_indexes : array_like 

4621 The indexes where the percentile is supposed to be found in the sorted 

4622 sample. 

4623 previous_indexes : array_like 

4624 The floor values of virtual_indexes. 

4625 interpolation : dict 

4626 The interpolation method chosen, which may have a specific rule 

4627 modifying gamma. 

4628 

4629 gamma is usually the fractional part of virtual_indexes but can be modified 

4630 by the interpolation method. 

4631 """ 

4632 gamma = np.asanyarray(virtual_indexes - previous_indexes) 

4633 gamma = method["fix_gamma"](gamma, virtual_indexes) 

4634 # Ensure both that we have an array, and that we keep the dtype 

4635 # (which may have been matched to the input array). 

4636 return np.asanyarray(gamma, dtype=virtual_indexes.dtype) 

4637 

4638 

def _lerp(a, b, t, out=None):
    """
    Compute the linear interpolation weighted by gamma on each point of
    two same shape array.

    a : array_like
        Left bound.
    b : array_like
        Right bound.
    t : array_like
        The interpolation weight.
    out : array_like
        Output array.
    """
    diff_b_a = subtract(b, a)
    # asanyarray is a stop-gap until gh-13105
    lerp_interpolation = asanyarray(add(a, diff_b_a * t, out=out))
    # Where t >= 0.5, recompute from the right bound as b - diff*(1-t).
    # This keeps the interpolation symmetric in (a, b) and exact at t == 1
    # (the plain a + diff*t form can miss b by one ulp).
    subtract(b, diff_b_a * (1 - t), out=lerp_interpolation, where=t >= 0.5,
             casting='unsafe', dtype=type(lerp_interpolation.dtype))
    if lerp_interpolation.ndim == 0 and out is None:
        lerp_interpolation = lerp_interpolation[()]  # unpack 0d arrays
    return lerp_interpolation

4661 

4662 

4663def _get_gamma_mask(shape, default_value, conditioned_value, where): 

4664 out = np.full(shape, default_value) 

4665 np.copyto(out, conditioned_value, where=where, casting="unsafe") 

4666 return out 

4667 

4668 

4669def _discrete_interpolation_to_boundaries(index, gamma_condition_fun): 

4670 previous = np.floor(index) 

4671 next = previous + 1 

4672 gamma = index - previous 

4673 res = _get_gamma_mask(shape=index.shape, 

4674 default_value=next, 

4675 conditioned_value=previous, 

4676 where=gamma_condition_fun(gamma, index) 

4677 ).astype(np.intp) 

4678 # Some methods can lead to out-of-bound integers, clip them: 

4679 res[res < 0] = 0 

4680 return res 

4681 

4682 

def _closest_observation(n, quantiles):
    """H&F method 3: round the virtual index to the closest observation,
    breaking ties toward the nearest even order statistic."""
    # "choose the nearest even order statistic at g=0" (H&F (1996) pp. 362).
    # Order is 1-based so for zero-based indexing round to nearest odd index.
    def take_lower(gamma, index):
        return (gamma == 0) & (np.floor(index) % 2 == 1)

    shifted_index = (n * quantiles) - 1 - 0.5
    return _discrete_interpolation_to_boundaries(shifted_index, take_lower)

4689 

4690 

def _inverted_cdf(n, quantiles):
    """H&F method 1: step function inverse of the empirical CDF — take the
    lower neighbour exactly when the virtual index is integral."""
    def take_lower(gamma, _):
        return gamma == 0

    return _discrete_interpolation_to_boundaries((n * quantiles) - 1,
                                                 take_lower)

4695 

4696 

def _quantile_ureduce_func(
        a: np.array,
        q: np.array,
        weights: np.array,
        axis: int | None = None,
        out=None,
        overwrite_input: bool = False,
        method="linear",
) -> np.array:
    """Prepare `a`/`weights` (flatten or copy as needed) and delegate the
    actual quantile computation to `_quantile`."""
    if q.ndim > 2:
        # The code below works fine for nd, but it might not have useful
        # semantics. For now, keep the supported dimensions the same as it
        # was before.
        raise ValueError("q must be a scalar or 1d")

    if axis is None:
        # Collapse everything to one axis.  When we may overwrite the
        # input, ravel (a view when possible) suffices; otherwise flatten
        # always returns a fresh copy.
        axis = 0
        if overwrite_input:
            arr = a.ravel()
            wgt = None if weights is None else weights.ravel()
        else:
            arr = a.flatten()
            wgt = None if weights is None else weights.flatten()
    else:
        # Along a real axis: copy unless the caller allows clobbering `a`
        # (_quantile partitions/sorts in place).
        arr = a if overwrite_input else a.copy()
        wgt = weights

    return _quantile(arr,
                     quantiles=q,
                     axis=axis,
                     method=method,
                     out=out,
                     weights=wgt)

4734 

4735 

4736def _get_indexes(arr, virtual_indexes, valid_values_count): 

4737 """ 

4738 Get the valid indexes of arr neighbouring virtual_indexes. 

4739 Note 

4740 This is a companion function to linear interpolation of 

4741 Quantiles 

4742 

4743 Returns 

4744 ------- 

4745 (previous_indexes, next_indexes): Tuple 

4746 A Tuple of virtual_indexes neighbouring indexes 

4747 """ 

4748 previous_indexes = np.asanyarray(np.floor(virtual_indexes)) 

4749 next_indexes = np.asanyarray(previous_indexes + 1) 

4750 indexes_above_bounds = virtual_indexes >= valid_values_count - 1 

4751 # When indexes is above max index, take the max value of the array 

4752 if indexes_above_bounds.any(): 

4753 previous_indexes[indexes_above_bounds] = -1 

4754 next_indexes[indexes_above_bounds] = -1 

4755 # When indexes is below min index, take the min value of the array 

4756 indexes_below_bounds = virtual_indexes < 0 

4757 if indexes_below_bounds.any(): 

4758 previous_indexes[indexes_below_bounds] = 0 

4759 next_indexes[indexes_below_bounds] = 0 

4760 if np.issubdtype(arr.dtype, np.inexact): 

4761 # After the sort, slices having NaNs will have for last element a NaN 

4762 virtual_indexes_nans = np.isnan(virtual_indexes) 

4763 if virtual_indexes_nans.any(): 

4764 previous_indexes[virtual_indexes_nans] = -1 

4765 next_indexes[virtual_indexes_nans] = -1 

4766 previous_indexes = previous_indexes.astype(np.intp) 

4767 next_indexes = next_indexes.astype(np.intp) 

4768 return previous_indexes, next_indexes 

4769 

4770 

def _quantile(
        arr: np.array,
        quantiles: np.array,
        axis: int = -1,
        method="linear",
        out=None,
        weights=None,
):
    """
    Private function that doesn't support extended axis or keepdims.
    These methods are extended to this function using _ureduce
    See nanpercentile for parameter usage
    It computes the quantiles of the array for the given axis.
    A linear interpolation is performed based on the `interpolation`.

    By default, the method is "linear" where alpha == beta == 1 which
    performs the 7th method of Hyndman&Fan.
    With "median_unbiased" we get alpha == beta == 1/3
    thus the 8th method of Hyndman&Fan.
    """
    # --- Setup
    arr = np.asanyarray(arr)
    values_count = arr.shape[axis]
    # The dimensions of `q` are prepended to the output shape, so we need the
    # axis being sampled from `arr` to be last.
    if axis != 0:  # But moveaxis is slow, so only call it if necessary.
        arr = np.moveaxis(arr, axis, destination=0)
    # inexact (float/complex) and datetime/timedelta dtypes can hold NaN/NaT.
    supports_nans = (
        np.issubdtype(arr.dtype, np.inexact) or arr.dtype.kind in 'Mm'
    )

    if weights is None:
        # --- Computation of indexes
        # Index where to find the value in the sorted array.
        # Virtual because it is a floating point value, not an valid index.
        # The nearest neighbours are used for interpolation
        try:
            method_props = _QuantileMethods[method]
        except KeyError:
            raise ValueError(
                f"{method!r} is not a valid method. Use one of: "
                f"{_QuantileMethods.keys()}") from None
        virtual_indexes = method_props["get_virtual_index"](values_count,
                                                            quantiles)
        virtual_indexes = np.asanyarray(virtual_indexes)

        # Discrete methods (fix_gamma is None) and integral linear indexes
        # can take elements directly without interpolating.
        if method_props["fix_gamma"] is None:
            supports_integers = True
        else:
            int_virtual_indices = np.issubdtype(virtual_indexes.dtype,
                                                np.integer)
            supports_integers = method == 'linear' and int_virtual_indices

        if supports_integers:
            # No interpolation needed, take the points along axis
            if supports_nans:
                # may contain nan, which would sort to the end
                # (partitioning index -1 makes arr[-1] the slice maximum)
                arr.partition(
                    concatenate((virtual_indexes.ravel(), [-1])), axis=0,
                )
                slices_having_nans = np.isnan(arr[-1, ...])
            else:
                # cannot contain nan
                arr.partition(virtual_indexes.ravel(), axis=0)
                slices_having_nans = np.array(False, dtype=bool)
            result = take(arr, virtual_indexes, axis=0, out=out)
        else:
            previous_indexes, next_indexes = _get_indexes(arr,
                                                          virtual_indexes,
                                                          values_count)
            # --- Sorting
            # Partition at every neighbour index plus both extremes (0, -1)
            # so out-of-bound indexes resolved to 0/-1 land on min/max.
            arr.partition(
                np.unique(np.concatenate(([0, -1],
                                          previous_indexes.ravel(),
                                          next_indexes.ravel(),
                                          ))),
                axis=0)
            if supports_nans:
                slices_having_nans = np.isnan(arr[-1, ...])
            else:
                slices_having_nans = None
            # --- Get values from indexes
            previous = arr[previous_indexes]
            # NOTE: `next` shadows the builtin within this branch.
            next = arr[next_indexes]
            # --- Linear interpolation
            gamma = _get_gamma(virtual_indexes, previous_indexes,
                               method_props)
            result_shape = virtual_indexes.shape + (1,) * (arr.ndim - 1)
            gamma = gamma.reshape(result_shape)
            result = _lerp(previous,
                           next,
                           gamma,
                           out=out)
    else:
        # Weighted case
        # This implements method="inverted_cdf", the only supported weighted
        # method, which needs to sort anyway.
        weights = np.asanyarray(weights)
        if axis != 0:
            weights = np.moveaxis(weights, axis, destination=0)
        index_array = np.argsort(arr, axis=0, kind="stable")

        # arr = arr[index_array, ...]  # but this adds trailing dimensions of
        # 1.
        arr = np.take_along_axis(arr, index_array, axis=0)
        if weights.shape == arr.shape:
            weights = np.take_along_axis(weights, index_array, axis=0)
        else:
            # weights is 1d
            weights = weights.reshape(-1)[index_array, ...]

        if supports_nans:
            # may contain nan, which would sort to the end
            slices_having_nans = np.isnan(arr[-1, ...])
        else:
            # cannot contain nan
            slices_having_nans = np.array(False, dtype=bool)

        # We use the weights to calculate the empirical cumulative
        # distribution function cdf
        cdf = weights.cumsum(axis=0, dtype=np.float64)
        cdf /= cdf[-1, ...]  # normalization to 1
        # Search index i such that
        # sum(weights[j], j=0..i-1) < quantile <= sum(weights[j], j=0..i)
        # is then equivalent to
        # cdf[i-1] < quantile <= cdf[i]
        # Unfortunately, searchsorted only accepts 1-d arrays as first
        # argument, so we will need to iterate over dimensions.

        # Without the following cast, searchsorted can return surprising
        # results, e.g.
        # np.searchsorted(np.array([0.2, 0.4, 0.6, 0.8, 1.]),
        # np.array(0.4, dtype=np.float32), side="left")
        # returns 2 instead of 1 because 0.4 is not binary representable.
        if quantiles.dtype.kind == "f":
            cdf = cdf.astype(quantiles.dtype)
        # Weights must be non-negative, so we might have zero weights at the
        # beginning leading to some leading zeros in cdf. The call to
        # np.searchsorted for quantiles=0 will then pick the first element,
        # but should pick the first one larger than zero. We
        # therefore simply set 0 values in cdf to -1.
        if np.any(cdf[0, ...] == 0):
            cdf[cdf == 0] = -1

        def find_cdf_1d(arr, cdf):
            # 1-d step-function inverse of the cdf.
            indices = np.searchsorted(cdf, quantiles, side="left")
            # We might have reached the maximum with i = len(arr), e.g. for
            # quantiles = 1, and need to cut it to len(arr) - 1.
            indices = minimum(indices, values_count - 1)
            result = take(arr, indices, axis=0)
            return result

        r_shape = arr.shape[1:]
        if quantiles.ndim > 0:
            r_shape = quantiles.shape + r_shape
        if out is None:
            result = np.empty_like(arr, shape=r_shape)
        else:
            if out.shape != r_shape:
                msg = (f"Wrong shape of argument 'out', shape={r_shape} is "
                       f"required; got shape={out.shape}.")
                raise ValueError(msg)
            result = out

        # See apply_along_axis, which we do for axis=0. Note that Ni = (,)
        # always, so we remove it here.
        Nk = arr.shape[1:]
        for kk in np.ndindex(Nk):
            result[(...,) + kk] = find_cdf_1d(
                arr[np.s_[:, ] + kk], cdf[np.s_[:, ] + kk]
            )

        # Make result the same as in unweighted inverted_cdf.
        if result.shape == () and result.dtype == np.dtype("O"):
            result = result.item()

    # Propagate NaN for any slice that contained one (arr[-1] holds the
    # slice maximum after sorting/partitioning, hence the NaN if present).
    if np.any(slices_having_nans):
        if result.ndim == 0 and out is None:
            # can't write to a scalar, but indexing will be correct
            result = arr[-1]
        else:
            np.copyto(result, arr[-1, ...], where=slices_having_nans)
    return result

4953 

4954 

4955def _trapezoid_dispatcher(y, x=None, dx=None, axis=None): 

4956 return (y, x) 

4957 

4958 

@array_function_dispatch(_trapezoid_dispatcher)
def trapezoid(y, x=None, dx=1.0, axis=-1):
    r"""
    Integrate along the given axis using the composite trapezoidal rule.

    If `x` is provided, the integration happens in sequence along its
    elements - they are not sorted.

    Integrate `y` (`x`) along each 1d slice on the given axis, compute
    :math:`\int y(x) dx`.
    When `x` is specified, this integrates along the parametric curve,
    computing :math:`\int_t y(t) dt =
    \int_t y(t) \left.\frac{dx}{dt}\right|_{x=x(t)} dt`.

    .. versionadded:: 2.0.0

    Parameters
    ----------
    y : array_like
        Input array to integrate.
    x : array_like, optional
        The sample points corresponding to the `y` values. If `x` is None,
        the sample points are assumed to be evenly spaced `dx` apart. The
        default is None.
    dx : scalar, optional
        The spacing between sample points when `x` is None. The default is 1.
    axis : int, optional
        The axis along which to integrate.

    Returns
    -------
    trapezoid : float or ndarray
        Definite integral of `y` = n-dimensional array as approximated along
        a single axis by the trapezoidal rule. If `y` is a 1-dimensional array,
        then the result is a float. If `n` is greater than 1, then the result
        is an `n`-1 dimensional array.

    See Also
    --------
    sum, cumsum

    Notes
    -----
    Image [2]_ illustrates trapezoidal rule -- y-axis locations of points
    will be taken from `y` array, by default x-axis distances between
    points will be 1.0, alternatively they can be provided with `x` array
    or with `dx` scalar. Return value will be equal to combined area under
    the red lines.


    References
    ----------
    .. [1] Wikipedia page: https://en.wikipedia.org/wiki/Trapezoidal_rule

    .. [2] Illustration image:
           https://en.wikipedia.org/wiki/File:Composite_trapezoidal_rule_illustration.png

    Examples
    --------
    >>> import numpy as np

    Use the trapezoidal rule on evenly spaced points:

    >>> np.trapezoid([1, 2, 3])
    4.0

    The spacing between sample points can be selected by either the
    ``x`` or ``dx`` arguments:

    >>> np.trapezoid([1, 2, 3], x=[4, 6, 8])
    8.0
    >>> np.trapezoid([1, 2, 3], dx=2)
    8.0

    Using a decreasing ``x`` corresponds to integrating in reverse:

    >>> np.trapezoid([1, 2, 3], x=[8, 6, 4])
    -8.0

    More generally ``x`` is used to integrate along a parametric curve. We can
    estimate the integral :math:`\int_0^1 x^2 = 1/3` using:

    >>> x = np.linspace(0, 1, num=50)
    >>> y = x**2
    >>> np.trapezoid(y, x)
    0.33340274885464394

    Or estimate the area of a circle, noting we repeat the sample which closes
    the curve:

    >>> theta = np.linspace(0, 2 * np.pi, num=1000, endpoint=True)
    >>> np.trapezoid(np.cos(theta), x=np.sin(theta))
    3.141571941375841

    ``np.trapezoid`` can be applied along a specified axis to do multiple
    computations in one call:

    >>> a = np.arange(6).reshape(2, 3)
    >>> a
    array([[0, 1, 2],
           [3, 4, 5]])
    >>> np.trapezoid(a, axis=0)
    array([1.5, 2.5, 3.5])
    >>> np.trapezoid(a, axis=1)
    array([2., 8.])
    """

    y = asanyarray(y)
    if x is None:
        # Evenly spaced samples: the spacing is the scalar dx.
        d = dx
    else:
        x = asanyarray(x)
        if x.ndim == 1:
            d = diff(x)
            # reshape to correct shape so d broadcasts against y along `axis`
            shape = [1]*y.ndim
            shape[axis] = d.shape[0]
            d = d.reshape(shape)
        else:
            d = diff(x, axis=axis)
    nd = y.ndim
    # slice1/slice2 select the right/left endpoints of each trapezoid
    # along `axis`.
    slice1 = [slice(None)]*nd
    slice2 = [slice(None)]*nd
    slice1[axis] = slice(1, None)
    slice2[axis] = slice(None, -1)
    try:
        ret = (d * (y[tuple(slice1)] + y[tuple(slice2)]) / 2.0).sum(axis)
    except ValueError:
        # Operations didn't work, cast to ndarray
        d = np.asarray(d)
        y = np.asarray(y)
        ret = add.reduce(d * (y[tuple(slice1)]+y[tuple(slice2)])/2.0, axis)
    return ret

5092 

5093 

@set_module('numpy')
def trapz(y, x=None, dx=1.0, axis=-1):
    """
    `trapz` is deprecated in NumPy 2.0.

    Please use `trapezoid` instead, or one of the numerical integration
    functions in `scipy.integrate`.
    """
    # Deprecated in NumPy 2.0, 2023-08-18
    msg = ("`trapz` is deprecated. Use `trapezoid` instead, or one of the "
           "numerical integration functions in `scipy.integrate`.")
    # stacklevel=2 points the warning at the caller of trapz.
    warnings.warn(msg, DeprecationWarning, stacklevel=2)
    return trapezoid(y, x=x, dx=dx, axis=axis)

5110 

5111 

5112def _meshgrid_dispatcher(*xi, copy=None, sparse=None, indexing=None): 

5113 return xi 

5114 

5115 

# Based on scitools meshgrid
@array_function_dispatch(_meshgrid_dispatcher)
def meshgrid(*xi, copy=True, sparse=False, indexing='xy'):
    """
    Return a tuple of coordinate matrices from coordinate vectors.

    Make N-D coordinate arrays for vectorized evaluations of
    N-D scalar/vector fields over N-D grids, given
    one-dimensional coordinate arrays x1, x2,..., xn.

    Parameters
    ----------
    x1, x2,..., xn : array_like
        1-D arrays representing the coordinates of a grid.
    indexing : {'xy', 'ij'}, optional
        Cartesian ('xy', default) or matrix ('ij') indexing of output.
        See Notes for more details.
    sparse : bool, optional
        If True the shape of the returned coordinate array for dimension *i*
        is reduced from ``(N1, ..., Ni, ... Nn)`` to
        ``(1, ..., 1, Ni, 1, ..., 1)``. These sparse coordinate grids are
        intended to be use with :ref:`basics.broadcasting`. When all
        coordinates are used in an expression, broadcasting still leads to a
        fully-dimensonal result array.

        Default is False.

    copy : bool, optional
        If False, a view into the original arrays are returned in order to
        conserve memory. Default is True. Please note that
        ``sparse=False, copy=False`` will likely return non-contiguous
        arrays. Furthermore, more than one element of a broadcast array
        may refer to a single memory location. If you need to write to the
        arrays, make copies first.

    Returns
    -------
    X1, X2,..., XN : tuple of ndarrays
        For vectors `x1`, `x2`,..., `xn` with lengths ``Ni=len(xi)``,
        returns ``(N1, N2, N3,..., Nn)`` shaped arrays if indexing='ij'
        or ``(N2, N1, N3,..., Nn)`` shaped arrays if indexing='xy'
        with the elements of `xi` repeated to fill the matrix along
        the first dimension for `x1`, the second for `x2` and so on.

    Notes
    -----
    This function supports both indexing conventions through the indexing
    keyword argument. Giving the string 'ij' returns a meshgrid with
    matrix indexing, while 'xy' returns a meshgrid with Cartesian indexing.
    In the 2-D case with inputs of length M and N, the outputs are of shape
    (N, M) for 'xy' indexing and (M, N) for 'ij' indexing. In the 3-D case
    with inputs of length M, N and P, outputs are of shape (N, M, P) for
    'xy' indexing and (M, N, P) for 'ij' indexing. The difference is
    illustrated by the following code snippet::

        xv, yv = np.meshgrid(x, y, indexing='ij')
        for i in range(nx):
            for j in range(ny):
                # treat xv[i,j], yv[i,j]

        xv, yv = np.meshgrid(x, y, indexing='xy')
        for i in range(nx):
            for j in range(ny):
                # treat xv[j,i], yv[j,i]

    In the 1-D and 0-D case, the indexing and sparse keywords have no effect.

    See Also
    --------
    mgrid : Construct a multi-dimensional "meshgrid" using indexing notation.
    ogrid : Construct an open multi-dimensional "meshgrid" using indexing
            notation.
    :ref:`how-to-index`

    Examples
    --------
    >>> import numpy as np
    >>> nx, ny = (3, 2)
    >>> x = np.linspace(0, 1, nx)
    >>> y = np.linspace(0, 1, ny)
    >>> xv, yv = np.meshgrid(x, y)
    >>> xv
    array([[0. , 0.5, 1. ],
           [0. , 0.5, 1. ]])
    >>> yv
    array([[0., 0., 0.],
           [1., 1., 1.]])

    The result of `meshgrid` is a coordinate grid:

    >>> import matplotlib.pyplot as plt
    >>> plt.plot(xv, yv, marker='o', color='k', linestyle='none')
    >>> plt.show()

    You can create sparse output arrays to save memory and computation time.

    >>> xv, yv = np.meshgrid(x, y, sparse=True)
    >>> xv
    array([[0. , 0.5, 1. ]])
    >>> yv
    array([[0.],
           [1.]])

    `meshgrid` is very useful to evaluate functions on a grid. If the
    function depends on all coordinates, both dense and sparse outputs can be
    used.

    >>> x = np.linspace(-5, 5, 101)
    >>> y = np.linspace(-5, 5, 101)
    >>> # full coordinate arrays
    >>> xx, yy = np.meshgrid(x, y)
    >>> zz = np.sqrt(xx**2 + yy**2)
    >>> xx.shape, yy.shape, zz.shape
    ((101, 101), (101, 101), (101, 101))
    >>> # sparse coordinate arrays
    >>> xs, ys = np.meshgrid(x, y, sparse=True)
    >>> zs = np.sqrt(xs**2 + ys**2)
    >>> xs.shape, ys.shape, zs.shape
    ((1, 101), (101, 1), (101, 101))
    >>> np.array_equal(zz, zs)
    True

    >>> h = plt.contourf(x, y, zs)
    >>> plt.axis('scaled')
    >>> plt.colorbar()
    >>> plt.show()
    """
    ndim = len(xi)

    if indexing not in ['xy', 'ij']:
        raise ValueError(
            "Valid values for `indexing` are 'xy' and 'ij'.")

    # Reshape each 1-D input so that its data lies along its own axis:
    # input i gets shape (1, ..., -1, ..., 1) with -1 in position i.
    s0 = (1,) * ndim
    output = [np.asanyarray(x).reshape(s0[:i] + (-1,) + s0[i + 1:])
              for i, x in enumerate(xi)]

    if indexing == 'xy' and ndim > 1:
        # switch first and second axis (in-place shape assignment keeps
        # these as views of the inputs)
        output[0].shape = (1, -1) + s0[2:]
        output[1].shape = (-1, 1) + s0[2:]

    if not sparse:
        # Return the full N-D matrix (not only the 1-D vector)
        output = np.broadcast_arrays(*output, subok=True)

    if copy:
        output = tuple(x.copy() for x in output)

    return output

5266 

5267 

5268def _delete_dispatcher(arr, obj, axis=None): 

5269 return (arr, obj) 

5270 

5271 

@array_function_dispatch(_delete_dispatcher)
def delete(arr, obj, axis=None):
    """
    Return a new array with sub-arrays along an axis deleted. For a one
    dimensional array, this returns those entries not returned by
    `arr[obj]`.

    Parameters
    ----------
    arr : array_like
        Input array.
    obj : slice, int, array-like of ints or bools
        Indicate indices of sub-arrays to remove along the specified axis.

        .. versionchanged:: 1.19.0
            Boolean indices are now treated as a mask of elements to remove,
            rather than being cast to the integers 0 and 1.

    axis : int, optional
        The axis along which to delete the subarray defined by `obj`.
        If `axis` is None, `obj` is applied to the flattened array.

    Returns
    -------
    out : ndarray
        A copy of `arr` with the elements specified by `obj` removed. Note
        that `delete` does not occur in-place. If `axis` is None, `out` is
        a flattened array.

    See Also
    --------
    insert : Insert elements into an array.
    append : Append elements at the end of an array.

    Notes
    -----
    Often it is preferable to use a boolean mask. For example:

    >>> arr = np.arange(12) + 1
    >>> mask = np.ones(len(arr), dtype=bool)
    >>> mask[[0,2,4]] = False
    >>> result = arr[mask,...]

    Is equivalent to ``np.delete(arr, [0,2,4], axis=0)``, but allows further
    use of `mask`.

    Examples
    --------
    >>> import numpy as np
    >>> arr = np.array([[1,2,3,4], [5,6,7,8], [9,10,11,12]])
    >>> arr
    array([[ 1,  2,  3,  4],
           [ 5,  6,  7,  8],
           [ 9, 10, 11, 12]])
    >>> np.delete(arr, 1, 0)
    array([[ 1,  2,  3,  4],
           [ 9, 10, 11, 12]])

    >>> np.delete(arr, np.s_[::2], 1)
    array([[ 2,  4],
           [ 6,  8],
           [10, 12]])
    >>> np.delete(arr, [1,3,5], None)
    array([ 1,  3,  5,  7,  8,  9, 10, 11, 12])

    """
    conv = _array_converter(arr)
    arr, = conv.as_arrays(subok=False)

    ndim = arr.ndim
    arrorder = 'F' if arr.flags.fnc else 'C'
    if axis is None:
        if ndim != 1:
            arr = arr.ravel()
        # needed for np.matrix, which is still not 1d after being ravelled
        ndim = arr.ndim
        axis = ndim - 1
    else:
        axis = normalize_axis_index(axis, ndim)

    slobj = [slice(None)]*ndim
    N = arr.shape[axis]
    newshape = list(arr.shape)

    if isinstance(obj, slice):
        start, stop, step = obj.indices(N)
        xr = range(start, stop, step)
        numtodel = len(xr)

        if numtodel <= 0:
            # nothing to delete: return a copy of the input
            return conv.wrap(arr.copy(order=arrorder), to_scalar=False)

        # Invert if step is negative:
        if step < 0:
            step = -step
            start = xr[-1]
            stop = xr[0] + 1

        newshape[axis] -= numtodel
        new = empty(newshape, arr.dtype, arrorder)
        # copy initial chunk (everything before `start` is kept verbatim)
        if start == 0:
            pass
        else:
            slobj[axis] = slice(None, start)
            new[tuple(slobj)] = arr[tuple(slobj)]
        # copy end chunk (everything from `stop` on, shifted left)
        if stop == N:
            pass
        else:
            slobj[axis] = slice(stop-numtodel, None)
            slobj2 = [slice(None)]*ndim
            slobj2[axis] = slice(stop, None)
            new[tuple(slobj)] = arr[tuple(slobj2)]
        # copy middle pieces (only needed when the slice is strided)
        if step == 1:
            pass
        else:  # use array indexing.
            keep = ones(stop-start, dtype=bool)
            keep[:stop-start:step] = False
            slobj[axis] = slice(start, stop-numtodel)
            slobj2 = [slice(None)]*ndim
            slobj2[axis] = slice(start, stop)
            arr = arr[tuple(slobj2)]
            slobj2[axis] = keep
            new[tuple(slobj)] = arr[tuple(slobj2)]

        return conv.wrap(new, to_scalar=False)

    # bool is excluded: a Python bool is an int subclass but must be
    # treated as a (mis-shaped) mask, not an index.
    if isinstance(obj, (int, integer)) and not isinstance(obj, bool):
        single_value = True
    else:
        single_value = False
        _obj = obj
        obj = np.asarray(obj)
        # `size == 0` to allow empty lists similar to indexing, but (as there)
        # is really too generic:
        if obj.size == 0 and not isinstance(_obj, np.ndarray):
            obj = obj.astype(intp)
        elif obj.size == 1 and obj.dtype.kind in "ui":
            # For a size 1 integer array we can use the single-value path
            # (most dtypes, except boolean, should just fail later).
            obj = obj.item()
            single_value = True

    if single_value:
        # optimization for a single value
        if (obj < -N or obj >= N):
            raise IndexError(
                "index %i is out of bounds for axis %i with "
                "size %i" % (obj, axis, N))
        if (obj < 0):
            obj += N
        newshape[axis] -= 1
        new = empty(newshape, arr.dtype, arrorder)
        # copy the two halves around the deleted position
        slobj[axis] = slice(None, obj)
        new[tuple(slobj)] = arr[tuple(slobj)]
        slobj[axis] = slice(obj, None)
        slobj2 = [slice(None)]*ndim
        slobj2[axis] = slice(obj+1, None)
        new[tuple(slobj)] = arr[tuple(slobj2)]
    else:
        if obj.dtype == bool:
            if obj.shape != (N,):
                raise ValueError('boolean array argument obj to delete '
                                 'must be one dimensional and match the axis '
                                 'length of {}'.format(N))

            # optimization, the other branch is slower
            keep = ~obj
        else:
            keep = ones(N, dtype=bool)
            keep[obj,] = False

        slobj[axis] = keep
        new = arr[tuple(slobj)]

    return conv.wrap(new, to_scalar=False)

5450 

5451 

5452def _insert_dispatcher(arr, obj, values, axis=None): 

5453 return (arr, obj, values) 

5454 

5455 

5456@array_function_dispatch(_insert_dispatcher) 

5457def insert(arr, obj, values, axis=None): 

5458 """ 

5459 Insert values along the given axis before the given indices. 

5460 

5461 Parameters 

5462 ---------- 

5463 arr : array_like 

5464 Input array. 

5465 obj : slice, int, array-like of ints or bools 

5466 Object that defines the index or indices before which `values` is 

5467 inserted. 

5468 

5469 .. versionchanged:: 2.1.2 

5470 Boolean indices are now treated as a mask of elements to insert, 

5471 rather than being cast to the integers 0 and 1. 

5472 

5473 Support for multiple insertions when `obj` is a single scalar or a 

5474 sequence with one element (similar to calling insert multiple 

5475 times). 

5476 values : array_like 

5477 Values to insert into `arr`. If the type of `values` is different 

5478 from that of `arr`, `values` is converted to the type of `arr`. 

5479 `values` should be shaped so that ``arr[...,obj,...] = values`` 

5480 is legal. 

5481 axis : int, optional 

5482 Axis along which to insert `values`. If `axis` is None then `arr` 

5483 is flattened first. 

5484 

5485 Returns 

5486 ------- 

5487 out : ndarray 

5488 A copy of `arr` with `values` inserted. Note that `insert` 

5489 does not occur in-place: a new array is returned. If 

5490 `axis` is None, `out` is a flattened array. 

5491 

5492 See Also 

5493 -------- 

5494 append : Append elements at the end of an array. 

5495 concatenate : Join a sequence of arrays along an existing axis. 

5496 delete : Delete elements from an array. 

5497 

5498 Notes 

5499 ----- 

5500 Note that for higher dimensional inserts ``obj=0`` behaves very different 

5501 from ``obj=[0]`` just like ``arr[:,0,:] = values`` is different from 

5502 ``arr[:,[0],:] = values``. This is because of the difference between basic 

5503 and advanced :ref:`indexing <basics.indexing>`. 

5504 

5505 Examples 

5506 -------- 

5507 >>> import numpy as np 

5508 >>> a = np.arange(6).reshape(3, 2) 

5509 >>> a 

5510 array([[0, 1], 

5511 [2, 3], 

5512 [4, 5]]) 

5513 >>> np.insert(a, 1, 6) 

5514 array([0, 6, 1, 2, 3, 4, 5]) 

5515 >>> np.insert(a, 1, 6, axis=1) 

5516 array([[0, 6, 1], 

5517 [2, 6, 3], 

5518 [4, 6, 5]]) 

5519 

5520 Difference between sequence and scalars, 

5521 showing how ``obj=[1]`` behaves different from ``obj=1``: 

5522 

5523 >>> np.insert(a, [1], [[7],[8],[9]], axis=1) 

5524 array([[0, 7, 1], 

5525 [2, 8, 3], 

5526 [4, 9, 5]]) 

5527 >>> np.insert(a, 1, [[7],[8],[9]], axis=1) 

5528 array([[0, 7, 8, 9, 1], 

5529 [2, 7, 8, 9, 3], 

5530 [4, 7, 8, 9, 5]]) 

5531 >>> np.array_equal(np.insert(a, 1, [7, 8, 9], axis=1), 

5532 ... np.insert(a, [1], [[7],[8],[9]], axis=1)) 

5533 True 

5534 

5535 >>> b = a.flatten() 

5536 >>> b 

5537 array([0, 1, 2, 3, 4, 5]) 

5538 >>> np.insert(b, [2, 2], [6, 7]) 

5539 array([0, 1, 6, 7, 2, 3, 4, 5]) 

5540 

5541 >>> np.insert(b, slice(2, 4), [7, 8]) 

5542 array([0, 1, 7, 2, 8, 3, 4, 5]) 

5543 

5544 >>> np.insert(b, [2, 2], [7.13, False]) # type casting 

5545 array([0, 1, 7, 0, 2, 3, 4, 5]) 

5546 

5547 >>> x = np.arange(8).reshape(2, 4) 

5548 >>> idx = (1, 3) 

5549 >>> np.insert(x, idx, 999, axis=1) 

5550 array([[ 0, 999, 1, 2, 999, 3], 

5551 [ 4, 999, 5, 6, 999, 7]]) 

5552 

5553 """ 

5554 conv = _array_converter(arr) 

5555 arr, = conv.as_arrays(subok=False) 

5556 

5557 ndim = arr.ndim 

5558 arrorder = 'F' if arr.flags.fnc else 'C' 

5559 if axis is None: 

5560 if ndim != 1: 

5561 arr = arr.ravel() 

5562 # needed for np.matrix, which is still not 1d after being ravelled 

5563 ndim = arr.ndim 

5564 axis = ndim - 1 

5565 else: 

5566 axis = normalize_axis_index(axis, ndim) 

5567 slobj = [slice(None)]*ndim 

5568 N = arr.shape[axis] 

5569 newshape = list(arr.shape) 

5570 

5571 if isinstance(obj, slice): 

5572 # turn it into a range object 

5573 indices = arange(*obj.indices(N), dtype=intp) 

5574 else: 

5575 # need to copy obj, because indices will be changed in-place 

5576 indices = np.array(obj) 

5577 if indices.dtype == bool: 

5578 if obj.ndim != 1: 

5579 raise ValueError('boolean array argument obj to insert ' 

5580 'must be one dimensional') 

5581 indices = np.flatnonzero(obj) 

5582 elif indices.ndim > 1: 

5583 raise ValueError( 

5584 "index array argument obj to insert must be one dimensional " 

5585 "or scalar") 

5586 if indices.size == 1: 

5587 index = indices.item() 

5588 if index < -N or index > N: 

5589 raise IndexError(f"index {obj} is out of bounds for axis {axis} " 

5590 f"with size {N}") 

5591 if (index < 0): 

5592 index += N 

5593 

5594 # There are some object array corner cases here, but we cannot avoid 

5595 # that: 

5596 values = array(values, copy=None, ndmin=arr.ndim, dtype=arr.dtype) 

5597 if indices.ndim == 0: 

5598 # broadcasting is very different here, since a[:,0,:] = ... behaves 

5599 # very different from a[:,[0],:] = ...! This changes values so that 

5600 # it works likes the second case. (here a[:,0:1,:]) 

5601 values = np.moveaxis(values, 0, axis) 

5602 numnew = values.shape[axis] 

5603 newshape[axis] += numnew 

5604 new = empty(newshape, arr.dtype, arrorder) 

5605 slobj[axis] = slice(None, index) 

5606 new[tuple(slobj)] = arr[tuple(slobj)] 

5607 slobj[axis] = slice(index, index+numnew) 

5608 new[tuple(slobj)] = values 

5609 slobj[axis] = slice(index+numnew, None) 

5610 slobj2 = [slice(None)] * ndim 

5611 slobj2[axis] = slice(index, None) 

5612 new[tuple(slobj)] = arr[tuple(slobj2)] 

5613 

5614 return conv.wrap(new, to_scalar=False) 

5615 

5616 elif indices.size == 0 and not isinstance(obj, np.ndarray): 

5617 # Can safely cast the empty list to intp 

5618 indices = indices.astype(intp) 

5619 

5620 indices[indices < 0] += N 

5621 

5622 numnew = len(indices) 

5623 order = indices.argsort(kind='mergesort') # stable sort 

5624 indices[order] += np.arange(numnew) 

5625 

5626 newshape[axis] += numnew 

5627 old_mask = ones(newshape[axis], dtype=bool) 

5628 old_mask[indices] = False 

5629 

5630 new = empty(newshape, arr.dtype, arrorder) 

5631 slobj2 = [slice(None)]*ndim 

5632 slobj[axis] = indices 

5633 slobj2[axis] = old_mask 

5634 new[tuple(slobj)] = values 

5635 new[tuple(slobj2)] = arr 

5636 

5637 return conv.wrap(new, to_scalar=False) 

5638 

5639 

def _append_dispatcher(arr, values, axis=None):
    # Dispatcher for ``np.append``: only the array-likes participate in
    # __array_function__ dispatch; `axis` is a plain int/None.
    return arr, values

5642 

5643 

5644@array_function_dispatch(_append_dispatcher) 

5645def append(arr, values, axis=None): 

5646 """ 

5647 Append values to the end of an array. 

5648 

5649 Parameters 

5650 ---------- 

5651 arr : array_like 

5652 Values are appended to a copy of this array. 

5653 values : array_like 

5654 These values are appended to a copy of `arr`. It must be of the 

5655 correct shape (the same shape as `arr`, excluding `axis`). If 

5656 `axis` is not specified, `values` can be any shape and will be 

5657 flattened before use. 

5658 axis : int, optional 

5659 The axis along which `values` are appended. If `axis` is not 

5660 given, both `arr` and `values` are flattened before use. 

5661 

5662 Returns 

5663 ------- 

5664 append : ndarray 

5665 A copy of `arr` with `values` appended to `axis`. Note that 

5666 `append` does not occur in-place: a new array is allocated and 

5667 filled. If `axis` is None, `out` is a flattened array. 

5668 

5669 See Also 

5670 -------- 

5671 insert : Insert elements into an array. 

5672 delete : Delete elements from an array. 

5673 

5674 Examples 

5675 -------- 

5676 >>> import numpy as np 

5677 >>> np.append([1, 2, 3], [[4, 5, 6], [7, 8, 9]]) 

5678 array([1, 2, 3, ..., 7, 8, 9]) 

5679 

5680 When `axis` is specified, `values` must have the correct shape. 

5681 

5682 >>> np.append([[1, 2, 3], [4, 5, 6]], [[7, 8, 9]], axis=0) 

5683 array([[1, 2, 3], 

5684 [4, 5, 6], 

5685 [7, 8, 9]]) 

5686 

5687 >>> np.append([[1, 2, 3], [4, 5, 6]], [7, 8, 9], axis=0) 

5688 Traceback (most recent call last): 

5689 ... 

5690 ValueError: all the input arrays must have same number of dimensions, but 

5691 the array at index 0 has 2 dimension(s) and the array at index 1 has 1 

5692 dimension(s) 

5693 

5694 >>> a = np.array([1, 2], dtype=int) 

5695 >>> c = np.append(a, []) 

5696 >>> c 

5697 array([1., 2.]) 

5698 >>> c.dtype 

5699 float64 

5700 

5701 Default dtype for empty ndarrays is `float64` thus making the output of dtype 

5702 `float64` when appended with dtype `int64` 

5703 

5704 """ 

5705 arr = asanyarray(arr) 

5706 if axis is None: 

5707 if arr.ndim != 1: 

5708 arr = arr.ravel() 

5709 values = ravel(values) 

5710 axis = arr.ndim-1 

5711 return concatenate((arr, values), axis=axis) 

5712 

5713 

def _digitize_dispatcher(x, bins, right=None):
    # Dispatcher for ``np.digitize``: both array-likes take part in
    # __array_function__ dispatch; `right` is a plain bool.
    return x, bins

5716 

5717 

5718@array_function_dispatch(_digitize_dispatcher) 

5719def digitize(x, bins, right=False): 

5720 """ 

5721 Return the indices of the bins to which each value in input array belongs. 

5722 

5723 ========= ============= ============================ 

5724 `right` order of bins returned index `i` satisfies 

5725 ========= ============= ============================ 

5726 ``False`` increasing ``bins[i-1] <= x < bins[i]`` 

5727 ``True`` increasing ``bins[i-1] < x <= bins[i]`` 

5728 ``False`` decreasing ``bins[i-1] > x >= bins[i]`` 

5729 ``True`` decreasing ``bins[i-1] >= x > bins[i]`` 

5730 ========= ============= ============================ 

5731 

5732 If values in `x` are beyond the bounds of `bins`, 0 or ``len(bins)`` is 

5733 returned as appropriate. 

5734 

5735 Parameters 

5736 ---------- 

5737 x : array_like 

5738 Input array to be binned. Prior to NumPy 1.10.0, this array had to 

5739 be 1-dimensional, but can now have any shape. 

5740 bins : array_like 

5741 Array of bins. It has to be 1-dimensional and monotonic. 

5742 right : bool, optional 

5743 Indicating whether the intervals include the right or the left bin 

5744 edge. Default behavior is (right==False) indicating that the interval 

5745 does not include the right edge. The left bin end is open in this 

5746 case, i.e., bins[i-1] <= x < bins[i] is the default behavior for 

5747 monotonically increasing bins. 

5748 

5749 Returns 

5750 ------- 

5751 indices : ndarray of ints 

5752 Output array of indices, of same shape as `x`. 

5753 

5754 Raises 

5755 ------ 

5756 ValueError 

5757 If `bins` is not monotonic. 

5758 TypeError 

5759 If the type of the input is complex. 

5760 

5761 See Also 

5762 -------- 

5763 bincount, histogram, unique, searchsorted 

5764 

5765 Notes 

5766 ----- 

5767 If values in `x` are such that they fall outside the bin range, 

5768 attempting to index `bins` with the indices that `digitize` returns 

5769 will result in an IndexError. 

5770 

5771 .. versionadded:: 1.10.0 

5772 

5773 `numpy.digitize` is implemented in terms of `numpy.searchsorted`. 

5774 This means that a binary search is used to bin the values, which scales 

5775 much better for larger number of bins than the previous linear search. 

5776 It also removes the requirement for the input array to be 1-dimensional. 

5777 

5778 For monotonically *increasing* `bins`, the following are equivalent:: 

5779 

5780 np.digitize(x, bins, right=True) 

5781 np.searchsorted(bins, x, side='left') 

5782 

5783 Note that as the order of the arguments are reversed, the side must be too. 

5784 The `searchsorted` call is marginally faster, as it does not do any 

5785 monotonicity checks. Perhaps more importantly, it supports all dtypes. 

5786 

5787 Examples 

5788 -------- 

5789 >>> import numpy as np 

5790 >>> x = np.array([0.2, 6.4, 3.0, 1.6]) 

5791 >>> bins = np.array([0.0, 1.0, 2.5, 4.0, 10.0]) 

5792 >>> inds = np.digitize(x, bins) 

5793 >>> inds 

5794 array([1, 4, 3, 2]) 

5795 >>> for n in range(x.size): 

5796 ... print(bins[inds[n]-1], "<=", x[n], "<", bins[inds[n]]) 

5797 ... 

5798 0.0 <= 0.2 < 1.0 

5799 4.0 <= 6.4 < 10.0 

5800 2.5 <= 3.0 < 4.0 

5801 1.0 <= 1.6 < 2.5 

5802 

5803 >>> x = np.array([1.2, 10.0, 12.4, 15.5, 20.]) 

5804 >>> bins = np.array([0, 5, 10, 15, 20]) 

5805 >>> np.digitize(x,bins,right=True) 

5806 array([1, 2, 3, 4, 4]) 

5807 >>> np.digitize(x,bins,right=False) 

5808 array([1, 3, 3, 4, 5]) 

5809 """ 

5810 x = _nx.asarray(x) 

5811 bins = _nx.asarray(bins) 

5812 

5813 # here for compatibility, searchsorted below is happy to take this 

5814 if np.issubdtype(x.dtype, _nx.complexfloating): 

5815 raise TypeError("x may not be complex") 

5816 

5817 mono = _monotonicity(bins) 

5818 if mono == 0: 

5819 raise ValueError("bins must be monotonically increasing or decreasing") 

5820 

5821 # this is backwards because the arguments below are swapped 

5822 side = 'left' if right else 'right' 

5823 if mono == -1: 

5824 # reverse the bins, and invert the results 

5825 return len(bins) - _nx.searchsorted(bins[::-1], x, side=side) 

5826 else: 

5827 return _nx.searchsorted(bins, x, side=side)