Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.9/dist-packages/numpy/_core/shape_base.py: 24%

185 statements  

coverage.py v7.4.4, created at 2024-04-09 06:12 +0000

1 __all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',

2 'stack', 'vstack'] 

3 

4 import functools

5 import itertools

6 import operator

7 import warnings

8 

9 from . import numeric as _nx

10 from . import overrides

11 from .multiarray import array, asanyarray, normalize_axis_index

12 from . import fromnumeric as _from_nx

13 

14 

15 array_function_dispatch = functools.partial(

16 overrides.array_function_dispatch, module='numpy') 

17 

18 

19 def _atleast_1d_dispatcher(*arys):

20 return arys 

21 

22 

23 @array_function_dispatch(_atleast_1d_dispatcher)

24 def atleast_1d(*arys):

25 """ 

26 Convert inputs to arrays with at least one dimension. 

27 

28 Scalar inputs are converted to 1-dimensional arrays, whilst 

29 higher-dimensional inputs are preserved. 

30 

31 Parameters 

32 ---------- 

33 arys1, arys2, ... : array_like 

34 One or more input arrays. 

35 

36 Returns 

37 ------- 

38 ret : ndarray 

39 An array, or tuple of arrays, each with ``a.ndim >= 1``. 

40 Copies are made only if necessary. 

41 

42 See Also 

43 -------- 

44 atleast_2d, atleast_3d 

45 

46 Examples 

47 -------- 

48 >>> np.atleast_1d(1.0) 

49 array([1.]) 

50 

51 >>> x = np.arange(9.0).reshape(3,3) 

52 >>> np.atleast_1d(x) 

53 array([[0., 1., 2.], 

54 [3., 4., 5.], 

55 [6., 7., 8.]]) 

56 >>> np.atleast_1d(x) is x 

57 True 

58 

59 >>> np.atleast_1d(1, [3, 4]) 

60 (array([1]), array([3, 4])) 

61 

62 """ 

63 res = [] 

64 for ary in arys: 

65 ary = asanyarray(ary) 

66 if ary.ndim == 0: 

67 result = ary.reshape(1) 

68 else: 

69 result = ary 

70 res.append(result) 

71 if len(res) == 1: 

72 return res[0] 

73 else: 

74 return tuple(res) 

75 

76 

77 def _atleast_2d_dispatcher(*arys):

78 return arys 

79 

80 

81 @array_function_dispatch(_atleast_2d_dispatcher)

82 def atleast_2d(*arys):

83 """ 

84 View inputs as arrays with at least two dimensions. 

85 

86 Parameters 

87 ---------- 

88 arys1, arys2, ... : array_like 

89 One or more array-like sequences. Non-array inputs are converted 

90 to arrays. Arrays that already have two or more dimensions are 

91 preserved. 

92 

93 Returns 

94 ------- 

95 res, res2, ... : ndarray 

96 An array, or tuple of arrays, each with ``a.ndim >= 2``. 

97 Copies are avoided where possible, and views with two or more 

98 dimensions are returned. 

99 

100 See Also 

101 -------- 

102 atleast_1d, atleast_3d 

103 

104 Examples 

105 -------- 

106 >>> np.atleast_2d(3.0) 

107 array([[3.]]) 

108 

109 >>> x = np.arange(3.0) 

110 >>> np.atleast_2d(x) 

111 array([[0., 1., 2.]]) 

112 >>> np.atleast_2d(x).base is x 

113 True 

114 

115 >>> np.atleast_2d(1, [1, 2], [[1, 2]]) 

116 (array([[1]]), array([[1, 2]]), array([[1, 2]])) 

117 

118 """ 

119 res = [] 

120 for ary in arys: 

121 ary = asanyarray(ary) 

122 if ary.ndim == 0: 

123 result = ary.reshape(1, 1) 

124 elif ary.ndim == 1: 

125 result = ary[_nx.newaxis, :] 

126 else: 

127 result = ary 

128 res.append(result) 

129 if len(res) == 1: 

130 return res[0] 

131 else: 

132 return tuple(res) 

133 

134 

135 def _atleast_3d_dispatcher(*arys):

136 return arys 

137 

138 

139 @array_function_dispatch(_atleast_3d_dispatcher)

140 def atleast_3d(*arys):

141 """ 

142 View inputs as arrays with at least three dimensions. 

143 

144 Parameters 

145 ---------- 

146 arys1, arys2, ... : array_like 

147 One or more array-like sequences. Non-array inputs are converted to 

148 arrays. Arrays that already have three or more dimensions are 

149 preserved. 

150 

151 Returns 

152 ------- 

153 res1, res2, ... : ndarray 

154 An array, or tuple of arrays, each with ``a.ndim >= 3``. Copies are 

155 avoided where possible, and views with three or more dimensions are 

156 returned. For example, a 1-D array of shape ``(N,)`` becomes a view 

157 of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a 

158 view of shape ``(M, N, 1)``. 

159 

160 See Also 

161 -------- 

162 atleast_1d, atleast_2d 

163 

164 Examples 

165 -------- 

166 >>> np.atleast_3d(3.0) 

167 array([[[3.]]]) 

168 

169 >>> x = np.arange(3.0) 

170 >>> np.atleast_3d(x).shape 

171 (1, 3, 1) 

172 

173 >>> x = np.arange(12.0).reshape(4,3) 

174 >>> np.atleast_3d(x).shape 

175 (4, 3, 1) 

176 >>> np.atleast_3d(x).base is x.base # x is a reshape, so not base itself 

177 True 

178 

179 >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]): 

180 ... print(arr, arr.shape) # doctest: +SKIP 

181 ... 

182 [[[1] 

183 [2]]] (1, 2, 1) 

184 [[[1] 

185 [2]]] (1, 2, 1) 

186 [[[1 2]]] (1, 1, 2) 

187 

188 """ 

189 res = [] 

190 for ary in arys: 

191 ary = asanyarray(ary) 

192 if ary.ndim == 0: 

193 result = ary.reshape(1, 1, 1) 

194 elif ary.ndim == 1: 

195 result = ary[_nx.newaxis, :, _nx.newaxis] 

196 elif ary.ndim == 2: 

197 result = ary[:, :, _nx.newaxis] 

198 else: 

199 result = ary 

200 res.append(result) 

201 if len(res) == 1: 

202 return res[0] 

203 else: 

204 return tuple(res) 

205 

206 

207 def _arrays_for_stack_dispatcher(arrays):

208 if not hasattr(arrays, "__getitem__"): 

209 raise TypeError('arrays to stack must be passed as a "sequence" type ' 

210 'such as list or tuple.') 

211 

212 return tuple(arrays) 

213 
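# [Editorial sketch, not part of the NumPy source; the _example_* helper name
# is hypothetical.] Minimal illustration of the dispatcher helper above:
# anything without __getitem__ (e.g. a generator) is rejected, while sequences
# are normalised to a tuple.
def _example_arrays_for_stack_dispatcher():
    assert _arrays_for_stack_dispatcher([1, 2]) == (1, 2)
    try:
        _arrays_for_stack_dispatcher(x for x in range(3))  # generators lack __getitem__
    except TypeError:
        pass  # expected: stacking requires a sequence such as list or tuple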

214 

215 def _vhstack_dispatcher(tup, *, dtype=None, casting=None):

216 return _arrays_for_stack_dispatcher(tup) 

217 

218 

219 @array_function_dispatch(_vhstack_dispatcher)

220 def vstack(tup, *, dtype=None, casting="same_kind"):

221 """ 

222 Stack arrays in sequence vertically (row wise). 

223 

224 This is equivalent to concatenation along the first axis after 1-D arrays 

225 of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by 

226 `vsplit`. 

227 

228 This function makes most sense for arrays with up to 3 dimensions. For 

229 instance, for pixel-data with a height (first axis), width (second axis), 

230 and r/g/b channels (third axis). The functions `concatenate`, `stack` and 

231 `block` provide more general stacking and concatenation operations. 

232 

233 Parameters 

234 ---------- 

235 tup : sequence of ndarrays 

236 The arrays must have the same shape along all but the first axis. 

237 1-D arrays must have the same length. 

238 

239 dtype : str or dtype 

240 If provided, the destination array will have this dtype. Cannot be 

241 provided together with `out`. 

242 

243 .. versionadded:: 1.24 

244 

245 casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional 

246 Controls what kind of data casting may occur. Defaults to 'same_kind'. 

247 

248 .. versionadded:: 1.24 

249 

250 Returns 

251 ------- 

252 stacked : ndarray 

253 The array formed by stacking the given arrays, will be at least 2-D. 

254 

255 See Also 

256 -------- 

257 concatenate : Join a sequence of arrays along an existing axis. 

258 stack : Join a sequence of arrays along a new axis. 

259 block : Assemble an nd-array from nested lists of blocks. 

260 hstack : Stack arrays in sequence horizontally (column wise). 

261 dstack : Stack arrays in sequence depth wise (along third axis). 

262 column_stack : Stack 1-D arrays as columns into a 2-D array. 

263 vsplit : Split an array into multiple sub-arrays vertically (row-wise). 

264 

265 Examples 

266 -------- 

267 >>> a = np.array([1, 2, 3]) 

268 >>> b = np.array([4, 5, 6]) 

269 >>> np.vstack((a,b)) 

270 array([[1, 2, 3], 

271 [4, 5, 6]]) 

272 

273 >>> a = np.array([[1], [2], [3]]) 

274 >>> b = np.array([[4], [5], [6]]) 

275 >>> np.vstack((a,b)) 

276 array([[1], 

277 [2], 

278 [3], 

279 [4], 

280 [5], 

281 [6]]) 

282 

283 """ 

284 arrs = atleast_2d(*tup) 

285 if not isinstance(arrs, tuple): 

286 arrs = (arrs,) 

287 return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting) 

288 

289 

290 @array_function_dispatch(_vhstack_dispatcher)

291 def hstack(tup, *, dtype=None, casting="same_kind"):

292 """ 

293 Stack arrays in sequence horizontally (column wise). 

294 

295 This is equivalent to concatenation along the second axis, except for 1-D 

296 arrays where it concatenates along the first axis. Rebuilds arrays divided 

297 by `hsplit`. 

298 

299 This function makes most sense for arrays with up to 3 dimensions. For 

300 instance, for pixel-data with a height (first axis), width (second axis), 

301 and r/g/b channels (third axis). The functions `concatenate`, `stack` and 

302 `block` provide more general stacking and concatenation operations. 

303 

304 Parameters 

305 ---------- 

306 tup : sequence of ndarrays 

307 The arrays must have the same shape along all but the second axis, 

308 except 1-D arrays which can be any length. 

309 

310 dtype : str or dtype 

311 If provided, the destination array will have this dtype. Cannot be 

312 provided together with `out`. 

313 

314 .. versionadded:: 1.24 

315 

316 casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional 

317 Controls what kind of data casting may occur. Defaults to 'same_kind'. 

318 

319 .. versionadded:: 1.24 

320 

321 Returns 

322 ------- 

323 stacked : ndarray 

324 The array formed by stacking the given arrays. 

325 

326 See Also 

327 -------- 

328 concatenate : Join a sequence of arrays along an existing axis. 

329 stack : Join a sequence of arrays along a new axis. 

330 block : Assemble an nd-array from nested lists of blocks. 

331 vstack : Stack arrays in sequence vertically (row wise). 

332 dstack : Stack arrays in sequence depth wise (along third axis). 

333 column_stack : Stack 1-D arrays as columns into a 2-D array. 

334 hsplit : Split an array into multiple sub-arrays  

335 horizontally (column-wise). 

336 

337 Examples 

338 -------- 

339 >>> a = np.array((1,2,3)) 

340 >>> b = np.array((4,5,6)) 

341 >>> np.hstack((a,b)) 

342 array([1, 2, 3, 4, 5, 6]) 

343 >>> a = np.array([[1],[2],[3]]) 

344 >>> b = np.array([[4],[5],[6]]) 

345 >>> np.hstack((a,b)) 

346 array([[1, 4], 

347 [2, 5], 

348 [3, 6]]) 

349 

350 """ 

351 arrs = atleast_1d(*tup) 

352 if not isinstance(arrs, tuple): 

353 arrs = (arrs,) 

354 # As a special case, dimension 0 of 1-dimensional arrays is "horizontal" 

355 if arrs and arrs[0].ndim == 1: 

356 return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting) 

357 else: 

358 return _nx.concatenate(arrs, 1, dtype=dtype, casting=casting) 

359 

360 

361 def _stack_dispatcher(arrays, axis=None, out=None, *,

362 dtype=None, casting=None): 

363 arrays = _arrays_for_stack_dispatcher(arrays) 

364 if out is not None: 

365 # optimize for the typical case where only arrays is provided 

366 arrays = list(arrays) 

367 arrays.append(out) 

368 return arrays 

369 

370 

371 @array_function_dispatch(_stack_dispatcher)

372 def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"):

373 """ 

374 Join a sequence of arrays along a new axis. 

375 

376 The ``axis`` parameter specifies the index of the new axis in the 

377 dimensions of the result. For example, if ``axis=0`` it will be the first 

378 dimension and if ``axis=-1`` it will be the last dimension. 

379 

380 .. versionadded:: 1.10.0 

381 

382 Parameters 

383 ---------- 

384 arrays : sequence of array_like 

385 Each array must have the same shape. 

386 

387 axis : int, optional 

388 The axis in the result array along which the input arrays are stacked. 

389 

390 out : ndarray, optional 

391 If provided, the destination to place the result. The shape must be 

392 correct, matching that of what stack would have returned if no 

393 out argument were specified. 

394 

395 dtype : str or dtype 

396 If provided, the destination array will have this dtype. Cannot be 

397 provided together with `out`. 

398 

399 .. versionadded:: 1.24 

400 

401 casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional 

402 Controls what kind of data casting may occur. Defaults to 'same_kind'. 

403 

404 .. versionadded:: 1.24 

405 

406 

407 Returns 

408 ------- 

409 stacked : ndarray 

410 The stacked array has one more dimension than the input arrays. 

411 

412 See Also 

413 -------- 

414 concatenate : Join a sequence of arrays along an existing axis. 

415 block : Assemble an nd-array from nested lists of blocks. 

416 split : Split array into a list of multiple sub-arrays of equal size. 

417 

418 Examples 

419 -------- 

420 >>> arrays = [np.random.randn(3, 4) for _ in range(10)] 

421 >>> np.stack(arrays, axis=0).shape 

422 (10, 3, 4) 

423 

424 >>> np.stack(arrays, axis=1).shape 

425 (3, 10, 4) 

426 

427 >>> np.stack(arrays, axis=2).shape 

428 (3, 4, 10) 

429 

430 >>> a = np.array([1, 2, 3]) 

431 >>> b = np.array([4, 5, 6]) 

432 >>> np.stack((a, b)) 

433 array([[1, 2, 3], 

434 [4, 5, 6]]) 

435 

436 >>> np.stack((a, b), axis=-1) 

437 array([[1, 4], 

438 [2, 5], 

439 [3, 6]]) 

440 

441 """ 

442 arrays = [asanyarray(arr) for arr in arrays] 

443 if not arrays: 

444 raise ValueError('need at least one array to stack') 

445 

446 shapes = {arr.shape for arr in arrays} 

447 if len(shapes) != 1: 

448 raise ValueError('all input arrays must have the same shape') 

449 

450 result_ndim = arrays[0].ndim + 1 

451 axis = normalize_axis_index(axis, result_ndim) 

452 

453 sl = (slice(None),) * axis + (_nx.newaxis,) 

454 expanded_arrays = [arr[sl] for arr in arrays] 

455 return _nx.concatenate(expanded_arrays, axis=axis, out=out, 

456 dtype=dtype, casting=casting) 

457 
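# [Editorial sketch, not part of the NumPy source; the _example_* helper name
# is hypothetical.] Illustrates the slice trick used in stack() above:
# ``(slice(None),) * axis + (newaxis,)`` inserts a new length-1 axis at
# position ``axis`` before the arrays are concatenated along it.
def _example_stack_slice_trick():
    a = _nx.arange(12).reshape(3, 4)
    axis = 1
    sl = (slice(None),) * axis + (_nx.newaxis,)
    # a[sl] is a view of ``a`` with an extra axis inserted: shape (3, 1, 4)
    assert a[sl].shape == (3, 1, 4)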

458 

459 # Internal functions to eliminate the overhead of repeated dispatch in one of

460 # the two possible paths inside np.block.

461 # Use getattr to protect against __array_function__ being disabled.

462 _size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)

463 _ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)

464 _concatenate = getattr(_from_nx.concatenate,

465 '__wrapped__', _from_nx.concatenate) 

466 

467 

468 def _block_format_index(index):

469 """ 

470 Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``. 

471 """ 

472 idx_str = ''.join('[{}]'.format(i) for i in index if i is not None) 

473 return 'arrays' + idx_str 

474 
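# [Editorial sketch, not part of the NumPy source; the _example_* helper name
# is hypothetical.] Expected behaviour of _block_format_index: indices are
# rendered as a subscript chain for error messages, and ``None`` entries
# (which mark an empty list at the bottom of the nesting) are skipped.
def _example_block_format_index():
    assert _block_format_index([0, 1, 2]) == 'arrays[0][1][2]'
    assert _block_format_index([0, None]) == 'arrays[0]'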

475 

476 def _block_check_depths_match(arrays, parent_index=[]):

477 """ 

478 Recursive function checking that the depths of nested lists in `arrays` 

479 all match. Mismatch raises a ValueError as described in the block 

480 docstring below. 

481 

482 The entire index (rather than just the depth) needs to be calculated 

483 for each innermost list, in case an error needs to be raised, so that 

484 the index of the offending list can be printed as part of the error. 

485 

486 Parameters 

487 ---------- 

488 arrays : nested list of arrays 

489 The arrays to check 

490 parent_index : list of int 

491 The full index of `arrays` within the nested lists passed to 

492 `_block_check_depths_match` at the top of the recursion. 

493 

494 Returns 

495 ------- 

496 first_index : list of int 

497 The full index of an element from the bottom of the nesting in 

498 `arrays`. If any element at the bottom is an empty list, this will 

499 refer to it, and the last index along the empty axis will be None. 

500 max_arr_ndim : int 

501 The maximum of the ndims of the arrays nested in `arrays`. 

502 final_size: int 

503 The number of elements in the final array. This is used to motivate

504 the choice of algorithm, based on benchmarking wisdom.

505 

506 """ 

507 if type(arrays) is tuple: 

508 # not strictly necessary, but saves us from: 

509 # - more than one way to do things - no point treating tuples like 

510 # lists 

511 # - horribly confusing behaviour that results when tuples are 

512 # treated like ndarray 

513 raise TypeError( 

514 '{} is a tuple. ' 

515 'Only lists can be used to arrange blocks, and np.block does ' 

516 'not allow implicit conversion from tuple to ndarray.'.format( 

517 _block_format_index(parent_index) 

518 ) 

519 ) 

520 elif type(arrays) is list and len(arrays) > 0: 

521 idxs_ndims = (_block_check_depths_match(arr, parent_index + [i]) 

522 for i, arr in enumerate(arrays)) 

523 

524 first_index, max_arr_ndim, final_size = next(idxs_ndims) 

525 for index, ndim, size in idxs_ndims: 

526 final_size += size 

527 if ndim > max_arr_ndim: 

528 max_arr_ndim = ndim 

529 if len(index) != len(first_index): 

530 raise ValueError( 

531 "List depths are mismatched. First element was at depth " 

532 "{}, but there is an element at depth {} ({})".format( 

533 len(first_index), 

534 len(index), 

535 _block_format_index(index) 

536 ) 

537 ) 

538 # propagate our flag that indicates an empty list at the bottom 

539 if index[-1] is None: 

540 first_index = index 

541 

542 return first_index, max_arr_ndim, final_size 

543 elif type(arrays) is list and len(arrays) == 0: 

544 # We've 'bottomed out' on an empty list 

545 return parent_index + [None], 0, 0 

546 else: 

547 # We've 'bottomed out' - arrays is either a scalar or an array 

548 size = _size(arrays) 

549 return parent_index, _ndim(arrays), size 

550 
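# [Editorial sketch, not part of the NumPy source; the _example_* helper name
# is hypothetical.] The (index, ndim, size) triple returned for a well-formed
# nesting, and the error raised when list depths disagree.
def _example_block_check_depths_match():
    index, max_ndim, size = _block_check_depths_match([[1, 2], [3, 4]])
    assert index == [0, 0]   # path to one innermost element
    assert max_ndim == 0     # every leaf here is a scalar
    assert size == 4         # total number of elements in the final array
    try:
        _block_check_depths_match([[1, 2], 3])  # depths 2 and 1 are mismatched
    except ValueError:
        pass  # expected: "List depths are mismatched..."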

551 

552 def _atleast_nd(a, ndim):

553 # Ensures `a` has at least `ndim` dimensions by prepending 

554 # ones to `a.shape` as necessary 

555 return array(a, ndmin=ndim, copy=None, subok=True) 

556 

557 

558 def _accumulate(values):

559 return list(itertools.accumulate(values)) 

560 
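# [Editorial sketch, not part of the NumPy source; the _example_* helper name
# is hypothetical.] _atleast_nd pads the shape with leading ones, and
# _accumulate produces the running end-offsets later turned into slice
# prefixes.
def _example_atleast_nd_and_accumulate():
    a = _nx.arange(6).reshape(2, 3)
    assert _atleast_nd(a, 4).shape == (1, 1, 2, 3)  # ones are prepended
    assert _accumulate([2, 3, 4]) == [2, 5, 9]      # cumulative sums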

561 

562 def _concatenate_shapes(shapes, axis):

563 """Given array shapes, return the resulting shape and slices prefixes. 

564 

565 These help in nested concatenation. 

566 

567 Returns 

568 ------- 

569 shape: tuple of int 

570 This tuple satisfies:: 

571 

572 shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)

573 shape == concatenate(arrs, axis).shape 

574 

575 slice_prefixes: list of (slice(start, end),) tuples

576 For each array being concatenated, this gives the slice along

577 ``axis`` of the larger array into which that array should be written.

578 

579 For example, the following holds:: 

580 

581 ret = concatenate([a, b, c], axis) 

582 _, (sl_a, sl_b, sl_c) = _concatenate_shapes([a.shape, b.shape, c.shape], axis)

583 

584 ret[(slice(None),) * axis + sl_a] == a 

585 ret[(slice(None),) * axis + sl_b] == b 

586 ret[(slice(None),) * axis + sl_c] == c 

587 

588 These are called slice prefixes since they are used in the recursive 

589 blocking algorithm to compute the left-most slices during the 

590 recursion. Therefore, they must be prepended to rest of the slice 

591 that was computed deeper in the recursion. 

592 

593 These are returned as tuples to ensure that they can quickly be added 

594 to existing slice tuple without creating a new tuple every time. 

595 

596 """ 

597 # Cache a result that will be reused. 

598 shape_at_axis = [shape[axis] for shape in shapes] 

599 

600 # Take a shape, any shape 

601 first_shape = shapes[0] 

602 first_shape_pre = first_shape[:axis] 

603 first_shape_post = first_shape[axis+1:] 

604 

605 if any(shape[:axis] != first_shape_pre or 

606 shape[axis+1:] != first_shape_post for shape in shapes): 

607 raise ValueError( 

608 'Mismatched array shapes in block along axis {}.'.format(axis)) 

609 

610 shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:]) 

611 

612 offsets_at_axis = _accumulate(shape_at_axis) 

613 slice_prefixes = [(slice(start, end),) 

614 for start, end in zip([0] + offsets_at_axis, 

615 offsets_at_axis)] 

616 return shape, slice_prefixes 

617 
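# [Editorial sketch, not part of the NumPy source; the _example_* helper name
# is hypothetical.] For two shapes joined along axis 1, the result shape sums
# the axis-1 lengths, and each slice prefix addresses the region that the
# corresponding input will occupy.
def _example_concatenate_shapes():
    shape, prefixes = _concatenate_shapes([(2, 3), (2, 4)], axis=1)
    assert shape == (2, 7)
    assert prefixes == [(slice(0, 3),), (slice(3, 7),)]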

618 

619 def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):

620 """ 

621 Returns the shape of the final array, along with a list 

622 of slices and a list of arrays that can be used for assignment inside the 

623 new array.

624 

625 Parameters 

626 ---------- 

627 arrays : nested list of arrays 

628 The arrays to check 

629 max_depth : int

630 The number of nested lists 

631 result_ndim : int 

632 The number of dimensions in the final array.

633 

634 Returns 

635 ------- 

636 shape : tuple of int 

637 The shape that the final array will take on. 

638 slices: list of tuple of slices 

639 The slices into the full array required for assignment. These are 

640 required to be prepended with ``(Ellipsis, )`` to obtain the correct

641 final index. 

642 arrays: list of ndarray 

643 The data to assign to each slice of the full array 

644 

645 """ 

646 if depth < max_depth: 

647 shapes, slices, arrays = zip( 

648 *[_block_info_recursion(arr, max_depth, result_ndim, depth+1) 

649 for arr in arrays]) 

650 

651 axis = result_ndim - max_depth + depth 

652 shape, slice_prefixes = _concatenate_shapes(shapes, axis) 

653 

654 # Prepend the slice prefix and flatten the slices 

655 slices = [slice_prefix + the_slice 

656 for slice_prefix, inner_slices in zip(slice_prefixes, slices) 

657 for the_slice in inner_slices] 

658 

659 # Flatten the array list 

660 arrays = functools.reduce(operator.add, arrays) 

661 

662 return shape, slices, arrays 

663 else: 

664 # We've 'bottomed out' - arrays is either a scalar or an array 

665 # type(arrays) is not list 

666 # Return the slice and the array inside a list to be consistent with 

667 # the recursive case. 

668 arr = _atleast_nd(arrays, result_ndim) 

669 return arr.shape, [()], [arr] 

670 
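# [Editorial sketch, not part of the NumPy source; the _example_* helper name
# is hypothetical.] For a flat list of two 2-D blocks, the recursion reports
# the assembled shape plus one slice per block; assigning each block into
# ``result[(Ellipsis,) + sl]`` rebuilds block([a, b]).
def _example_block_info_recursion():
    a = _nx.ones((2, 2))
    b = _nx.zeros((2, 3))
    shape, slices, arrs = _block_info_recursion([a, b], max_depth=1, result_ndim=2)
    assert shape == (2, 5)
    assert slices == [(slice(0, 2),), (slice(2, 5),)]
    assert len(arrs) == 2  # one (at least 2-D) array per leaf, in order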

671 

672 def _block(arrays, max_depth, result_ndim, depth=0):

673 """ 

674 Internal implementation of block based on repeated concatenation. 

675 `arrays` is the argument passed to 

676 block. `max_depth` is the depth of nested lists within `arrays` and 

677 `result_ndim` is the greatest of the dimensions of the arrays in 

678 `arrays` and the depth of the lists in `arrays` (see block docstring 

679 for details). 

680 """ 

681 if depth < max_depth: 

682 arrs = [_block(arr, max_depth, result_ndim, depth+1) 

683 for arr in arrays] 

684 return _concatenate(arrs, axis=-(max_depth-depth)) 

685 else: 

686 # We've 'bottomed out' - arrays is either a scalar or an array 

687 # type(arrays) is not list 

688 return _atleast_nd(arrays, result_ndim) 

689 
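# [Editorial sketch, not part of the NumPy source; the _example_* helper name
# is hypothetical.] The concatenate-based path: inner lists are joined along
# the last axis first and outer lists along earlier axes, so a 2x2 grid of
# (2, 2) blocks yields a (4, 4) result.
def _example_block_concatenation_path():
    a = _nx.ones((2, 2), dtype=int)
    result = _block([[a, a], [a, a]], max_depth=2, result_ndim=2)
    assert result.shape == (4, 4)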

690 

691 def _block_dispatcher(arrays):

692 # Use type(...) is list to match the behavior of np.block(), which special 

693 # cases list specifically rather than allowing for generic iterables or 

694 # tuple. Also, we know that list.__array_function__ will never exist. 

695 if type(arrays) is list: 

696 for subarrays in arrays: 

697 yield from _block_dispatcher(subarrays) 

698 else: 

699 yield arrays 

700 
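# [Editorial sketch, not part of the NumPy source; the _example_* helper name
# is hypothetical.] The dispatcher yields every leaf of the nested lists, in
# order, so __array_function__ overrides can be collected from all blocks.
def _example_block_dispatcher():
    a, b, c = _nx.ones(2), _nx.ones(3), _nx.ones(4)
    leaves = list(_block_dispatcher([[a, b], c]))
    assert len(leaves) == 3
    assert all(x is y for x, y in zip(leaves, [a, b, c]))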

701 

702 @array_function_dispatch(_block_dispatcher)

703 def block(arrays):

704 """ 

705 Assemble an nd-array from nested lists of blocks. 

706 

707 Blocks in the innermost lists are concatenated (see `concatenate`) along 

708 the last dimension (-1), then these are concatenated along the 

709 second-last dimension (-2), and so on until the outermost list is reached. 

710 

711 Blocks can be of any dimension, but will not be broadcasted using 

712 the normal rules. Instead, leading axes of size 1 are inserted,  

713 to make ``block.ndim`` the same for all blocks. This is primarily useful 

714 for working with scalars, and means that code like ``np.block([v, 1])`` 

715 is valid, where ``v.ndim == 1``. 

716 

717 When the nested list is two levels deep, this allows block matrices to be 

718 constructed from their components. 

719 

720 .. versionadded:: 1.13.0 

721 

722 Parameters 

723 ---------- 

724 arrays : nested list of array_like or scalars (but not tuples) 

725 If passed a single ndarray or scalar (a nested list of depth 0), this 

726 is returned unmodified (and not copied). 

727 

728 Element shapes must match along the appropriate axes (without

729 broadcasting), but leading 1s will be prepended to the shape as 

730 necessary to make the dimensions match. 

731 

732 Returns 

733 ------- 

734 block_array : ndarray 

735 The array assembled from the given blocks. 

736 

737 The dimensionality of the output is equal to the greatest of: 

738 

739 * the dimensionality of all the inputs 

740 * the depth to which the input list is nested 

741 

742 Raises 

743 ------ 

744 ValueError 

745 * If list depths are mismatched - for instance, ``[[a, b], c]`` is 

746 illegal, and should be spelt ``[[a, b], [c]]`` 

747 * If lists are empty - for instance, ``[[a, b], []]`` 

748 

749 See Also 

750 -------- 

751 concatenate : Join a sequence of arrays along an existing axis. 

752 stack : Join a sequence of arrays along a new axis. 

753 vstack : Stack arrays in sequence vertically (row wise). 

754 hstack : Stack arrays in sequence horizontally (column wise). 

755 dstack : Stack arrays in sequence depth wise (along third axis). 

756 column_stack : Stack 1-D arrays as columns into a 2-D array. 

757 vsplit : Split an array into multiple sub-arrays vertically (row-wise). 

758 

759 Notes 

760 ----- 

761 

762 When called with only scalars, ``np.block`` is equivalent to an ndarray 

763 call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to 

764 ``np.array([[1, 2], [3, 4]])``. 

765 

766 This function does not enforce that the blocks lie on a fixed grid. 

767 ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form:: 

768 

769 AAAbb 

770 AAAbb 

771 cccDD 

772 

773 But is also allowed to produce, for some ``a, b, c, d``:: 

774 

775 AAAbb 

776 AAAbb 

777 cDDDD 

778 

779 Since concatenation happens along the last axis first, `block` is *not* 

780 capable of producing the following directly:: 

781 

782 AAAbb 

783 cccbb 

784 cccDD 

785 

786 Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is 

787 equivalent to ``np.block([[A, B, ...], [p, q, ...]])``. 

788 

789 Examples 

790 -------- 

791 The most common use of this function is to build a block matrix 

792 

793 >>> A = np.eye(2) * 2 

794 >>> B = np.eye(3) * 3 

795 >>> np.block([ 

796 ... [A, np.zeros((2, 3))], 

797 ... [np.ones((3, 2)), B ] 

798 ... ]) 

799 array([[2., 0., 0., 0., 0.], 

800 [0., 2., 0., 0., 0.], 

801 [1., 1., 3., 0., 0.], 

802 [1., 1., 0., 3., 0.], 

803 [1., 1., 0., 0., 3.]]) 

804 

805 With a list of depth 1, `block` can be used as `hstack` 

806 

807 >>> np.block([1, 2, 3]) # hstack([1, 2, 3]) 

808 array([1, 2, 3]) 

809 

810 >>> a = np.array([1, 2, 3]) 

811 >>> b = np.array([4, 5, 6]) 

812 >>> np.block([a, b, 10]) # hstack([a, b, 10]) 

813 array([ 1, 2, 3, 4, 5, 6, 10]) 

814 

815 >>> A = np.ones((2, 2), int) 

816 >>> B = 2 * A 

817 >>> np.block([A, B]) # hstack([A, B]) 

818 array([[1, 1, 2, 2], 

819 [1, 1, 2, 2]]) 

820 

821 With a list of depth 2, `block` can be used in place of `vstack`: 

822 

823 >>> a = np.array([1, 2, 3]) 

824 >>> b = np.array([4, 5, 6]) 

825 >>> np.block([[a], [b]]) # vstack([a, b]) 

826 array([[1, 2, 3], 

827 [4, 5, 6]]) 

828 

829 >>> A = np.ones((2, 2), int) 

830 >>> B = 2 * A 

831 >>> np.block([[A], [B]]) # vstack([A, B]) 

832 array([[1, 1], 

833 [1, 1], 

834 [2, 2], 

835 [2, 2]]) 

836 

837 It can also be used in places of `atleast_1d` and `atleast_2d` 

838 

839 >>> a = np.array(0) 

840 >>> b = np.array([1]) 

841 >>> np.block([a]) # atleast_1d(a) 

842 array([0]) 

843 >>> np.block([b]) # atleast_1d(b) 

844 array([1]) 

845 

846 >>> np.block([[a]]) # atleast_2d(a) 

847 array([[0]]) 

848 >>> np.block([[b]]) # atleast_2d(b) 

849 array([[1]]) 

850 

851 

852 """ 

853 arrays, list_ndim, result_ndim, final_size = _block_setup(arrays) 

854 

855 # It was found through benchmarking that making an array of final size

856 # around 256x256 was faster by straight concatenation on an

857 # i7-7700HQ processor with dual-channel 2400 MHz RAM.

858 # The choice of dtype did not seem to matter much.

859 # 

860 # A 2D array using repeated concatenation requires 2 copies of the array. 

861 # 

862 # The fastest algorithm will depend on the ratio of CPU power to memory 

863 # speed. 

864 # One can monitor the results of the benchmark 

865 # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d 

866 # to tune this parameter until a C version of the `_block_info_recursion` 

867 # algorithm is implemented which would likely be faster than the python 

868 # version. 

869 if list_ndim * final_size > (2 * 512 * 512): 

870 return _block_slicing(arrays, list_ndim, result_ndim) 

871 else: 

872 return _block_concatenate(arrays, list_ndim, result_ndim) 

873 

874 

875 # These helper functions are mostly used for testing.

876 # They allow us to write tests that directly call `_block_slicing`

877 # or `_block_concatenate` without blocking large arrays to force the wisdom

878 # to trigger the desired path.

879 def _block_setup(arrays):

880 """ 

881 Returns 

882 (`arrays`, list_ndim, result_ndim, final_size) 

883 """ 

884 bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays) 

885 list_ndim = len(bottom_index) 

886 if bottom_index and bottom_index[-1] is None: 

887 raise ValueError( 

888 'List at {} cannot be empty'.format( 

889 _block_format_index(bottom_index) 

890 ) 

891 ) 

892 result_ndim = max(arr_ndim, list_ndim) 

893 return arrays, list_ndim, result_ndim, final_size 

894 
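# [Editorial sketch, not part of the NumPy source; the _example_* helper name
# is hypothetical.] _block_setup reports the list nesting depth, the
# dimensionality of the result and the total element count that block() uses
# to choose between the two algorithms.
def _example_block_setup():
    a = _nx.ones((2, 2))
    _, list_ndim, result_ndim, final_size = _block_setup([[a, a]])
    assert list_ndim == 2      # two levels of list nesting
    assert result_ndim == 2    # max(array ndim, nesting depth)
    assert final_size == 8     # elements in the assembled array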

895 

896 def _block_slicing(arrays, list_ndim, result_ndim):

897 shape, slices, arrays = _block_info_recursion( 

898 arrays, list_ndim, result_ndim) 

899 dtype = _nx.result_type(*[arr.dtype for arr in arrays]) 

900 

901 # Test preferring F only in the case that all input arrays are F 

902 F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays) 

903 C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays) 

904 order = 'F' if F_order and not C_order else 'C' 

905 result = _nx.empty(shape=shape, dtype=dtype, order=order) 

906 # Note: In a c implementation, the function 

907 # PyArray_CreateMultiSortedStridePerm could be used for more advanced 

908 # guessing of the desired order. 

909 

910 for the_slice, arr in zip(slices, arrays): 

911 result[(Ellipsis,) + the_slice] = arr 

912 return result 

913 
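# [Editorial sketch, not part of the NumPy source; the _example_* helper name
# is hypothetical.] The slicing-based path: a result array of the final shape
# is allocated once and every block is assigned into its slot, avoiding the
# intermediate copies made by repeated concatenation.
def _example_block_slicing():
    a = _nx.ones((2, 2))
    b = _nx.zeros((2, 2))
    out = _block_slicing([a, b], list_ndim=1, result_ndim=2)
    assert out.shape == (2, 4)
    assert out[0, 0] == 1.0 and out[0, 3] == 0.0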

914 

915 def _block_concatenate(arrays, list_ndim, result_ndim):

916 result = _block(arrays, list_ndim, result_ndim) 

917 if list_ndim == 0: 

918 # Catch an edge case where _block returns a view because 

919 # `arrays` is a single numpy array and not a list of numpy arrays. 

920 # This might copy scalars or lists twice, but this isn't a likely 

921 # use case for those interested in performance.

922 result = result.copy() 

923 return result