__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
           'stack', 'unstack', 'vstack']

import functools
import itertools
import operator

from . import numeric as _nx
from . import overrides
from .multiarray import array, asanyarray, normalize_axis_index
from . import fromnumeric as _from_nx

array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')


def _atleast_1d_dispatcher(*arys):
    return arys


@array_function_dispatch(_atleast_1d_dispatcher)
def atleast_1d(*arys):
    """
    Convert inputs to arrays with at least one dimension.

    Scalar inputs are converted to 1-dimensional arrays, whilst
    higher-dimensional inputs are preserved.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more input arrays.

    Returns
    -------
    ret : ndarray
        An array, or tuple of arrays, each with ``a.ndim >= 1``.
        Copies are made only if necessary.

    See Also
    --------
    atleast_2d, atleast_3d

    Examples
    --------
    >>> import numpy as np
    >>> np.atleast_1d(1.0)
    array([1.])

    >>> x = np.arange(9.0).reshape(3,3)
    >>> np.atleast_1d(x)
    array([[0., 1., 2.],
           [3., 4., 5.],
           [6., 7., 8.]])
    >>> np.atleast_1d(x) is x
    True

    >>> np.atleast_1d(1, [3, 4])
    (array([1]), array([3, 4]))

    """
    if len(arys) == 1:
        result = asanyarray(arys[0])
        if result.ndim == 0:
            result = result.reshape(1)
        return result
    res = []
    for ary in arys:
        result = asanyarray(ary)
        if result.ndim == 0:
            result = result.reshape(1)
        res.append(result)
    return tuple(res)


def _atleast_2d_dispatcher(*arys):
    return arys


@array_function_dispatch(_atleast_2d_dispatcher)
def atleast_2d(*arys):
    """
    View inputs as arrays with at least two dimensions.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like sequences. Non-array inputs are converted
        to arrays. Arrays that already have two or more dimensions are
        preserved.

    Returns
    -------
    res1, res2, ... : ndarray
        An array, or tuple of arrays, each with ``a.ndim >= 2``.
        Copies are avoided where possible, and views with two or more
        dimensions are returned.

    See Also
    --------
    atleast_1d, atleast_3d

    Examples
    --------
    >>> import numpy as np
    >>> np.atleast_2d(3.0)
    array([[3.]])

    >>> x = np.arange(3.0)
    >>> np.atleast_2d(x)
    array([[0., 1., 2.]])
    >>> np.atleast_2d(x).base is x
    True

    >>> np.atleast_2d(1, [1, 2], [[1, 2]])
    (array([[1]]), array([[1, 2]]), array([[1, 2]]))

    """
    res = []
    for ary in arys:
        ary = asanyarray(ary)
        if ary.ndim == 0:
            result = ary.reshape(1, 1)
        elif ary.ndim == 1:
            result = ary[_nx.newaxis, :]
        else:
            result = ary
        res.append(result)
    if len(res) == 1:
        return res[0]
    else:
        return tuple(res)


def _atleast_3d_dispatcher(*arys):
    return arys


@array_function_dispatch(_atleast_3d_dispatcher)
def atleast_3d(*arys):
    """
    View inputs as arrays with at least three dimensions.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like sequences. Non-array inputs are converted to
        arrays. Arrays that already have three or more dimensions are
        preserved.

    Returns
    -------
    res1, res2, ... : ndarray
        An array, or tuple of arrays, each with ``a.ndim >= 3``. Copies are
        avoided where possible, and views with three or more dimensions are
        returned. For example, a 1-D array of shape ``(N,)`` becomes a view
        of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
        view of shape ``(M, N, 1)``.

    See Also
    --------
    atleast_1d, atleast_2d

    Examples
    --------
    >>> import numpy as np
    >>> np.atleast_3d(3.0)
    array([[[3.]]])

    >>> x = np.arange(3.0)
    >>> np.atleast_3d(x).shape
    (1, 3, 1)

    >>> x = np.arange(12.0).reshape(4,3)
    >>> np.atleast_3d(x).shape
    (4, 3, 1)
    >>> np.atleast_3d(x).base is x.base  # x is a reshape, so not base itself
    True

    >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
    ...     print(arr, arr.shape)  # doctest: +SKIP
    ...
    [[[1]
      [2]]] (1, 2, 1)
    [[[1]
      [2]]] (1, 2, 1)
    [[[1 2]]] (1, 1, 2)

    """
    res = []
    for ary in arys:
        ary = asanyarray(ary)
        if ary.ndim == 0:
            result = ary.reshape(1, 1, 1)
        elif ary.ndim == 1:
            result = ary[_nx.newaxis, :, _nx.newaxis]
        elif ary.ndim == 2:
            result = ary[:, :, _nx.newaxis]
        else:
            result = ary
        res.append(result)
    if len(res) == 1:
        return res[0]
    else:
        return tuple(res)


def _arrays_for_stack_dispatcher(arrays):
    if not hasattr(arrays, "__getitem__"):
        raise TypeError('arrays to stack must be passed as a "sequence" type '
                        'such as list or tuple.')

    return tuple(arrays)
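
# Illustrative note (not from the upstream sources): the ``__getitem__`` check
# above is what makes the stacking functions reject generator inputs up front
# instead of silently consuming them.  Assuming a NumPy version with this
# check in place:
#
#     >>> np.vstack(np.ones(3) for _ in range(2))    # doctest: +SKIP
#     Traceback (most recent call last):
#         ...
#     TypeError: arrays to stack must be passed as a "sequence" type such
#     as list or tuple.
#     >>> np.vstack([np.ones(3) for _ in range(2)]).shape
#     (2, 3)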


def _vhstack_dispatcher(tup, *, dtype=None, casting=None):
    return _arrays_for_stack_dispatcher(tup)


@array_function_dispatch(_vhstack_dispatcher)
def vstack(tup, *, dtype=None, casting="same_kind"):
    """
    Stack arrays in sequence vertically (row wise).

    This is equivalent to concatenation along the first axis after 1-D arrays
    of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
    `vsplit`.

    This function makes most sense for arrays with up to 3 dimensions. For
    instance, for pixel-data with a height (first axis), width (second axis),
    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
    `block` provide more general stacking and concatenation operations.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the first axis.
        1-D arrays must have the same length. In the case of a single
        array_like input, it will be treated as a sequence of arrays; i.e.,
        each element along the zeroth axis is treated as a separate array.

    dtype : str or dtype
        If provided, the destination array will have this dtype. Cannot be
        provided together with `out`.

        .. versionadded:: 1.24

    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Defaults to 'same_kind'.

        .. versionadded:: 1.24

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays, will be at least 2-D.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    block : Assemble an nd-array from nested lists of blocks.
    hstack : Stack arrays in sequence horizontally (column wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    vsplit : Split an array into multiple sub-arrays vertically (row-wise).
    unstack : Split an array into a tuple of sub-arrays along an axis.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.vstack((a,b))
    array([[1, 2, 3],
           [4, 5, 6]])

    >>> a = np.array([[1], [2], [3]])
    >>> b = np.array([[4], [5], [6]])
    >>> np.vstack((a,b))
    array([[1],
           [2],
           [3],
           [4],
           [5],
           [6]])

    """
    arrs = atleast_2d(*tup)
    if not isinstance(arrs, tuple):
        arrs = (arrs,)
    return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting)


@array_function_dispatch(_vhstack_dispatcher)
def hstack(tup, *, dtype=None, casting="same_kind"):
    """
    Stack arrays in sequence horizontally (column wise).

    This is equivalent to concatenation along the second axis, except for 1-D
    arrays where it concatenates along the first axis. Rebuilds arrays divided
    by `hsplit`.

    This function makes most sense for arrays with up to 3 dimensions. For
    instance, for pixel-data with a height (first axis), width (second axis),
    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
    `block` provide more general stacking and concatenation operations.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the second axis,
        except 1-D arrays which can be any length. In the case of a single
        array_like input, it will be treated as a sequence of arrays; i.e.,
        each element along the zeroth axis is treated as a separate array.

    dtype : str or dtype
        If provided, the destination array will have this dtype. Cannot be
        provided together with `out`.

        .. versionadded:: 1.24

    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Defaults to 'same_kind'.

        .. versionadded:: 1.24

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    block : Assemble an nd-array from nested lists of blocks.
    vstack : Stack arrays in sequence vertically (row wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    hsplit : Split an array into multiple sub-arrays
             horizontally (column-wise).
    unstack : Split an array into a tuple of sub-arrays along an axis.

    Examples
    --------
    >>> import numpy as np
    >>> a = np.array((1,2,3))
    >>> b = np.array((4,5,6))
    >>> np.hstack((a,b))
    array([1, 2, 3, 4, 5, 6])
    >>> a = np.array([[1],[2],[3]])
    >>> b = np.array([[4],[5],[6]])
    >>> np.hstack((a,b))
    array([[1, 4],
           [2, 5],
           [3, 6]])

    """
    arrs = atleast_1d(*tup)
    if not isinstance(arrs, tuple):
        arrs = (arrs,)
    # As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
    if arrs and arrs[0].ndim == 1:
        return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting)
    else:
        return _nx.concatenate(arrs, 1, dtype=dtype, casting=casting)


def _stack_dispatcher(arrays, axis=None, out=None, *,
                      dtype=None, casting=None):
    arrays = _arrays_for_stack_dispatcher(arrays)
    if out is not None:
        # optimize for the typical case where only arrays is provided
        arrays = list(arrays)
        arrays.append(out)
    return arrays


@array_function_dispatch(_stack_dispatcher)
def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"):
    """
    Join a sequence of arrays along a new axis.

    The ``axis`` parameter specifies the index of the new axis in the
    dimensions of the result. For example, if ``axis=0`` it will be the first
    dimension and if ``axis=-1`` it will be the last dimension.

    Parameters
    ----------
    arrays : sequence of ndarrays
        Each array must have the same shape. In the case of a single ndarray
        array_like input, it will be treated as a sequence of arrays; i.e.,
        each element along the zeroth axis is treated as a separate array.

    axis : int, optional
        The axis in the result array along which the input arrays are stacked.

    out : ndarray, optional
        If provided, the destination to place the result. The shape must be
        correct, matching that of what stack would have returned if no
        out argument were specified.

    dtype : str or dtype
        If provided, the destination array will have this dtype. Cannot be
        provided together with `out`.

        .. versionadded:: 1.24

    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Defaults to 'same_kind'.

        .. versionadded:: 1.24

    Returns
    -------
    stacked : ndarray
        The stacked array has one more dimension than the input arrays.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    block : Assemble an nd-array from nested lists of blocks.
    split : Split array into a list of multiple sub-arrays of equal size.
    unstack : Split an array into a tuple of sub-arrays along an axis.

    Examples
    --------
    >>> import numpy as np
    >>> rng = np.random.default_rng()
    >>> arrays = [rng.normal(size=(3,4)) for _ in range(10)]
    >>> np.stack(arrays, axis=0).shape
    (10, 3, 4)

    >>> np.stack(arrays, axis=1).shape
    (3, 10, 4)

    >>> np.stack(arrays, axis=2).shape
    (3, 4, 10)

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.stack((a, b))
    array([[1, 2, 3],
           [4, 5, 6]])

    >>> np.stack((a, b), axis=-1)
    array([[1, 4],
           [2, 5],
           [3, 6]])

    """
    arrays = [asanyarray(arr) for arr in arrays]
    if not arrays:
        raise ValueError('need at least one array to stack')

    shapes = {arr.shape for arr in arrays}
    if len(shapes) != 1:
        raise ValueError('all input arrays must have the same shape')

    result_ndim = arrays[0].ndim + 1
    axis = normalize_axis_index(axis, result_ndim)

    sl = (slice(None),) * axis + (_nx.newaxis,)
    expanded_arrays = [arr[sl] for arr in arrays]
    return _nx.concatenate(expanded_arrays, axis=axis, out=out,
                           dtype=dtype, casting=casting)
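
# Implementation note (an informal sketch, values assumed): the ``sl`` tuple
# above inserts the new axis by indexing.  For ``axis=1`` and 2-D inputs of
# shape ``(3, 4)``, ``arr[(slice(None),) * 1 + (np.newaxis,)]`` is
# ``arr[:, np.newaxis]`` with shape ``(3, 1, 4)``, and concatenating those
# views along axis 1 produces the ``(3, 10, 4)`` result shown in the
# docstring above.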


def _unstack_dispatcher(x, /, *, axis=None):
    return (x,)


@array_function_dispatch(_unstack_dispatcher)
def unstack(x, /, *, axis=0):
    """
    Split an array into a sequence of arrays along the given axis.

    The ``axis`` parameter specifies the dimension along which the array will
    be split. For example, if ``axis=0`` (the default) it will be the first
    dimension and if ``axis=-1`` it will be the last dimension.

    The result is a tuple of arrays split along ``axis``.

    .. versionadded:: 2.1.0

    Parameters
    ----------
    x : ndarray
        The array to be unstacked.
    axis : int, optional
        Axis along which the array will be split. Default: ``0``.

    Returns
    -------
    unstacked : tuple of ndarrays
        The unstacked arrays.

    See Also
    --------
    stack : Join a sequence of arrays along a new axis.
    concatenate : Join a sequence of arrays along an existing axis.
    block : Assemble an nd-array from nested lists of blocks.
    split : Split array into a list of multiple sub-arrays of equal size.

    Notes
    -----
    ``unstack`` serves as the reverse operation of :py:func:`stack`, i.e.,
    ``stack(unstack(x, axis=axis), axis=axis) == x``.

    This function is equivalent to ``tuple(np.moveaxis(x, axis, 0))``, since
    iterating on an array iterates along the first axis.

    Examples
    --------
    >>> arr = np.arange(24).reshape((2, 3, 4))
    >>> np.unstack(arr)
    (array([[ 0,  1,  2,  3],
            [ 4,  5,  6,  7],
            [ 8,  9, 10, 11]]),
     array([[12, 13, 14, 15],
            [16, 17, 18, 19],
            [20, 21, 22, 23]]))
    >>> np.unstack(arr, axis=1)
    (array([[ 0,  1,  2,  3],
            [12, 13, 14, 15]]),
     array([[ 4,  5,  6,  7],
            [16, 17, 18, 19]]),
     array([[ 8,  9, 10, 11],
            [20, 21, 22, 23]]))
    >>> arr2 = np.stack(np.unstack(arr, axis=1), axis=1)
    >>> arr2.shape
    (2, 3, 4)
    >>> np.all(arr == arr2)
    np.True_

    """
    if x.ndim == 0:
        raise ValueError("Input array must be at least 1-d.")
    return tuple(_nx.moveaxis(x, axis, 0))


# Internal functions to eliminate the overhead of repeated dispatch in one of
# the two possible paths inside np.block.
# Use getattr to protect against __array_function__ being disabled.
_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
_concatenate = getattr(_from_nx.concatenate,
                       '__wrapped__', _from_nx.concatenate)


def _block_format_index(index):
    """
    Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
    """
    idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
    return 'arrays' + idx_str
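
# Illustrative example (not part of the upstream docstring): a trailing
# ``None`` index, which `_block_check_depths_match` below uses to flag an
# empty list, is simply skipped when formatting:
#
#     >>> _block_format_index([2, 0, None])    # doctest: +SKIP
#     'arrays[2][0]'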


def _block_check_depths_match(arrays, parent_index=[]):
    """
    Recursive function checking that the depths of nested lists in `arrays`
    all match. Mismatch raises a ValueError as described in the block
    docstring below.

    The entire index (rather than just the depth) needs to be calculated
    for each innermost list, in case an error needs to be raised, so that
    the index of the offending list can be printed as part of the error.

    Parameters
    ----------
    arrays : nested list of arrays
        The arrays to check
    parent_index : list of int
        The full index of `arrays` within the nested lists passed to
        `_block_check_depths_match` at the top of the recursion.

    Returns
    -------
    first_index : list of int
        The full index of an element from the bottom of the nesting in
        `arrays`. If any element at the bottom is an empty list, this will
        refer to it, and the last index along the empty axis will be None.
    max_arr_ndim : int
        The maximum of the ndims of the arrays nested in `arrays`.
    final_size : int
        The number of elements in the final array. This is used to choose
        between the two assembly algorithms used by `block`, based on
        benchmarking wisdom.

    """
    if type(arrays) is tuple:
        # not strictly necessary, but saves us from:
        #  - more than one way to do things - no point treating tuples like
        #    lists
        #  - horribly confusing behaviour that results when tuples are
        #    treated like ndarray
        raise TypeError(
            '{} is a tuple. '
            'Only lists can be used to arrange blocks, and np.block does '
            'not allow implicit conversion from tuple to ndarray.'.format(
                _block_format_index(parent_index)
            )
        )
    elif type(arrays) is list and len(arrays) > 0:
        idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
                      for i, arr in enumerate(arrays))

        first_index, max_arr_ndim, final_size = next(idxs_ndims)
        for index, ndim, size in idxs_ndims:
            final_size += size
            if ndim > max_arr_ndim:
                max_arr_ndim = ndim
            if len(index) != len(first_index):
                raise ValueError(
                    "List depths are mismatched. First element was at depth "
                    "{}, but there is an element at depth {} ({})".format(
                        len(first_index),
                        len(index),
                        _block_format_index(index)
                    )
                )
            # propagate our flag that indicates an empty list at the bottom
            if index[-1] is None:
                first_index = index

        return first_index, max_arr_ndim, final_size
    elif type(arrays) is list and len(arrays) == 0:
        # We've 'bottomed out' on an empty list
        return parent_index + [None], 0, 0
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        size = _size(arrays)
        return parent_index, _ndim(arrays), size
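
# A sketch of the bookkeeping above, with assumed 2x2 arrays ``a`` and ``b``
# (8 elements in total):
#
#     >>> _block_check_depths_match([[a, b]])     # doctest: +SKIP
#     ([0, 0], 2, 8)
#     >>> _block_check_depths_match([[a, b], a])  # doctest: +SKIP
#     Traceback (most recent call last):
#         ...
#     ValueError: List depths are mismatched. First element was at depth 2,
#     but there is an element at depth 1 (arrays[1])
#
# i.e. the returned triple is (index of an innermost element, maximum ndim,
# total number of elements).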


def _atleast_nd(a, ndim):
    # Ensures `a` has at least `ndim` dimensions by prepending
    # ones to `a.shape` as necessary
    return array(a, ndmin=ndim, copy=None, subok=True)
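
# For illustration (assumed values): ``_atleast_nd([1, 2], 3)`` has shape
# ``(1, 1, 2)`` -- length-1 axes are prepended, never appended, which is what
# lets `block` align blocks along their trailing axes.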


def _accumulate(values):
    return list(itertools.accumulate(values))


def _concatenate_shapes(shapes, axis):
    """Given array shapes, return the resulting shape and slice prefixes.

    These help in nested concatenation.

    Returns
    -------
    shape: tuple of int
        This tuple satisfies::

            shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
            shape == concatenate(arrs, axis).shape

    slice_prefixes: tuple of (slice(start, end), )
        For a list of arrays being concatenated, this returns the slice
        in the larger array at axis that needs to be sliced into.

        For example, the following holds::

            ret = concatenate([a, b, c], axis)
            _, (sl_a, sl_b, sl_c) = _concatenate_shapes(
                [a.shape, b.shape, c.shape], axis)

            ret[(slice(None),) * axis + sl_a] == a
            ret[(slice(None),) * axis + sl_b] == b
            ret[(slice(None),) * axis + sl_c] == c

        These are called slice prefixes since they are used in the recursive
        blocking algorithm to compute the left-most slices during the
        recursion. Therefore, they must be prepended to the rest of the slice
        that was computed deeper in the recursion.

        These are returned as tuples to ensure that they can quickly be added
        to existing slice tuple without creating a new tuple every time.

    """
    # Cache a result that will be reused.
    shape_at_axis = [shape[axis] for shape in shapes]

    # Take a shape, any shape
    first_shape = shapes[0]
    first_shape_pre = first_shape[:axis]
    first_shape_post = first_shape[axis+1:]

    if any(shape[:axis] != first_shape_pre or
           shape[axis+1:] != first_shape_post for shape in shapes):
        raise ValueError(
            'Mismatched array shapes in block along axis {}.'.format(axis))

    shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])

    offsets_at_axis = _accumulate(shape_at_axis)
    slice_prefixes = [(slice(start, end),)
                      for start, end in zip([0] + offsets_at_axis,
                                            offsets_at_axis)]
    return shape, slice_prefixes
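
# A small worked example (assumed shapes): concatenating shapes ``(2, 3)``
# and ``(2, 4)`` along ``axis=1`` gives
#
#     >>> _concatenate_shapes([(2, 3), (2, 4)], axis=1)   # doctest: +SKIP
#     ((2, 7), [(slice(0, 3, None),), (slice(3, 7, None),)])
#
# so each block is later assigned through
# ``result[(slice(None),) * axis + prefix]``, i.e. ``result[:, 0:3]`` and
# ``result[:, 3:7]`` here.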


def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
    """
    Returns the shape of the final array, along with a list
    of slices and a list of arrays that can be used for assignment inside the
    new array

    Parameters
    ----------
    arrays : nested list of arrays
        The arrays to check
    max_depth : int
        The number of nested lists
    result_ndim : int
        The number of dimensions in the final array.

    Returns
    -------
    shape : tuple of int
        The shape that the final array will take on.
    slices: list of tuple of slices
        The slices into the full array required for assignment. These are
        required to be prepended with ``(Ellipsis, )`` to obtain the correct
        final index.
    arrays: list of ndarray
        The data to assign to each slice of the full array

    """
    if depth < max_depth:
        shapes, slices, arrays = zip(
            *[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
              for arr in arrays])

        axis = result_ndim - max_depth + depth
        shape, slice_prefixes = _concatenate_shapes(shapes, axis)

        # Prepend the slice prefix and flatten the slices
        slices = [slice_prefix + the_slice
                  for slice_prefix, inner_slices in zip(slice_prefixes, slices)
                  for the_slice in inner_slices]

        # Flatten the array list
        arrays = functools.reduce(operator.add, arrays)

        return shape, slices, arrays
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        # type(arrays) is not list
        # Return the slice and the array inside a list to be consistent with
        # the recursive case.
        arr = _atleast_nd(arrays, result_ndim)
        return arr.shape, [()], [arr]
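
# Sketch of the recursion's result (assumed values): for ``[[a, b]]`` with
# ``a.shape == b.shape == (2, 2)``, ``max_depth=2`` and ``result_ndim=2``,
# the inner level concatenates shapes along axis 1 and the outer level along
# axis 0, giving roughly
#
#     shape  == (2, 4)
#     slices == [(slice(0, 2), slice(0, 2)), (slice(0, 2), slice(2, 4))]
#     arrays == [a, b]
#
# which `_block_slicing` below assigns as ``result[(Ellipsis,) + s] = arr``.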


def _block(arrays, max_depth, result_ndim, depth=0):
    """
    Internal implementation of block based on repeated concatenation.
    `arrays` is the argument passed to block. `max_depth` is the depth of
    nested lists within `arrays` and `result_ndim` is the greatest of the
    dimensions of the arrays in `arrays` and the depth of the lists in
    `arrays` (see block docstring for details).
    """
    if depth < max_depth:
        arrs = [_block(arr, max_depth, result_ndim, depth+1)
                for arr in arrays]
        return _concatenate(arrs, axis=-(max_depth-depth))
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        # type(arrays) is not list
        return _atleast_nd(arrays, result_ndim)
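
# Informal sketch (not upstream documentation): the axis at each level is
# ``-(max_depth - depth)``, so for a depth-2 list of 2-D blocks
# ``_block([[a, b], [c, d]], 2, 2)`` is equivalent to
#
#     _concatenate([_concatenate([a, b], axis=-1),
#                   _concatenate([c, d], axis=-1)], axis=-2)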


def _block_dispatcher(arrays):
    # Use type(...) is list to match the behavior of np.block(), which special
    # cases list specifically rather than allowing for generic iterables or
    # tuple. Also, we know that list.__array_function__ will never exist.
    if type(arrays) is list:
        for subarrays in arrays:
            yield from _block_dispatcher(subarrays)
    else:
        yield arrays


@array_function_dispatch(_block_dispatcher)
def block(arrays):
    """
    Assemble an nd-array from nested lists of blocks.

    Blocks in the innermost lists are concatenated (see `concatenate`) along
    the last dimension (-1), then these are concatenated along the
    second-last dimension (-2), and so on until the outermost list is reached.

    Blocks can be of any dimension, but will not be broadcasted using
    the normal rules. Instead, leading axes of size 1 are inserted,
    to make ``block.ndim`` the same for all blocks. This is primarily useful
    for working with scalars, and means that code like ``np.block([v, 1])``
    is valid, where ``v.ndim == 1``.

    When the nested list is two levels deep, this allows block matrices to be
    constructed from their components.

    Parameters
    ----------
    arrays : nested list of array_like or scalars (but not tuples)
        If passed a single ndarray or scalar (a nested list of depth 0), this
        is returned unmodified (and not copied).

        Element shapes must match along the appropriate axes (without
        broadcasting), but leading 1s will be prepended to the shape as
        necessary to make the dimensions match.

    Returns
    -------
    block_array : ndarray
        The array assembled from the given blocks.

        The dimensionality of the output is equal to the greatest of:

        * the dimensionality of all the inputs
        * the depth to which the input list is nested

    Raises
    ------
    ValueError
        * If list depths are mismatched - for instance, ``[[a, b], c]`` is
          illegal, and should be spelt ``[[a, b], [c]]``
        * If lists are empty - for instance, ``[[a, b], []]``

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    vstack : Stack arrays in sequence vertically (row wise).
    hstack : Stack arrays in sequence horizontally (column wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    vsplit : Split an array into multiple sub-arrays vertically (row-wise).
    unstack : Split an array into a tuple of sub-arrays along an axis.

    Notes
    -----
    When called with only scalars, ``np.block`` is equivalent to an ndarray
    call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
    ``np.array([[1, 2], [3, 4]])``.

    This function does not enforce that the blocks lie on a fixed grid.
    ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form::

        AAAbb
        AAAbb
        cccDD

    But is also allowed to produce, for some ``a, b, c, d``::

        AAAbb
        AAAbb
        cDDDD

    Since concatenation happens along the last axis first, `block` is *not*
    capable of producing the following directly::

        AAAbb
        cccbb
        cccDD

    Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
    equivalent to ``np.block([[A, B, ...], [p, q, ...]])``.

    Examples
    --------
    The most common use of this function is to build a block matrix:

    >>> import numpy as np
    >>> A = np.eye(2) * 2
    >>> B = np.eye(3) * 3
    >>> np.block([
    ...     [A,               np.zeros((2, 3))],
    ...     [np.ones((3, 2)), B               ]
    ... ])
    array([[2., 0., 0., 0., 0.],
           [0., 2., 0., 0., 0.],
           [1., 1., 3., 0., 0.],
           [1., 1., 0., 3., 0.],
           [1., 1., 0., 0., 3.]])

    With a list of depth 1, `block` can be used as `hstack`:

    >>> np.block([1, 2, 3])              # hstack([1, 2, 3])
    array([1, 2, 3])

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.block([a, b, 10])             # hstack([a, b, 10])
    array([ 1,  2,  3,  4,  5,  6, 10])

    >>> A = np.ones((2, 2), int)
    >>> B = 2 * A
    >>> np.block([A, B])                 # hstack([A, B])
    array([[1, 1, 2, 2],
           [1, 1, 2, 2]])

    With a list of depth 2, `block` can be used in place of `vstack`:

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.block([[a], [b]])             # vstack([a, b])
    array([[1, 2, 3],
           [4, 5, 6]])

    >>> A = np.ones((2, 2), int)
    >>> B = 2 * A
    >>> np.block([[A], [B]])             # vstack([A, B])
    array([[1, 1],
           [1, 1],
           [2, 2],
           [2, 2]])

    It can also be used in place of `atleast_1d` and `atleast_2d`:

    >>> a = np.array(0)
    >>> b = np.array([1])
    >>> np.block([a])                    # atleast_1d(a)
    array([0])
    >>> np.block([b])                    # atleast_1d(b)
    array([1])

    >>> np.block([[a]])                  # atleast_2d(a)
    array([[0]])
    >>> np.block([[b]])                  # atleast_2d(b)
    array([[1]])

    """
    arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)

    # It was found through benchmarking that making an array of final size
    # around 256x256 was faster by straight concatenation on a
    # i7-7700HQ processor and dual channel ram 2400MHz.
    # It didn't seem to matter heavily on the dtype used.
    #
    # A 2D array using repeated concatenation requires 2 copies of the array.
    #
    # The fastest algorithm will depend on the ratio of CPU power to memory
    # speed.
    # One can monitor the results of the benchmark
    # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
    # to tune this parameter until a C version of the `_block_info_recursion`
    # algorithm is implemented which would likely be faster than the python
    # version.
    if list_ndim * final_size > (2 * 512 * 512):
        return _block_slicing(arrays, list_ndim, result_ndim)
    else:
        return _block_concatenate(arrays, list_ndim, result_ndim)


# These helper functions are mostly used for testing.
# They allow us to write tests that directly call `_block_slicing`
# or `_block_concatenate` without blocking large arrays to force the wisdom
# to trigger the desired path.
def _block_setup(arrays):
    """
    Returns
    (`arrays`, list_ndim, result_ndim, final_size)
    """
    bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
    list_ndim = len(bottom_index)
    if bottom_index and bottom_index[-1] is None:
        raise ValueError(
            'List at {} cannot be empty'.format(
                _block_format_index(bottom_index)
            )
        )
    result_ndim = max(arr_ndim, list_ndim)
    return arrays, list_ndim, result_ndim, final_size
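
# A quick illustration (assumed values): for ``[[a], [b]]`` with ``a`` and
# ``b`` 1-D arrays of length 3, the nesting depth is 2 and the maximum array
# ndim is 1, so ``_block_setup`` returns the nested list unchanged together
# with ``list_ndim=2``, ``result_ndim=2`` and ``final_size=6``.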


def _block_slicing(arrays, list_ndim, result_ndim):
    shape, slices, arrays = _block_info_recursion(
        arrays, list_ndim, result_ndim)
    dtype = _nx.result_type(*[arr.dtype for arr in arrays])

    # Test preferring F only in the case that all input arrays are F
    F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
    C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
    order = 'F' if F_order and not C_order else 'C'
    result = _nx.empty(shape=shape, dtype=dtype, order=order)
    # Note: In a c implementation, the function
    # PyArray_CreateMultiSortedStridePerm could be used for more advanced
    # guessing of the desired order.

    for the_slice, arr in zip(slices, arrays):
        result[(Ellipsis,) + the_slice] = arr
    return result


def _block_concatenate(arrays, list_ndim, result_ndim):
    result = _block(arrays, list_ndim, result_ndim)
    if list_ndim == 0:
        # Catch an edge case where _block returns a view because
        # `arrays` is a single numpy array and not a list of numpy arrays.
        # This might copy scalars or lists twice, but this isn't a likely
        # usecase for those interested in performance
        result = result.copy()
    return result