1__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
2 'stack', 'vstack']
3
4import functools
5import itertools
6import operator
7import warnings
8
9from . import numeric as _nx
10from . import overrides
11from .multiarray import array, asanyarray, normalize_axis_index
12from . import fromnumeric as _from_nx
13
14
# Decorator used throughout this module: dispatches via __array_function__
# and reports every decorated function under the public ``numpy`` namespace.
array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')
17
18
def _atleast_1d_dispatcher(*arys):
    # All positional arguments are candidates for __array_function__
    # dispatch of atleast_1d.
    return arys
21
22
23@array_function_dispatch(_atleast_1d_dispatcher)
def atleast_1d(*arys):
    """
    Convert inputs to arrays with at least one dimension.

    Scalar inputs are promoted to 1-dimensional arrays; inputs that
    already have one or more dimensions are passed through unchanged.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more input arrays.

    Returns
    -------
    ret : ndarray
        An array, or list of arrays, each with ``a.ndim >= 1``.
        Copies are made only if necessary.

    See Also
    --------
    atleast_2d, atleast_3d

    Examples
    --------
    >>> np.atleast_1d(1.0)
    array([1.])

    >>> x = np.arange(9.0).reshape(3,3)
    >>> np.atleast_1d(x)
    array([[0., 1., 2.],
           [3., 4., 5.],
           [6., 7., 8.]])
    >>> np.atleast_1d(x) is x
    True

    >>> np.atleast_1d(1, [3, 4])
    [array([1]), array([3, 4])]

    """
    outputs = [
        arr if arr.ndim != 0 else arr.reshape(1)
        for arr in (asanyarray(ary) for ary in arys)
    ]
    # A single input yields a bare array rather than a one-element list.
    return outputs[0] if len(outputs) == 1 else outputs
75
76
def _atleast_2d_dispatcher(*arys):
    # All positional arguments are candidates for __array_function__
    # dispatch of atleast_2d.
    return arys
79
80
81@array_function_dispatch(_atleast_2d_dispatcher)
def atleast_2d(*arys):
    """
    View inputs as arrays with at least two dimensions.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like sequences. Non-array inputs are converted
        to arrays. Arrays that already have two or more dimensions are
        preserved.

    Returns
    -------
    res, res2, ... : ndarray
        An array, or list of arrays, each with ``a.ndim >= 2``.
        Copies are avoided where possible, and views with two or more
        dimensions are returned.

    See Also
    --------
    atleast_1d, atleast_3d

    Examples
    --------
    >>> np.atleast_2d(3.0)
    array([[3.]])

    >>> x = np.arange(3.0)
    >>> np.atleast_2d(x)
    array([[0., 1., 2.]])
    >>> np.atleast_2d(x).base is x
    True

    >>> np.atleast_2d(1, [1, 2], [[1, 2]])
    [array([[1]]), array([[1, 2]]), array([[1, 2]])]

    """
    outputs = []
    for ary in arys:
        arr = asanyarray(ary)
        if arr.ndim >= 2:
            outputs.append(arr)
        elif arr.ndim == 1:
            # Prepend a length-1 axis; the result is a view of `arr`.
            outputs.append(arr[_nx.newaxis, :])
        else:
            outputs.append(arr.reshape(1, 1))
    return outputs[0] if len(outputs) == 1 else outputs
133
134
def _atleast_3d_dispatcher(*arys):
    # All positional arguments are candidates for __array_function__
    # dispatch of atleast_3d.
    return arys
137
138
139@array_function_dispatch(_atleast_3d_dispatcher)
def atleast_3d(*arys):
    """
    View inputs as arrays with at least three dimensions.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like sequences. Non-array inputs are converted to
        arrays. Arrays that already have three or more dimensions are
        preserved.

    Returns
    -------
    res1, res2, ... : ndarray
        An array, or list of arrays, each with ``a.ndim >= 3``. Copies are
        avoided where possible, and views with three or more dimensions are
        returned. For example, a 1-D array of shape ``(N,)`` becomes a view
        of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
        view of shape ``(M, N, 1)``.

    See Also
    --------
    atleast_1d, atleast_2d

    Examples
    --------
    >>> np.atleast_3d(3.0)
    array([[[3.]]])

    >>> x = np.arange(3.0)
    >>> np.atleast_3d(x).shape
    (1, 3, 1)

    >>> x = np.arange(12.0).reshape(4,3)
    >>> np.atleast_3d(x).shape
    (4, 3, 1)
    >>> np.atleast_3d(x).base is x.base  # x is a reshape, so not base itself
    True

    >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
    ...     print(arr, arr.shape) # doctest: +SKIP
    ...
    [[[1]
      [2]]] (1, 2, 1)
    [[[1]
      [2]]] (1, 2, 1)
    [[[1 2]]] (1, 1, 2)

    """
    outputs = []
    for ary in arys:
        arr = asanyarray(ary)
        nd = arr.ndim
        if nd == 0:
            arr = arr.reshape(1, 1, 1)
        elif nd == 1:
            # (N,) -> view of shape (1, N, 1)
            arr = arr[_nx.newaxis, :, _nx.newaxis]
        elif nd == 2:
            # (M, N) -> view of shape (M, N, 1)
            arr = arr[:, :, _nx.newaxis]
        outputs.append(arr)
    return outputs[0] if len(outputs) == 1 else outputs
205
206
207def _arrays_for_stack_dispatcher(arrays, stacklevel=4):
208 if not hasattr(arrays, '__getitem__') and hasattr(arrays, '__iter__'):
209 warnings.warn('arrays to stack must be passed as a "sequence" type '
210 'such as list or tuple. Support for non-sequence '
211 'iterables such as generators is deprecated as of '
212 'NumPy 1.16 and will raise an error in the future.',
213 FutureWarning, stacklevel=stacklevel)
214 return ()
215 return arrays
216
217
def _vhstack_dispatcher(tup, *,
                        dtype=None, casting=None):
    # Shared dispatcher for vstack/hstack: only the stacked arrays (not
    # `dtype`/`casting`) participate in __array_function__ dispatch.
    return _arrays_for_stack_dispatcher(tup)
221
222
223@array_function_dispatch(_vhstack_dispatcher)
def vstack(tup, *, dtype=None, casting="same_kind"):
    """
    Stack arrays in sequence vertically (row wise).

    This is equivalent to concatenation along the first axis after 1-D arrays
    of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
    `vsplit`.

    This function makes most sense for arrays with up to 3 dimensions. For
    instance, for pixel-data with a height (first axis), width (second axis),
    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
    `block` provide more general stacking and concatenation operations.

    ``np.row_stack`` is an alias for `vstack`. They are the same function.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the first axis.
        1-D arrays must have the same length.

    dtype : str or dtype
        If provided, the destination array will have this dtype. Cannot be
        provided together with `out`.

        .. versionadded:: 1.24

    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Defaults to 'same_kind'.

        .. versionadded:: 1.24

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays, will be at least 2-D.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    block : Assemble an nd-array from nested lists of blocks.
    hstack : Stack arrays in sequence horizontally (column wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    vsplit : Split an array into multiple sub-arrays vertically (row-wise).

    Examples
    --------
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.vstack((a,b))
    array([[1, 2, 3],
           [4, 5, 6]])

    >>> a = np.array([[1], [2], [3]])
    >>> b = np.array([[4], [5], [6]])
    >>> np.vstack((a,b))
    array([[1],
           [2],
           [3],
           [4],
           [5],
           [6]])

    """
    if not overrides.ARRAY_FUNCTION_ENABLED:
        # emit the generator-input deprecation warning if applicable
        _arrays_for_stack_dispatcher(tup, stacklevel=2)
    # atleast_2d returns a bare array for a single input; normalize to list.
    arrs = atleast_2d(*tup)
    arrs = arrs if isinstance(arrs, list) else [arrs]
    return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting)
297
298
299@array_function_dispatch(_vhstack_dispatcher)
def hstack(tup, *, dtype=None, casting="same_kind"):
    """
    Stack arrays in sequence horizontally (column wise).

    This is equivalent to concatenation along the second axis, except for 1-D
    arrays where it concatenates along the first axis. Rebuilds arrays divided
    by `hsplit`.

    This function makes most sense for arrays with up to 3 dimensions. For
    instance, for pixel-data with a height (first axis), width (second axis),
    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
    `block` provide more general stacking and concatenation operations.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the second axis,
        except 1-D arrays which can be any length.

    dtype : str or dtype
        If provided, the destination array will have this dtype. Cannot be
        provided together with `out`.

        .. versionadded:: 1.24

    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Defaults to 'same_kind'.

        .. versionadded:: 1.24

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    block : Assemble an nd-array from nested lists of blocks.
    vstack : Stack arrays in sequence vertically (row wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    hsplit : Split an array into multiple sub-arrays horizontally (column-wise).

    Examples
    --------
    >>> a = np.array((1,2,3))
    >>> b = np.array((4,5,6))
    >>> np.hstack((a,b))
    array([1, 2, 3, 4, 5, 6])
    >>> a = np.array([[1],[2],[3]])
    >>> b = np.array([[4],[5],[6]])
    >>> np.hstack((a,b))
    array([[1, 4],
           [2, 5],
           [3, 6]])

    """
    if not overrides.ARRAY_FUNCTION_ENABLED:
        # emit the generator-input deprecation warning if applicable
        _arrays_for_stack_dispatcher(tup, stacklevel=2)

    # atleast_1d returns a bare array for a single input; normalize to list.
    arrs = atleast_1d(*tup)
    arrs = arrs if isinstance(arrs, list) else [arrs]
    # As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
    axis = 0 if arrs and arrs[0].ndim == 1 else 1
    return _nx.concatenate(arrs, axis, dtype=dtype, casting=casting)
371
372
373def _stack_dispatcher(arrays, axis=None, out=None, *,
374 dtype=None, casting=None):
375 arrays = _arrays_for_stack_dispatcher(arrays, stacklevel=6)
376 if out is not None:
377 # optimize for the typical case where only arrays is provided
378 arrays = list(arrays)
379 arrays.append(out)
380 return arrays
381
382
383@array_function_dispatch(_stack_dispatcher)
def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"):
    """
    Join a sequence of arrays along a new axis.

    The ``axis`` parameter specifies the index of the new axis in the
    dimensions of the result. For example, if ``axis=0`` it will be the first
    dimension and if ``axis=-1`` it will be the last dimension.

    .. versionadded:: 1.10.0

    Parameters
    ----------
    arrays : sequence of array_like
        Each array must have the same shape.

    axis : int, optional
        The axis in the result array along which the input arrays are stacked.

    out : ndarray, optional
        If provided, the destination to place the result. The shape must be
        correct, matching that of what stack would have returned if no
        out argument were specified.

    dtype : str or dtype
        If provided, the destination array will have this dtype. Cannot be
        provided together with `out`.

        .. versionadded:: 1.24

    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Defaults to 'same_kind'.

        .. versionadded:: 1.24

    Returns
    -------
    stacked : ndarray
        The stacked array has one more dimension than the input arrays.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    block : Assemble an nd-array from nested lists of blocks.
    split : Split array into a list of multiple sub-arrays of equal size.

    Examples
    --------
    >>> arrays = [np.random.randn(3, 4) for _ in range(10)]
    >>> np.stack(arrays, axis=0).shape
    (10, 3, 4)

    >>> np.stack(arrays, axis=1).shape
    (3, 10, 4)

    >>> np.stack(arrays, axis=2).shape
    (3, 4, 10)

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.stack((a, b))
    array([[1, 2, 3],
           [4, 5, 6]])

    >>> np.stack((a, b), axis=-1)
    array([[1, 4],
           [2, 5],
           [3, 6]])

    """
    if not overrides.ARRAY_FUNCTION_ENABLED:
        # emit the generator-input deprecation warning if applicable
        _arrays_for_stack_dispatcher(arrays, stacklevel=2)

    converted = [asanyarray(arr) for arr in arrays]
    if not converted:
        raise ValueError('need at least one array to stack')

    first_shape = converted[0].shape
    if any(arr.shape != first_shape for arr in converted):
        raise ValueError('all input arrays must have the same shape')

    result_ndim = converted[0].ndim + 1
    axis = normalize_axis_index(axis, result_ndim)

    # Insert a new length-1 axis at `axis` in every input, then concatenate
    # along that axis.
    expander = (slice(None),) * axis + (_nx.newaxis,)
    expanded = [arr[expander] for arr in converted]
    return _nx.concatenate(expanded, axis=axis, out=out,
                           dtype=dtype, casting=casting)
473
474
# Internal functions to eliminate the overhead of repeated dispatch in one of
# the two possible paths inside np.block.
# Use getattr to protect against __array_function__ being disabled.
# ``__wrapped__`` holds the undispatched implementation when the dispatch
# decorator is active; otherwise the public function is used directly.
_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
_concatenate = getattr(_from_nx.concatenate,
                       '__wrapped__', _from_nx.concatenate)
482
483
484def _block_format_index(index):
485 """
486 Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
487 """
488 idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
489 return 'arrays' + idx_str
490
491
492def _block_check_depths_match(arrays, parent_index=[]):
493 """
494 Recursive function checking that the depths of nested lists in `arrays`
495 all match. Mismatch raises a ValueError as described in the block
496 docstring below.
497
498 The entire index (rather than just the depth) needs to be calculated
499 for each innermost list, in case an error needs to be raised, so that
500 the index of the offending list can be printed as part of the error.
501
502 Parameters
503 ----------
504 arrays : nested list of arrays
505 The arrays to check
506 parent_index : list of int
507 The full index of `arrays` within the nested lists passed to
508 `_block_check_depths_match` at the top of the recursion.
509
510 Returns
511 -------
512 first_index : list of int
513 The full index of an element from the bottom of the nesting in
514 `arrays`. If any element at the bottom is an empty list, this will
515 refer to it, and the last index along the empty axis will be None.
516 max_arr_ndim : int
517 The maximum of the ndims of the arrays nested in `arrays`.
518 final_size: int
519 The number of elements in the final array. This is used the motivate
520 the choice of algorithm used using benchmarking wisdom.
521
522 """
523 if type(arrays) is tuple:
524 # not strictly necessary, but saves us from:
525 # - more than one way to do things - no point treating tuples like
526 # lists
527 # - horribly confusing behaviour that results when tuples are
528 # treated like ndarray
529 raise TypeError(
530 '{} is a tuple. '
531 'Only lists can be used to arrange blocks, and np.block does '
532 'not allow implicit conversion from tuple to ndarray.'.format(
533 _block_format_index(parent_index)
534 )
535 )
536 elif type(arrays) is list and len(arrays) > 0:
537 idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
538 for i, arr in enumerate(arrays))
539
540 first_index, max_arr_ndim, final_size = next(idxs_ndims)
541 for index, ndim, size in idxs_ndims:
542 final_size += size
543 if ndim > max_arr_ndim:
544 max_arr_ndim = ndim
545 if len(index) != len(first_index):
546 raise ValueError(
547 "List depths are mismatched. First element was at depth "
548 "{}, but there is an element at depth {} ({})".format(
549 len(first_index),
550 len(index),
551 _block_format_index(index)
552 )
553 )
554 # propagate our flag that indicates an empty list at the bottom
555 if index[-1] is None:
556 first_index = index
557
558 return first_index, max_arr_ndim, final_size
559 elif type(arrays) is list and len(arrays) == 0:
560 # We've 'bottomed out' on an empty list
561 return parent_index + [None], 0, 0
562 else:
563 # We've 'bottomed out' - arrays is either a scalar or an array
564 size = _size(arrays)
565 return parent_index, _ndim(arrays), size
566
567
def _atleast_nd(a, ndim):
    # Ensures `a` has at least `ndim` dimensions by prepending
    # ones to `a.shape` as necessary.
    # `copy=False` means "copy only if needed"; `subok=True` preserves
    # ndarray subclasses.
    return array(a, ndmin=ndim, copy=False, subok=True)
572
573
def _accumulate(values):
    # Return the running (cumulative) sums of `values` as a list.
    return list(itertools.accumulate(values))
576
577
578def _concatenate_shapes(shapes, axis):
579 """Given array shapes, return the resulting shape and slices prefixes.
580
581 These help in nested concatenation.
582
583 Returns
584 -------
585 shape: tuple of int
586 This tuple satisfies::
587
588 shape, _ = _concatenate_shapes([arr.shape for shape in arrs], axis)
589 shape == concatenate(arrs, axis).shape
590
591 slice_prefixes: tuple of (slice(start, end), )
592 For a list of arrays being concatenated, this returns the slice
593 in the larger array at axis that needs to be sliced into.
594
595 For example, the following holds::
596
597 ret = concatenate([a, b, c], axis)
598 _, (sl_a, sl_b, sl_c) = concatenate_slices([a, b, c], axis)
599
600 ret[(slice(None),) * axis + sl_a] == a
601 ret[(slice(None),) * axis + sl_b] == b
602 ret[(slice(None),) * axis + sl_c] == c
603
604 These are called slice prefixes since they are used in the recursive
605 blocking algorithm to compute the left-most slices during the
606 recursion. Therefore, they must be prepended to rest of the slice
607 that was computed deeper in the recursion.
608
609 These are returned as tuples to ensure that they can quickly be added
610 to existing slice tuple without creating a new tuple every time.
611
612 """
613 # Cache a result that will be reused.
614 shape_at_axis = [shape[axis] for shape in shapes]
615
616 # Take a shape, any shape
617 first_shape = shapes[0]
618 first_shape_pre = first_shape[:axis]
619 first_shape_post = first_shape[axis+1:]
620
621 if any(shape[:axis] != first_shape_pre or
622 shape[axis+1:] != first_shape_post for shape in shapes):
623 raise ValueError(
624 'Mismatched array shapes in block along axis {}.'.format(axis))
625
626 shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])
627
628 offsets_at_axis = _accumulate(shape_at_axis)
629 slice_prefixes = [(slice(start, end),)
630 for start, end in zip([0] + offsets_at_axis,
631 offsets_at_axis)]
632 return shape, slice_prefixes
633
634
def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
    """
    Returns the shape of the final array, along with a list
    of slices and a list of arrays that can be used for assignment inside the
    new array

    Parameters
    ----------
    arrays : nested list of arrays
        The arrays to check
    max_depth : int
        The number of nested lists
    result_ndim : int
        The number of dimensions in the final array.

    Returns
    -------
    shape : tuple of int
        The shape that the final array will take on.
    slices: list of tuple of slices
        The slices into the full array required for assignment. These are
        required to be prepended with ``(Ellipsis, )`` to obtain the correct
        final index.
    arrays: list of ndarray
        The data to assign to each slice of the full array

    """
    if depth < max_depth:
        # Recurse one level down; zip(*...) transposes the per-child
        # (shape, slices, arrays) triples into three parallel sequences.
        shapes, slices, arrays = zip(
            *[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
              for arr in arrays])

        # Concatenation at depth d acts on this axis of the result.
        axis = result_ndim - max_depth + depth
        shape, slice_prefixes = _concatenate_shapes(shapes, axis)

        # Prepend the slice prefix and flatten the slices
        slices = [slice_prefix + the_slice
                  for slice_prefix, inner_slices in zip(slice_prefixes, slices)
                  for the_slice in inner_slices]

        # Flatten the array list
        arrays = functools.reduce(operator.add, arrays)

        return shape, slices, arrays
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        # type(arrays) is not list
        # Return the slice and the array inside a list to be consistent with
        # the recursive case.
        arr = _atleast_nd(arrays, result_ndim)
        return arr.shape, [()], [arr]
686
687
688def _block(arrays, max_depth, result_ndim, depth=0):
689 """
690 Internal implementation of block based on repeated concatenation.
691 `arrays` is the argument passed to
692 block. `max_depth` is the depth of nested lists within `arrays` and
693 `result_ndim` is the greatest of the dimensions of the arrays in
694 `arrays` and the depth of the lists in `arrays` (see block docstring
695 for details).
696 """
697 if depth < max_depth:
698 arrs = [_block(arr, max_depth, result_ndim, depth+1)
699 for arr in arrays]
700 return _concatenate(arrs, axis=-(max_depth-depth))
701 else:
702 # We've 'bottomed out' - arrays is either a scalar or an array
703 # type(arrays) is not list
704 return _atleast_nd(arrays, result_ndim)
705
706
707def _block_dispatcher(arrays):
708 # Use type(...) is list to match the behavior of np.block(), which special
709 # cases list specifically rather than allowing for generic iterables or
710 # tuple. Also, we know that list.__array_function__ will never exist.
711 if type(arrays) is list:
712 for subarrays in arrays:
713 yield from _block_dispatcher(subarrays)
714 else:
715 yield arrays
716
717
718@array_function_dispatch(_block_dispatcher)
def block(arrays):
    """
    Assemble an nd-array from nested lists of blocks.

    Blocks in the innermost lists are concatenated (see `concatenate`) along
    the last dimension (-1), then these are concatenated along the
    second-last dimension (-2), and so on until the outermost list is reached.

    Blocks can be of any dimension, but will not be broadcasted using the normal
    rules. Instead, leading axes of size 1 are inserted, to make ``block.ndim``
    the same for all blocks. This is primarily useful for working with scalars,
    and means that code like ``np.block([v, 1])`` is valid, where
    ``v.ndim == 1``.

    When the nested list is two levels deep, this allows block matrices to be
    constructed from their components.

    .. versionadded:: 1.13.0

    Parameters
    ----------
    arrays : nested list of array_like or scalars (but not tuples)
        If passed a single ndarray or scalar (a nested list of depth 0), this
        is returned unmodified (and not copied).

        Elements shapes must match along the appropriate axes (without
        broadcasting), but leading 1s will be prepended to the shape as
        necessary to make the dimensions match.

    Returns
    -------
    block_array : ndarray
        The array assembled from the given blocks.

        The dimensionality of the output is equal to the greatest of:
        * the dimensionality of all the inputs
        * the depth to which the input list is nested

    Raises
    ------
    ValueError
        * If list depths are mismatched - for instance, ``[[a, b], c]`` is
          illegal, and should be spelt ``[[a, b], [c]]``
        * If lists are empty - for instance, ``[[a, b], []]``

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    vstack : Stack arrays in sequence vertically (row wise).
    hstack : Stack arrays in sequence horizontally (column wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    vsplit : Split an array into multiple sub-arrays vertically (row-wise).

    Notes
    -----

    When called with only scalars, ``np.block`` is equivalent to an ndarray
    call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
    ``np.array([[1, 2], [3, 4]])``.

    This function does not enforce that the blocks lie on a fixed grid.
    ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form::

        AAAbb
        AAAbb
        cccDD

    But is also allowed to produce, for some ``a, b, c, d``::

        AAAbb
        AAAbb
        cDDDD

    Since concatenation happens along the last axis first, `block` is _not_
    capable of producing the following directly::

        AAAbb
        cccbb
        cccDD

    Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
    equivalent to ``np.block([[A, B, ...], [p, q, ...]])``.

    Examples
    --------
    The most common use of this function is to build a block matrix

    >>> A = np.eye(2) * 2
    >>> B = np.eye(3) * 3
    >>> np.block([
    ...     [A,               np.zeros((2, 3))],
    ...     [np.ones((3, 2)), B               ]
    ... ])
    array([[2., 0., 0., 0., 0.],
           [0., 2., 0., 0., 0.],
           [1., 1., 3., 0., 0.],
           [1., 1., 0., 3., 0.],
           [1., 1., 0., 0., 3.]])

    With a list of depth 1, `block` can be used as `hstack`

    >>> np.block([1, 2, 3])              # hstack([1, 2, 3])
    array([1, 2, 3])

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.block([a, b, 10])             # hstack([a, b, 10])
    array([ 1,  2,  3,  4,  5,  6, 10])

    >>> A = np.ones((2, 2), int)
    >>> B = 2 * A
    >>> np.block([A, B])                 # hstack([A, B])
    array([[1, 1, 2, 2],
           [1, 1, 2, 2]])

    With a list of depth 2, `block` can be used in place of `vstack`:

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.block([[a], [b]])             # vstack([a, b])
    array([[1, 2, 3],
           [4, 5, 6]])

    >>> A = np.ones((2, 2), int)
    >>> B = 2 * A
    >>> np.block([[A], [B]])             # vstack([A, B])
    array([[1, 1],
           [1, 1],
           [2, 2],
           [2, 2]])

    It can also be used in place of `atleast_1d` and `atleast_2d`

    >>> a = np.array(0)
    >>> b = np.array([1])
    >>> np.block([a])                    # atleast_1d(a)
    array([0])
    >>> np.block([b])                    # atleast_1d(b)
    array([1])

    >>> np.block([[a]])                  # atleast_2d(a)
    array([[0]])
    >>> np.block([[b]])                  # atleast_2d(b)
    array([[1]])


    """
    arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)

    # It was found through benchmarking that making an array of final size
    # around 256x256 was faster by straight concatenation on a
    # i7-7700HQ processor and dual channel ram 2400MHz.
    # It didn't seem to matter heavily on the dtype used.
    #
    # A 2D array using repeated concatenation requires 2 copies of the array.
    #
    # The fastest algorithm will depend on the ratio of CPU power to memory
    # speed.
    # One can monitor the results of the benchmark
    # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
    # to tune this parameter until a C version of the `_block_info_recursion`
    # algorithm is implemented which would likely be faster than the python
    # version.
    if list_ndim * final_size > (2 * 512 * 512):
        return _block_slicing(arrays, list_ndim, result_ndim)
    else:
        return _block_concatenate(arrays, list_ndim, result_ndim)
888
889
890# These helper functions are mostly used for testing.
891# They allow us to write tests that directly call `_block_slicing`
892# or `_block_concatenate` without blocking large arrays to force the wisdom
893# to trigger the desired path.
894def _block_setup(arrays):
895 """
896 Returns
897 (`arrays`, list_ndim, result_ndim, final_size)
898 """
899 bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
900 list_ndim = len(bottom_index)
901 if bottom_index and bottom_index[-1] is None:
902 raise ValueError(
903 'List at {} cannot be empty'.format(
904 _block_format_index(bottom_index)
905 )
906 )
907 result_ndim = max(arr_ndim, list_ndim)
908 return arrays, list_ndim, result_ndim, final_size
909
910
911def _block_slicing(arrays, list_ndim, result_ndim):
912 shape, slices, arrays = _block_info_recursion(
913 arrays, list_ndim, result_ndim)
914 dtype = _nx.result_type(*[arr.dtype for arr in arrays])
915
916 # Test preferring F only in the case that all input arrays are F
917 F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
918 C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
919 order = 'F' if F_order and not C_order else 'C'
920 result = _nx.empty(shape=shape, dtype=dtype, order=order)
921 # Note: In a c implementation, the function
922 # PyArray_CreateMultiSortedStridePerm could be used for more advanced
923 # guessing of the desired order.
924
925 for the_slice, arr in zip(slices, arrays):
926 result[(Ellipsis,) + the_slice] = arr
927 return result
928
929
930def _block_concatenate(arrays, list_ndim, result_ndim):
931 result = _block(arrays, list_ndim, result_ndim)
932 if list_ndim == 0:
933 # Catch an edge case where _block returns a view because
934 # `arrays` is a single numpy array and not a list of numpy arrays.
935 # This might copy scalars or lists twice, but this isn't a likely
936 # usecase for those interested in performance
937 result = result.copy()
938 return result