__all__ = ['atleast_1d', 'atleast_2d', 'atleast_3d', 'block', 'hstack',
           'stack', 'vstack']

import functools
import itertools
import operator
import warnings

from . import numeric as _nx
from . import overrides
from .multiarray import array, asanyarray, normalize_axis_index
from . import fromnumeric as _from_nx


array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')


def _atleast_1d_dispatcher(*arys):
    return arys


@array_function_dispatch(_atleast_1d_dispatcher)
def atleast_1d(*arys):
    """
    Convert inputs to arrays with at least one dimension.

    Scalar inputs are converted to 1-dimensional arrays, whilst
    higher-dimensional inputs are preserved.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more input arrays.

    Returns
    -------
    ret : ndarray
        An array, or list of arrays, each with ``a.ndim >= 1``.
        Copies are made only if necessary.

    See Also
    --------
    atleast_2d, atleast_3d

    Examples
    --------
    >>> np.atleast_1d(1.0)
    array([1.])

    >>> x = np.arange(9.0).reshape(3,3)
    >>> np.atleast_1d(x)
    array([[0., 1., 2.],
           [3., 4., 5.],
           [6., 7., 8.]])
    >>> np.atleast_1d(x) is x
    True

    >>> np.atleast_1d(1, [3, 4])
    [array([1]), array([3, 4])]

    """
    res = []
    for ary in arys:
        ary = asanyarray(ary)
        if ary.ndim == 0:
            result = ary.reshape(1)
        else:
            result = ary
        res.append(result)
    if len(res) == 1:
        return res[0]
    else:
        return res


def _atleast_2d_dispatcher(*arys):
    return arys


@array_function_dispatch(_atleast_2d_dispatcher)
def atleast_2d(*arys):
    """
    View inputs as arrays with at least two dimensions.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like sequences. Non-array inputs are converted
        to arrays. Arrays that already have two or more dimensions are
        preserved.

    Returns
    -------
    res1, res2, ... : ndarray
        An array, or list of arrays, each with ``a.ndim >= 2``.
        Copies are avoided where possible, and views with two or more
        dimensions are returned.

    See Also
    --------
    atleast_1d, atleast_3d

    Examples
    --------
    >>> np.atleast_2d(3.0)
    array([[3.]])

    >>> x = np.arange(3.0)
    >>> np.atleast_2d(x)
    array([[0., 1., 2.]])
    >>> np.atleast_2d(x).base is x
    True

    >>> np.atleast_2d(1, [1, 2], [[1, 2]])
    [array([[1]]), array([[1, 2]]), array([[1, 2]])]

    """
    res = []
    for ary in arys:
        ary = asanyarray(ary)
        if ary.ndim == 0:
            result = ary.reshape(1, 1)
        elif ary.ndim == 1:
            result = ary[_nx.newaxis, :]
        else:
            result = ary
        res.append(result)
    if len(res) == 1:
        return res[0]
    else:
        return res


def _atleast_3d_dispatcher(*arys):
    return arys


@array_function_dispatch(_atleast_3d_dispatcher)
def atleast_3d(*arys):
    """
    View inputs as arrays with at least three dimensions.

    Parameters
    ----------
    arys1, arys2, ... : array_like
        One or more array-like sequences. Non-array inputs are converted to
        arrays. Arrays that already have three or more dimensions are
        preserved.

    Returns
    -------
    res1, res2, ... : ndarray
        An array, or list of arrays, each with ``a.ndim >= 3``. Copies are
        avoided where possible, and views with three or more dimensions are
        returned. For example, a 1-D array of shape ``(N,)`` becomes a view
        of shape ``(1, N, 1)``, and a 2-D array of shape ``(M, N)`` becomes a
        view of shape ``(M, N, 1)``.

    See Also
    --------
    atleast_1d, atleast_2d

    Examples
    --------
    >>> np.atleast_3d(3.0)
    array([[[3.]]])

    >>> x = np.arange(3.0)
    >>> np.atleast_3d(x).shape
    (1, 3, 1)

    >>> x = np.arange(12.0).reshape(4,3)
    >>> np.atleast_3d(x).shape
    (4, 3, 1)
    >>> np.atleast_3d(x).base is x.base  # x is a reshape, so not base itself
    True

    >>> for arr in np.atleast_3d([1, 2], [[1, 2]], [[[1, 2]]]):
    ...     print(arr, arr.shape) # doctest: +SKIP
    ...
    [[[1]
      [2]]] (1, 2, 1)
    [[[1]
      [2]]] (1, 2, 1)
    [[[1 2]]] (1, 1, 2)

    """
    res = []
    for ary in arys:
        ary = asanyarray(ary)
        if ary.ndim == 0:
            result = ary.reshape(1, 1, 1)
        elif ary.ndim == 1:
            result = ary[_nx.newaxis, :, _nx.newaxis]
        elif ary.ndim == 2:
            result = ary[:, :, _nx.newaxis]
        else:
            result = ary
        res.append(result)
    if len(res) == 1:
        return res[0]
    else:
        return res


def _arrays_for_stack_dispatcher(arrays):
    if not hasattr(arrays, "__getitem__"):
        raise TypeError('arrays to stack must be passed as a "sequence" type '
                        'such as list or tuple.')

    return tuple(arrays)


def _vhstack_dispatcher(tup, *, dtype=None, casting=None):
    return _arrays_for_stack_dispatcher(tup)


@array_function_dispatch(_vhstack_dispatcher)
def vstack(tup, *, dtype=None, casting="same_kind"):
    """
    Stack arrays in sequence vertically (row wise).

    This is equivalent to concatenation along the first axis after 1-D arrays
    of shape `(N,)` have been reshaped to `(1,N)`. Rebuilds arrays divided by
    `vsplit`.

    This function makes most sense for arrays with up to 3 dimensions. For
    instance, for pixel-data with a height (first axis), width (second axis),
    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
    `block` provide more general stacking and concatenation operations.

    ``np.row_stack`` is an alias for `vstack`. They are the same function.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the first axis.
        1-D arrays must have the same length.

    dtype : str or dtype
        If provided, the destination array will have this dtype.

        .. versionadded:: 1.24

    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Defaults to 'same_kind'.

        .. versionadded:: 1.24

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays; it will be at least
        2-D.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    block : Assemble an nd-array from nested lists of blocks.
    hstack : Stack arrays in sequence horizontally (column wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    vsplit : Split an array into multiple sub-arrays vertically (row-wise).

    Examples
    --------
    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.vstack((a,b))
    array([[1, 2, 3],
           [4, 5, 6]])

    >>> a = np.array([[1], [2], [3]])
    >>> b = np.array([[4], [5], [6]])
    >>> np.vstack((a,b))
    array([[1],
           [2],
           [3],
           [4],
           [5],
           [6]])

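    With the ``dtype`` keyword (added in NumPy 1.24), a result type can be
    requested explicitly; for instance, with the arrays above:

    >>> np.vstack((a, b), dtype=np.float64)
    array([[1.],
           [2.],
           [3.],
           [4.],
           [5.],
           [6.]])
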
    """
    arrs = atleast_2d(*tup)
    if not isinstance(arrs, list):
        arrs = [arrs]
    return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting)


@array_function_dispatch(_vhstack_dispatcher)
def hstack(tup, *, dtype=None, casting="same_kind"):
    """
    Stack arrays in sequence horizontally (column wise).

    This is equivalent to concatenation along the second axis, except for 1-D
    arrays where it concatenates along the first axis. Rebuilds arrays divided
    by `hsplit`.

    This function makes most sense for arrays with up to 3 dimensions. For
    instance, for pixel-data with a height (first axis), width (second axis),
    and r/g/b channels (third axis). The functions `concatenate`, `stack` and
    `block` provide more general stacking and concatenation operations.

    Parameters
    ----------
    tup : sequence of ndarrays
        The arrays must have the same shape along all but the second axis,
        except 1-D arrays which can be any length.

    dtype : str or dtype
        If provided, the destination array will have this dtype.

        .. versionadded:: 1.24

    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Defaults to 'same_kind'.

        .. versionadded:: 1.24

    Returns
    -------
    stacked : ndarray
        The array formed by stacking the given arrays.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    block : Assemble an nd-array from nested lists of blocks.
    vstack : Stack arrays in sequence vertically (row wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    hsplit : Split an array into multiple sub-arrays horizontally (column-wise).

    Examples
    --------
    >>> a = np.array((1,2,3))
    >>> b = np.array((4,5,6))
    >>> np.hstack((a,b))
    array([1, 2, 3, 4, 5, 6])
    >>> a = np.array([[1],[2],[3]])
    >>> b = np.array([[4],[5],[6]])
    >>> np.hstack((a,b))
    array([[1, 4],
           [2, 5],
           [3, 6]])

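    As with `vstack`, the ``dtype`` keyword (added in NumPy 1.24) forwards to
    `concatenate`, so the result type can be requested explicitly:

    >>> np.hstack((a, b), dtype=np.float32)
    array([[1., 4.],
           [2., 5.],
           [3., 6.]], dtype=float32)
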
    """
    arrs = atleast_1d(*tup)
    if not isinstance(arrs, list):
        arrs = [arrs]
    # As a special case, dimension 0 of 1-dimensional arrays is "horizontal"
    if arrs and arrs[0].ndim == 1:
        return _nx.concatenate(arrs, 0, dtype=dtype, casting=casting)
    else:
        return _nx.concatenate(arrs, 1, dtype=dtype, casting=casting)


def _stack_dispatcher(arrays, axis=None, out=None, *,
                      dtype=None, casting=None):
    arrays = _arrays_for_stack_dispatcher(arrays)
    if out is not None:
        # optimize for the typical case where only arrays is provided
        arrays = list(arrays)
        arrays.append(out)
    return arrays


@array_function_dispatch(_stack_dispatcher)
def stack(arrays, axis=0, out=None, *, dtype=None, casting="same_kind"):
    """
    Join a sequence of arrays along a new axis.

    The ``axis`` parameter specifies the index of the new axis in the
    dimensions of the result. For example, if ``axis=0`` it will be the first
    dimension and if ``axis=-1`` it will be the last dimension.

    .. versionadded:: 1.10.0

    Parameters
    ----------
    arrays : sequence of array_like
        Each array must have the same shape.

    axis : int, optional
        The axis in the result array along which the input arrays are stacked.

    out : ndarray, optional
        If provided, the destination to place the result. The shape must be
        correct, matching that of what stack would have returned if no
        out argument were specified.

    dtype : str or dtype
        If provided, the destination array will have this dtype. Cannot be
        provided together with `out`.

        .. versionadded:: 1.24

    casting : {'no', 'equiv', 'safe', 'same_kind', 'unsafe'}, optional
        Controls what kind of data casting may occur. Defaults to 'same_kind'.

        .. versionadded:: 1.24

    Returns
    -------
    stacked : ndarray
        The stacked array has one more dimension than the input arrays.

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    block : Assemble an nd-array from nested lists of blocks.
    split : Split array into a list of multiple sub-arrays of equal size.

    Examples
    --------
    >>> arrays = [np.random.randn(3, 4) for _ in range(10)]
    >>> np.stack(arrays, axis=0).shape
    (10, 3, 4)

    >>> np.stack(arrays, axis=1).shape
    (3, 10, 4)

    >>> np.stack(arrays, axis=2).shape
    (3, 4, 10)

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.stack((a, b))
    array([[1, 2, 3],
           [4, 5, 6]])

    >>> np.stack((a, b), axis=-1)
    array([[1, 4],
           [2, 5],
           [3, 6]])

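    The ``dtype`` keyword (added in NumPy 1.24) forwards to `concatenate`, so
    a result type can be requested explicitly; for instance:

    >>> np.stack((a, b), dtype=np.float64)
    array([[1., 2., 3.],
           [4., 5., 6.]])
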
    """
    arrays = [asanyarray(arr) for arr in arrays]
    if not arrays:
        raise ValueError('need at least one array to stack')

    shapes = {arr.shape for arr in arrays}
    if len(shapes) != 1:
        raise ValueError('all input arrays must have the same shape')

    result_ndim = arrays[0].ndim + 1
    axis = normalize_axis_index(axis, result_ndim)

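    # Build an index that inserts a new axis at position `axis`; e.g. for
    # axis=1 this is (slice(None), newaxis), so each arr[sl] gains a
    # length-1 dimension there before being concatenated along it.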
    sl = (slice(None),) * axis + (_nx.newaxis,)
    expanded_arrays = [arr[sl] for arr in arrays]
    return _nx.concatenate(expanded_arrays, axis=axis, out=out,
                           dtype=dtype, casting=casting)


# Internal functions to eliminate the overhead of repeated dispatch in one of
# the two possible paths inside np.block.
# Use getattr to protect against __array_function__ being disabled.
_size = getattr(_from_nx.size, '__wrapped__', _from_nx.size)
_ndim = getattr(_from_nx.ndim, '__wrapped__', _from_nx.ndim)
_concatenate = getattr(_from_nx.concatenate,
                       '__wrapped__', _from_nx.concatenate)


def _block_format_index(index):
    """
    Convert a list of indices ``[0, 1, 2]`` into ``"arrays[0][1][2]"``.
    """
    idx_str = ''.join('[{}]'.format(i) for i in index if i is not None)
    return 'arrays' + idx_str


def _block_check_depths_match(arrays, parent_index=[]):
    """
    Recursive function checking that the depths of nested lists in `arrays`
    all match. Mismatch raises a ValueError as described in the block
    docstring below.

    The entire index (rather than just the depth) needs to be calculated
    for each innermost list, in case an error needs to be raised, so that
    the index of the offending list can be printed as part of the error.

    Parameters
    ----------
    arrays : nested list of arrays
        The arrays to check
    parent_index : list of int
        The full index of `arrays` within the nested lists passed to
        `_block_check_depths_match` at the top of the recursion.

    Returns
    -------
    first_index : list of int
        The full index of an element from the bottom of the nesting in
        `arrays`. If any element at the bottom is an empty list, this will
        refer to it, and the last index along the empty axis will be None.
    max_arr_ndim : int
        The maximum of the ndims of the arrays nested in `arrays`.
    final_size : int
        The number of elements in the final array. This is used to motivate
        the choice of algorithm, based on benchmarking wisdom.

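    For instance (illustrative only), with ``a`` and ``b`` any two arrays of
    shape ``(2, 2)``::

        _block_check_depths_match([[a], [b]])
        # -> ([0, 0], 2, 8)
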
    """
    if type(arrays) is tuple:
        # not strictly necessary, but saves us from:
        #  - more than one way to do things - no point treating tuples like
        #    lists
        #  - horribly confusing behaviour that results when tuples are
        #    treated like ndarray
        raise TypeError(
            '{} is a tuple. '
            'Only lists can be used to arrange blocks, and np.block does '
            'not allow implicit conversion from tuple to ndarray.'.format(
                _block_format_index(parent_index)
            )
        )
    elif type(arrays) is list and len(arrays) > 0:
        idxs_ndims = (_block_check_depths_match(arr, parent_index + [i])
                      for i, arr in enumerate(arrays))

        first_index, max_arr_ndim, final_size = next(idxs_ndims)
        for index, ndim, size in idxs_ndims:
            final_size += size
            if ndim > max_arr_ndim:
                max_arr_ndim = ndim
            if len(index) != len(first_index):
                raise ValueError(
                    "List depths are mismatched. First element was at depth "
                    "{}, but there is an element at depth {} ({})".format(
                        len(first_index),
                        len(index),
                        _block_format_index(index)
                    )
                )
            # propagate our flag that indicates an empty list at the bottom
            if index[-1] is None:
                first_index = index

        return first_index, max_arr_ndim, final_size
    elif type(arrays) is list and len(arrays) == 0:
        # We've 'bottomed out' on an empty list
        return parent_index + [None], 0, 0
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        size = _size(arrays)
        return parent_index, _ndim(arrays), size


def _atleast_nd(a, ndim):
    # Ensures `a` has at least `ndim` dimensions by prepending
    # ones to `a.shape` as necessary
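    # e.g. an input of shape (3,) comes back as shape (1, 1, 3) when ndim=3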
    return array(a, ndmin=ndim, copy=False, subok=True)


def _accumulate(values):
    return list(itertools.accumulate(values))


def _concatenate_shapes(shapes, axis):
    """Given array shapes, return the resulting shape and slice prefixes.

    These help in nested concatenation.

    Returns
    -------
    shape : tuple of int
        This tuple satisfies::

            shape, _ = _concatenate_shapes([arr.shape for arr in arrs], axis)
            shape == concatenate(arrs, axis).shape

    slice_prefixes : tuple of (slice(start, end), )
        For a list of arrays being concatenated, this returns the slice
        in the larger array at axis that needs to be sliced into.

        For example, the following holds::

            ret = concatenate([a, b, c], axis)
            _, (sl_a, sl_b, sl_c) = _concatenate_shapes(
                [a.shape, b.shape, c.shape], axis)

            ret[(slice(None),) * axis + sl_a] == a
            ret[(slice(None),) * axis + sl_b] == b
            ret[(slice(None),) * axis + sl_c] == c

        These are called slice prefixes since they are used in the recursive
        blocking algorithm to compute the left-most slices during the
        recursion. Therefore, they must be prepended to the rest of the slice
        that was computed deeper in the recursion.

        These are returned as tuples to ensure that they can quickly be added
        to an existing slice tuple without creating a new tuple every time.

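    For instance (illustrative only)::

        _concatenate_shapes([(2, 3), (2, 4)], axis=1)
        # -> ((2, 7), [(slice(0, 3),), (slice(3, 7),)])
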
    """
    # Cache a result that will be reused.
    shape_at_axis = [shape[axis] for shape in shapes]

    # Take a shape, any shape
    first_shape = shapes[0]
    first_shape_pre = first_shape[:axis]
    first_shape_post = first_shape[axis+1:]

    if any(shape[:axis] != first_shape_pre or
           shape[axis+1:] != first_shape_post for shape in shapes):
        raise ValueError(
            'Mismatched array shapes in block along axis {}.'.format(axis))

    shape = (first_shape_pre + (sum(shape_at_axis),) + first_shape[axis+1:])

    offsets_at_axis = _accumulate(shape_at_axis)
    slice_prefixes = [(slice(start, end),)
                      for start, end in zip([0] + offsets_at_axis,
                                            offsets_at_axis)]
    return shape, slice_prefixes


def _block_info_recursion(arrays, max_depth, result_ndim, depth=0):
    """
    Returns the shape of the final array, along with a list
    of slices and a list of arrays that can be used for assignment inside the
    new array

    Parameters
    ----------
    arrays : nested list of arrays
        The arrays to check
    max_depth : int
        The depth of nesting of the lists in `arrays`
    result_ndim : int
        The number of dimensions in the final array.

    Returns
    -------
    shape : tuple of int
        The shape that the final array will take on.
    slices : list of tuple of slices
        The slices into the full array required for assignment. These are
        required to be prepended with ``(Ellipsis, )`` to obtain the correct
        final index.
    arrays : list of ndarray
        The data to assign to each slice of the full array

    """
    if depth < max_depth:
        shapes, slices, arrays = zip(
            *[_block_info_recursion(arr, max_depth, result_ndim, depth+1)
              for arr in arrays])

        axis = result_ndim - max_depth + depth
        shape, slice_prefixes = _concatenate_shapes(shapes, axis)

        # Prepend the slice prefix and flatten the slices
        slices = [slice_prefix + the_slice
                  for slice_prefix, inner_slices in zip(slice_prefixes, slices)
                  for the_slice in inner_slices]

        # Flatten the array list
        arrays = functools.reduce(operator.add, arrays)

        return shape, slices, arrays
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        # type(arrays) is not list
        # Return the slice and the array inside a list to be consistent with
        # the recursive case.
        arr = _atleast_nd(arrays, result_ndim)
        return arr.shape, [()], [arr]


def _block(arrays, max_depth, result_ndim, depth=0):
    """
    Internal implementation of block based on repeated concatenation.
    `arrays` is the argument passed to block. `max_depth` is the depth of
    nested lists within `arrays` and `result_ndim` is the greatest of the
    dimensions of the arrays in `arrays` and the depth of the lists in
    `arrays` (see block docstring for details).
    """
    if depth < max_depth:
        arrs = [_block(arr, max_depth, result_ndim, depth+1)
                for arr in arrays]
        return _concatenate(arrs, axis=-(max_depth-depth))
    else:
        # We've 'bottomed out' - arrays is either a scalar or an array
        # type(arrays) is not list
        return _atleast_nd(arrays, result_ndim)


def _block_dispatcher(arrays):
    # Use type(...) is list to match the behavior of np.block(), which special
    # cases list specifically rather than allowing for generic iterables or
    # tuple. Also, we know that list.__array_function__ will never exist.
    if type(arrays) is list:
        for subarrays in arrays:
            yield from _block_dispatcher(subarrays)
    else:
        yield arrays


@array_function_dispatch(_block_dispatcher)
def block(arrays):
    """
    Assemble an nd-array from nested lists of blocks.

    Blocks in the innermost lists are concatenated (see `concatenate`) along
    the last dimension (-1), then these are concatenated along the
    second-last dimension (-2), and so on until the outermost list is reached.

    Blocks can be of any dimension, but will not be broadcasted using the
    normal rules. Instead, leading axes of size 1 are inserted, to make
    ``block.ndim`` the same for all blocks. This is primarily useful for
    working with scalars, and means that code like ``np.block([v, 1])`` is
    valid, where ``v.ndim == 1``.

    When the nested list is two levels deep, this allows block matrices to be
    constructed from their components.

    .. versionadded:: 1.13.0

    Parameters
    ----------
    arrays : nested list of array_like or scalars (but not tuples)
        If passed a single ndarray or scalar (a nested list of depth 0), this
        is returned unmodified (and not copied).

        Element shapes must match along the appropriate axes (without
        broadcasting), but leading 1s will be prepended to the shape as
        necessary to make the dimensions match.

    Returns
    -------
    block_array : ndarray
        The array assembled from the given blocks.

        The dimensionality of the output is equal to the greatest of:

        * the dimensionality of all the inputs
        * the depth to which the input list is nested

    Raises
    ------
    ValueError
        * If list depths are mismatched - for instance, ``[[a, b], c]`` is
          illegal, and should be spelt ``[[a, b], [c]]``
        * If lists are empty - for instance, ``[[a, b], []]``

    See Also
    --------
    concatenate : Join a sequence of arrays along an existing axis.
    stack : Join a sequence of arrays along a new axis.
    vstack : Stack arrays in sequence vertically (row wise).
    hstack : Stack arrays in sequence horizontally (column wise).
    dstack : Stack arrays in sequence depth wise (along third axis).
    column_stack : Stack 1-D arrays as columns into a 2-D array.
    vsplit : Split an array into multiple sub-arrays vertically (row-wise).

    Notes
    -----

    When called with only scalars, ``np.block`` is equivalent to an ndarray
    call. So ``np.block([[1, 2], [3, 4]])`` is equivalent to
    ``np.array([[1, 2], [3, 4]])``.

    This function does not enforce that the blocks lie on a fixed grid.
    ``np.block([[a, b], [c, d]])`` is not restricted to arrays of the form::

        AAAbb
        AAAbb
        cccDD

    But is also allowed to produce, for some ``a, b, c, d``::

        AAAbb
        AAAbb
        cDDDD

    Since concatenation happens along the last axis first, `block` is *not*
    capable of producing the following directly::

        AAAbb
        cccbb
        cccDD

    Matlab's "square bracket stacking", ``[A, B, ...; p, q, ...]``, is
    equivalent to ``np.block([[A, B, ...], [p, q, ...]])``.

    Examples
    --------
    The most common use of this function is to build a block matrix

    >>> A = np.eye(2) * 2
    >>> B = np.eye(3) * 3
    >>> np.block([
    ...     [A,               np.zeros((2, 3))],
    ...     [np.ones((3, 2)), B               ]
    ... ])
    array([[2., 0., 0., 0., 0.],
           [0., 2., 0., 0., 0.],
           [1., 1., 3., 0., 0.],
           [1., 1., 0., 3., 0.],
           [1., 1., 0., 0., 3.]])

    With a list of depth 1, `block` can be used as `hstack`

    >>> np.block([1, 2, 3])              # hstack([1, 2, 3])
    array([1, 2, 3])

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.block([a, b, 10])             # hstack([a, b, 10])
    array([ 1,  2,  3,  4,  5,  6, 10])

    >>> A = np.ones((2, 2), int)
    >>> B = 2 * A
    >>> np.block([A, B])                 # hstack([A, B])
    array([[1, 1, 2, 2],
           [1, 1, 2, 2]])

    With a list of depth 2, `block` can be used in place of `vstack`:

    >>> a = np.array([1, 2, 3])
    >>> b = np.array([4, 5, 6])
    >>> np.block([[a], [b]])             # vstack([a, b])
    array([[1, 2, 3],
           [4, 5, 6]])

    >>> A = np.ones((2, 2), int)
    >>> B = 2 * A
    >>> np.block([[A], [B]])             # vstack([A, B])
    array([[1, 1],
           [1, 1],
           [2, 2],
           [2, 2]])

    It can also be used in place of `atleast_1d` and `atleast_2d`

    >>> a = np.array(0)
    >>> b = np.array([1])
    >>> np.block([a])                    # atleast_1d(a)
    array([0])
    >>> np.block([b])                    # atleast_1d(b)
    array([1])

    >>> np.block([[a]])                  # atleast_2d(a)
    array([[0]])
    >>> np.block([[b]])                  # atleast_2d(b)
    array([[1]])

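    As noted above, the blocks need not lie on a regular grid; shapes only
    have to match along the axes being joined:

    >>> a = np.ones((2, 3), int)
    >>> b = 2 * np.ones((2, 2), int)
    >>> c = 3 * np.ones((1, 1), int)
    >>> d = 4 * np.ones((1, 4), int)
    >>> np.block([[a, b], [c, d]])
    array([[1, 1, 1, 2, 2],
           [1, 1, 1, 2, 2],
           [3, 4, 4, 4, 4]])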

    """
    arrays, list_ndim, result_ndim, final_size = _block_setup(arrays)

    # It was found through benchmarking that making an array of final size
    # around 256x256 was faster by straight concatenation on an
    # i7-7700HQ processor and dual channel ram 2400MHz.
    # The dtype used did not seem to matter much.
    #
    # A 2D array using repeated concatenation requires 2 copies of the array.
    #
    # The fastest algorithm will depend on the ratio of CPU power to memory
    # speed.
    # One can monitor the results of the benchmark
    # https://pv.github.io/numpy-bench/#bench_shape_base.Block2D.time_block2d
    # to tune this parameter until a C version of the `_block_info_recursion`
    # algorithm is implemented which would likely be faster than the python
    # version.
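    # e.g. for a two-level nested list (list_ndim == 2), the slicing path is
    # taken once the assembled array exceeds 512 * 512 elements.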
    if list_ndim * final_size > (2 * 512 * 512):
        return _block_slicing(arrays, list_ndim, result_ndim)
    else:
        return _block_concatenate(arrays, list_ndim, result_ndim)


# These helper functions are mostly used for testing.
# They allow us to write tests that directly call `_block_slicing`
# or `_block_concatenate` without having to block large arrays to force the
# size heuristic to choose the desired path.
def _block_setup(arrays):
    """
    Returns
    (`arrays`, list_ndim, result_ndim, final_size)
    """
    bottom_index, arr_ndim, final_size = _block_check_depths_match(arrays)
    list_ndim = len(bottom_index)
    if bottom_index and bottom_index[-1] is None:
        raise ValueError(
            'List at {} cannot be empty'.format(
                _block_format_index(bottom_index)
            )
        )
    result_ndim = max(arr_ndim, list_ndim)
    return arrays, list_ndim, result_ndim, final_size


def _block_slicing(arrays, list_ndim, result_ndim):
    shape, slices, arrays = _block_info_recursion(
        arrays, list_ndim, result_ndim)
    dtype = _nx.result_type(*[arr.dtype for arr in arrays])

    # Test preferring F only in the case that all input arrays are F
    F_order = all(arr.flags['F_CONTIGUOUS'] for arr in arrays)
    C_order = all(arr.flags['C_CONTIGUOUS'] for arr in arrays)
    order = 'F' if F_order and not C_order else 'C'
    result = _nx.empty(shape=shape, dtype=dtype, order=order)
    # Note: In a c implementation, the function
    # PyArray_CreateMultiSortedStridePerm could be used for more advanced
    # guessing of the desired order.

    for the_slice, arr in zip(slices, arrays):
        result[(Ellipsis,) + the_slice] = arr
    return result


def _block_concatenate(arrays, list_ndim, result_ndim):
    result = _block(arrays, list_ndim, result_ndim)
    if list_ndim == 0:
        # Catch an edge case where _block returns a view because
        # `arrays` is a single numpy array and not a list of numpy arrays.
        # This might copy scalars or lists twice, but this isn't a likely
        # usecase for those interested in performance
        result = result.copy()
    return result