1"""
2A place for internal code
3
4Some things are more easily handled Python.
5
6"""
7import ast
8import re
9import sys
10import warnings
11
12from .multiarray import dtype, array, ndarray, promote_types
13try:
14 import ctypes
15except ImportError:
16 ctypes = None
17
18IS_PYPY = sys.implementation.name == 'pypy'
19
20if sys.byteorder == 'little':
21 _nbo = '<'
22else:
23 _nbo = '>'
24
def _makenames_list(adict, align):
    allfields = []

    for fname, obj in adict.items():
        n = len(obj)
        if not isinstance(obj, tuple) or n not in (2, 3):
            raise ValueError("entry not a 2- or 3- tuple")
        if n > 2 and obj[2] == fname:
            continue
        num = int(obj[1])
        if num < 0:
            raise ValueError("invalid offset.")
        format = dtype(obj[0], align=align)
        if n > 2:
            title = obj[2]
        else:
            title = None
        allfields.append((fname, format, num, title))
    # sort by offsets
    allfields.sort(key=lambda x: x[2])
    names = [x[0] for x in allfields]
    formats = [x[1] for x in allfields]
    offsets = [x[2] for x in allfields]
    titles = [x[3] for x in allfields]

    return names, formats, offsets, titles

# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
    try:
        names = adict[-1]
    except KeyError:
        names = None
    if names is None:
        names, formats, offsets, titles = _makenames_list(adict, align)
    else:
        formats = []
        offsets = []
        titles = []
        for name in names:
            res = adict[name]
            formats.append(res[0])
            offsets.append(res[1])
            if len(res) > 2:
                titles.append(res[2])
            else:
                titles.append(None)

    return dtype({"names": names,
                  "formats": formats,
                  "offsets": offsets,
                  "titles": titles}, align)


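# Illustrative example (assumed behavior): a field dictionary of the form
# {name: (format, offset[, title])} is expected to map to a structured dtype,
# e.g.
#
#     >>> _usefields({'x': ('i4', 0), 'y': ('f8', 4)}, align=False)
#
# should describe the same layout as dtype([('x', 'i4'), ('y', 'f8')]).
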
# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring

def _array_descr(descriptor):
    fields = descriptor.fields
    if fields is None:
        subdtype = descriptor.subdtype
        if subdtype is None:
            if descriptor.metadata is None:
                return descriptor.str
            else:
                new = descriptor.metadata.copy()
                if new:
                    return (descriptor.str, new)
                else:
                    return descriptor.str
        else:
            return (_array_descr(subdtype[0]), subdtype[1])

    names = descriptor.names
    ordered_fields = [fields[x] + (x,) for x in names]
    result = []
    offset = 0
    for field in ordered_fields:
        if field[1] > offset:
            num = field[1] - offset
            result.append(('', f'|V{num}'))
            offset += num
        elif field[1] < offset:
            raise ValueError(
                "dtype.descr is not defined for types with overlapping or "
                "out-of-order fields")
        if len(field) > 3:
            name = (field[2], field[3])
        else:
            name = field[2]
        if field[0].subdtype:
            tup = (name, _array_descr(field[0].subdtype[0]),
                   field[0].subdtype[1])
        else:
            tup = (name, _array_descr(field[0]))
        offset += field[0].itemsize
        result.append(tup)

    if descriptor.itemsize > offset:
        num = descriptor.itemsize - offset
        result.append(('', f'|V{num}'))

    return result

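# Illustrative example (assumed behavior): for an aligned struct such as
# dtype({'names': ['a', 'b'], 'formats': ['u1', 'f8']}, align=True),
# _array_descr is expected to spell the alignment padding out explicitly,
# giving roughly [('a', '|u1'), ('', '|V7'), ('b', '<f8')] on a
# little-endian platform.
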
# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibility.
def _reconstruct(subtype, shape, dtype):
    return ndarray.__new__(subtype, shape, dtype)


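# Illustrative note (assumed pickle layout): ndarray.__reduce__ emits a tuple
# of roughly the form (_reconstruct, (ndarray, (0,), b'b'), <state>), so
# unpickling first calls _reconstruct to obtain a placeholder array and then
# fills it in through __setstate__.
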
# format_re was originally from numarray by J. Todd Miller

format_re = re.compile(r'(?P<order1>[<>|=]?)'
                       r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
                       r'(?P<order2>[<>|=]?)'
                       r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
sep_re = re.compile(r'\s*,\s*')
space_re = re.compile(r'\s+$')

# astr is a string (perhaps comma separated)

_convorder = {'=': _nbo}

def _commastring(astr):
    startindex = 0
    result = []
    while startindex < len(astr):
        mo = format_re.match(astr, pos=startindex)
        try:
            (order1, repeats, order2, dtype) = mo.groups()
        except (TypeError, AttributeError):
            raise ValueError(
                f'format number {len(result)+1} of "{astr}" is not recognized'
                ) from None
        startindex = mo.end()
        # Separator or ending padding
        if startindex < len(astr):
            if space_re.match(astr, pos=startindex):
                startindex = len(astr)
            else:
                mo = sep_re.match(astr, pos=startindex)
                if not mo:
                    raise ValueError(
                        'format number %d of "%s" is not recognized' %
                        (len(result)+1, astr))
                startindex = mo.end()

        if order2 == '':
            order = order1
        elif order1 == '':
            order = order2
        else:
            order1 = _convorder.get(order1, order1)
            order2 = _convorder.get(order2, order2)
            if (order1 != order2):
                raise ValueError(
                    'inconsistent byte-order specification %s and %s' %
                    (order1, order2))
            order = order1

        if order in ('|', '=', _nbo):
            order = ''
        dtype = order + dtype
        if (repeats == ''):
            newitem = dtype
        else:
            newitem = (dtype, ast.literal_eval(repeats))
        result.append(newitem)

    return result

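# Illustrative example (assumed behavior): a comma-separated typestring such
# as "i4, (2,3)f8, S5" is expected to be split into items suitable for the
# structured-dtype constructor, roughly ['i4', ('f8', (2, 3)), 'S5'], with
# byte-order characters matching the native order stripped.
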
class dummy_ctype:
    def __init__(self, cls):
        self._cls = cls

    def __mul__(self, other):
        return self

    def __call__(self, *other):
        return self._cls(other)

    def __eq__(self, other):
        return self._cls == other._cls

    def __ne__(self, other):
        return self._cls != other._cls

def _getintp_ctype():
    val = _getintp_ctype.cache
    if val is not None:
        return val
    if ctypes is None:
        import numpy as np
        val = dummy_ctype(np.intp)
    else:
        char = dtype('p').char
        if char == 'i':
            val = ctypes.c_int
        elif char == 'l':
            val = ctypes.c_long
        elif char == 'q':
            val = ctypes.c_longlong
        else:
            val = ctypes.c_long
    _getintp_ctype.cache = val
    return val
_getintp_ctype.cache = None

# Used for .ctypes attribute of ndarray

class _missing_ctypes:
    def cast(self, num, obj):
        return num.value

    class c_void_p:
        def __init__(self, ptr):
            self.value = ptr


class _ctypes:
    def __init__(self, array, ptr=None):
        self._arr = array

        if ctypes:
            self._ctypes = ctypes
            self._data = self._ctypes.c_void_p(ptr)
        else:
            # fake a pointer-like object that holds onto the reference
            self._ctypes = _missing_ctypes()
            self._data = self._ctypes.c_void_p(ptr)
            self._data._objects = array

        if self._arr.ndim == 0:
            self._zerod = True
        else:
            self._zerod = False

    def data_as(self, obj):
        """
        Return the data pointer cast to a particular c-types object.
        For example, calling ``self._as_parameter_`` is equivalent to
        ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data as a
        pointer to a ctypes array of floating-point data:
        ``self.data_as(ctypes.POINTER(ctypes.c_double))``.

        The returned pointer will keep a reference to the array.
        """
        # _ctypes.cast function causes a circular reference of self._data in
        # self._data._objects. Attributes of self._data cannot be released
        # until gc.collect is called. Make a copy of the pointer first then let
        # it hold the array reference. This is a workaround to circumvent the
        # CPython bug https://bugs.python.org/issue12836
        ptr = self._ctypes.cast(self._data, obj)
        ptr._arr = self._arr
        return ptr

    def shape_as(self, obj):
        """
        Return the shape tuple as an array of some other c-types
        type. For example: ``self.shape_as(ctypes.c_short)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.shape)

    def strides_as(self, obj):
        """
        Return the strides tuple as an array of some other
        c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.strides)

    @property
    def data(self):
        """
        A pointer to the memory area of the array as a Python integer.
        This memory area may contain data that is not aligned, or not in correct
        byte-order. The memory area may not even be writeable. The array
        flags and data-type of this array should be respected when passing this
        attribute to arbitrary C-code to avoid trouble that can include Python
        crashing. User Beware! The value of this attribute is exactly the same
        as ``self.__array_interface__['data'][0]``.

        Note that unlike ``data_as``, a reference will not be kept to the array:
        code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
        pointer to a deallocated array, and should be spelt
        ``(a + b).ctypes.data_as(ctypes.c_void_p)``.
        """
        return self._data.value

    @property
    def shape(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the C-integer corresponding to ``dtype('p')`` on this
        platform (see `~numpy.ctypeslib.c_intp`). This base-type could be
        `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on
        the platform. The ctypes array contains the shape of
        the underlying array.
        """
        return self.shape_as(_getintp_ctype())

    @property
    def strides(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the same as for the shape attribute. This ctypes array
        contains the strides information from the underlying array. This strides
        information is important for showing how many bytes must be jumped to
        get to the next element in the array.
        """
        return self.strides_as(_getintp_ctype())

    @property
    def _as_parameter_(self):
        """
        Overrides the ctypes semi-magic method

        Enables `c_func(some_array.ctypes)`
        """
        return self.data_as(ctypes.c_void_p)

    # Numpy 1.21.0, 2021-05-18

    def get_data(self):
        """Deprecated getter for the `_ctypes.data` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_data" is deprecated. Use "data" instead',
                      DeprecationWarning, stacklevel=2)
        return self.data

    def get_shape(self):
        """Deprecated getter for the `_ctypes.shape` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_shape" is deprecated. Use "shape" instead',
                      DeprecationWarning, stacklevel=2)
        return self.shape

    def get_strides(self):
        """Deprecated getter for the `_ctypes.strides` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_strides" is deprecated. Use "strides" instead',
                      DeprecationWarning, stacklevel=2)
        return self.strides

    def get_as_parameter(self):
        """Deprecated getter for the `_ctypes._as_parameter_` property.

        .. deprecated:: 1.21
        """
        warnings.warn(
            '"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
            DeprecationWarning, stacklevel=2,
        )
        return self._as_parameter_


def _newnames(datatype, order):
    """
    Given a datatype and an order object, return a new names tuple, with the
    order indicated
    """
    oldnames = datatype.names
    nameslist = list(oldnames)
    if isinstance(order, str):
        order = [order]
    seen = set()
    if isinstance(order, (list, tuple)):
        for name in order:
            try:
                nameslist.remove(name)
            except ValueError:
                if name in seen:
                    raise ValueError(f"duplicate field name: {name}") from None
                else:
                    raise ValueError(f"unknown field name: {name}") from None
            seen.add(name)
        return tuple(list(order) + nameslist)
    raise ValueError(f"unsupported order value: {order}")

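# Illustrative example (assumed behavior): for a structured dtype dt with
# names ('a', 'b', 'c'), _newnames(dt, ['c']) is expected to return
# ('c', 'a', 'b'): the requested fields first, then the remaining fields in
# their original order.
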
def _copy_fields(ary):
    """Return copy of structured array with padding between fields removed.

    Parameters
    ----------
    ary : ndarray
       Structured array from which to remove padding bytes

    Returns
    -------
    ary_copy : ndarray
       Copy of ary with padding bytes removed
    """
    dt = ary.dtype
    copy_dtype = {'names': dt.names,
                  'formats': [dt.fields[name][0] for name in dt.names]}
    return array(ary, dtype=copy_dtype, copy=True)

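# Illustrative example (assumed behavior): for an array whose aligned dtype is
# dtype({'names': ['a', 'b'], 'formats': ['u1', 'f8']}, align=True)
# (itemsize 16), _copy_fields should return a copy with the packed dtype of
# itemsize 9, dropping the seven padding bytes between 'a' and 'b'.
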
def _promote_fields(dt1, dt2):
    """ Perform type promotion for two structured dtypes.

    Parameters
    ----------
    dt1 : structured dtype
        First dtype.
    dt2 : structured dtype
        Second dtype.

    Returns
    -------
    out : dtype
        The promoted dtype

    Notes
    -----
    If one of the inputs is aligned, the result will be. The titles of
    both descriptors must match (point to the same field).
    """
    # Both must be structured and have the same names in the same order
    if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:
        raise TypeError("invalid type promotion")

    # if both are identical, we can (maybe!) just return the same dtype.
    identical = dt1 is dt2
    new_fields = []
    for name in dt1.names:
        field1 = dt1.fields[name]
        field2 = dt2.fields[name]
        new_descr = promote_types(field1[0], field2[0])
        identical = identical and new_descr is field1[0]

        # Check that the titles match (if given):
        if field1[2:] != field2[2:]:
            raise TypeError("invalid type promotion")
        if len(field1) == 2:
            new_fields.append((name, new_descr))
        else:
            new_fields.append(((field1[2], name), new_descr))

    res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)

    # Might as well preserve identity (and metadata) if the dtype is identical
    # and the itemsize, offsets are also unmodified. This could probably be
    # sped up, but also probably just be removed entirely.
    if identical and res.itemsize == dt1.itemsize:
        for name in dt1.names:
            if dt1.fields[name][1] != res.fields[name][1]:
                return res  # the dtype changed.
        return dt1

    return res


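# Illustrative example (assumed behavior): promoting
# dtype([('x', 'i4'), ('y', 'f4')]) with dtype([('x', 'i8'), ('y', 'f4')])
# should give dtype([('x', 'i8'), ('y', 'f4')]), while dtypes whose field
# names differ raise TypeError("invalid type promotion").
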
def _getfield_is_safe(oldtype, newtype, offset):
    """ Checks safety of getfield for object arrays.

    As in _view_is_safe, we need to check that memory containing objects is not
    reinterpreted as a non-object datatype and vice versa.

    Parameters
    ----------
    oldtype : data-type
        Data type of the original ndarray.
    newtype : data-type
        Data type of the field being accessed by ndarray.getfield
    offset : int
        Offset of the field being accessed by ndarray.getfield

    Raises
    ------
    TypeError
        If the field access is invalid

    """
    if newtype.hasobject or oldtype.hasobject:
        if offset == 0 and newtype == oldtype:
            return
        if oldtype.names is not None:
            for name in oldtype.names:
                if (oldtype.fields[name][1] == offset and
                        oldtype.fields[name][0] == newtype):
                    return
        raise TypeError("Cannot get/set field of an object array")
    return

def _view_is_safe(oldtype, newtype):
    """ Checks safety of a view involving object arrays, for example when
    doing::

        np.zeros(10, dtype=oldtype).view(newtype)

    Parameters
    ----------
    oldtype : data-type
        Data type of original ndarray
    newtype : data-type
        Data type of the view

    Raises
    ------
    TypeError
        If the new type is incompatible with the old type.

    """

    # if the types are equivalent, there is no problem.
    # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
    if oldtype == newtype:
        return

    if newtype.hasobject or oldtype.hasobject:
        raise TypeError("Cannot change data-type for object array.")
    return

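# Illustrative example (assumed behavior): _view_is_safe(dtype('i4'),
# dtype('u4')) should return None (the view is allowed), whereas
# _view_is_safe(dtype('O'), dtype('u1')) should raise TypeError, since that
# would reinterpret object pointers as raw bytes.
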
# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype

_pep3118_native_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'h',
    'H': 'H',
    'i': 'i',
    'I': 'I',
    'l': 'l',
    'L': 'L',
    'q': 'q',
    'Q': 'Q',
    'e': 'e',
    'f': 'f',
    'd': 'd',
    'g': 'g',
    'Zf': 'F',
    'Zd': 'D',
    'Zg': 'G',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())

_pep3118_standard_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'i2',
    'H': 'u2',
    'i': 'i4',
    'I': 'u4',
    'l': 'i4',
    'L': 'u4',
    'q': 'i8',
    'Q': 'u8',
    'e': 'f2',
    'f': 'f',
    'd': 'd',
    'Zf': 'F',
    'Zd': 'D',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())

_pep3118_unsupported_map = {
    'u': 'UCS-2 strings',
    '&': 'pointers',
    't': 'bitfields',
    'X': 'function pointers',
}

class _Stream:
    def __init__(self, s):
        self.s = s
        self.byteorder = '@'

    def advance(self, n):
        res = self.s[:n]
        self.s = self.s[n:]
        return res

    def consume(self, c):
        if self.s[:len(c)] == c:
            self.advance(len(c))
            return True
        return False

    def consume_until(self, c):
        if callable(c):
            i = 0
            while i < len(self.s) and not c(self.s[i]):
                i = i + 1
            return self.advance(i)
        else:
            i = self.s.index(c)
            res = self.advance(i)
            self.advance(len(c))
            return res

    @property
    def next(self):
        return self.s[0]

    def __bool__(self):
        return bool(self.s)


def _dtype_from_pep3118(spec):
    stream = _Stream(spec)
    dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
    return dtype

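# Illustrative example (assumed behavior): a PEP 3118 struct specifier such as
# "T{<l:x:>d:y:}" names a little-endian 4-byte integer field 'x' followed by a
# big-endian double field 'y', so _dtype_from_pep3118 is expected to return a
# dtype equivalent to
# dtype({'names': ['x', 'y'], 'formats': ['<i4', '>f8'], 'offsets': [0, 4]}).
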
def __dtype_from_pep3118(stream, is_subdtype):
    field_spec = dict(
        names=[],
        formats=[],
        offsets=[],
        itemsize=0
    )
    offset = 0
    common_alignment = 1
    is_padding = False

    # Parse spec
    while stream:
        value = None

        # End of structure, bail out to upper level
        if stream.consume('}'):
            break

        # Sub-arrays (1)
        shape = None
        if stream.consume('('):
            shape = stream.consume_until(')')
            shape = tuple(map(int, shape.split(',')))

        # Byte order
        if stream.next in ('@', '=', '<', '>', '^', '!'):
            byteorder = stream.advance(1)
            if byteorder == '!':
                byteorder = '>'
            stream.byteorder = byteorder

        # Byte order characters also control native vs. standard type sizes
        if stream.byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars

        # Item sizes
        itemsize_str = stream.consume_until(lambda c: not c.isdigit())
        if itemsize_str:
            itemsize = int(itemsize_str)
        else:
            itemsize = 1

        # Data types
        is_padding = False

        if stream.consume('T{'):
            value, align = __dtype_from_pep3118(
                stream, is_subdtype=True)
        elif stream.next in type_map_chars:
            if stream.next == 'Z':
                typechar = stream.advance(2)
            else:
                typechar = stream.advance(1)

            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {'@': '=', '^': '='}.get(
                stream.byteorder, stream.byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        elif stream.next in _pep3118_unsupported_map:
            desc = _pep3118_unsupported_map[stream.next]
            raise NotImplementedError(
                "Unrepresentable PEP 3118 data type {!r} ({})"
                .format(stream.next, desc))
        else:
            raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)

        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly implies
        # that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if stream.byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align

            offset += start_padding

            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding

            # Update common alignment
            common_alignment = _lcm(align, common_alignment)

        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize,)))

        # Sub-arrays (2)
        if shape is not None:
            value = dtype((value, shape))

        # Field name
        if stream.consume(':'):
            name = stream.consume_until(':')
        else:
            name = None

        if not (is_padding and name is None):
            if name is not None and name in field_spec['names']:
                raise RuntimeError(f"Duplicate field name '{name}' in PEP3118 format")
            field_spec['names'].append(name)
            field_spec['formats'].append(value)
            field_spec['offsets'].append(offset)

        offset += value.itemsize
        offset += extra_offset

        field_spec['itemsize'] = offset

    # extra final padding for aligned types
    if stream.byteorder == '@':
        field_spec['itemsize'] += (-offset) % common_alignment

    # Check if this was a simple 1-item type, and unwrap it
    if (field_spec['names'] == [None]
            and field_spec['offsets'][0] == 0
            and field_spec['itemsize'] == field_spec['formats'][0].itemsize
            and not is_subdtype):
        ret = field_spec['formats'][0]
    else:
        _fix_names(field_spec)
        ret = dtype(field_spec)

    # Finished
    return ret, common_alignment

def _fix_names(field_spec):
    """ Replace names which are None with the next unused f%d name """
    names = field_spec['names']
    for i, name in enumerate(names):
        if name is not None:
            continue

        j = 0
        while True:
            name = f'f{j}'
            if name not in names:
                break
            j = j + 1
        names[i] = name

def _add_trailing_padding(value, padding):
    """Inject the specified number of padding bytes at the end of a dtype"""
    if value.fields is None:
        field_spec = dict(
            names=['f0'],
            formats=[value],
            offsets=[0],
            itemsize=value.itemsize
        )
    else:
        fields = value.fields
        names = value.names
        field_spec = dict(
            names=names,
            formats=[fields[name][0] for name in names],
            offsets=[fields[name][1] for name in names],
            itemsize=value.itemsize
        )

    field_spec['itemsize'] += padding
    return dtype(field_spec)

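# Illustrative example (assumed behavior): _add_trailing_padding(dtype('u1'), 3)
# should wrap the scalar in a single-field struct and grow its itemsize,
# yielding a dtype equivalent to
# dtype({'names': ['f0'], 'formats': ['u1'], 'offsets': [0], 'itemsize': 4}).
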
def _prod(a):
    p = 1
    for x in a:
        p *= x
    return p

def _gcd(a, b):
    """Calculate the greatest common divisor of a and b"""
    while b:
        a, b = b, a % b
    return a

def _lcm(a, b):
    return a // _gcd(a, b) * b

def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
    """ Format the error message for when __array_ufunc__ gives up. """
    args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
                            ['{}={!r}'.format(k, v)
                             for k, v in kwargs.items()])
    args = inputs + kwargs.get('out', ())
    types_string = ', '.join(repr(type(arg).__name__) for arg in args)
    return ('operand type(s) all returned NotImplemented from '
            '__array_ufunc__({!r}, {!r}, {}): {}'
            .format(ufunc, method, args_string, types_string))


def array_function_errmsg_formatter(public_api, types):
    """ Format the error message for when __array_function__ gives up. """
    func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
    return ("no implementation found for '{}' on types that implement "
            '__array_function__: {}'.format(func_name, list(types)))


def _ufunc_doc_signature_formatter(ufunc):
    """
    Builds a signature string which resembles PEP 457

    This is used to construct the first line of the docstring
    """

    # input arguments are simple
    if ufunc.nin == 1:
        in_args = 'x'
    else:
        in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin))

    # output arguments can be passed positionally or via the `out` keyword
    if ufunc.nout == 0:
        out_args = ', /, out=()'
    elif ufunc.nout == 1:
        out_args = ', /, out=None'
    else:
        out_args = '[, {positional}], / [, out={default}]'.format(
            positional=', '.join(
                'out{}'.format(i+1) for i in range(ufunc.nout)),
            default=repr((None,)*ufunc.nout)
        )

    # keyword only args depend on whether this is a gufunc
    kwargs = (
        ", casting='same_kind'"
        ", order='K'"
        ", dtype=None"
        ", subok=True"
    )

    # NOTE: gufuncs may or may not support the `axis` parameter
    if ufunc.signature is None:
        kwargs = f", where=True{kwargs}[, signature, extobj]"
    else:
        kwargs += "[, signature, extobj, axes, axis]"

    # join all the parts together
    return '{name}({in_args}{out_args}, *{kwargs})'.format(
        name=ufunc.__name__,
        in_args=in_args,
        out_args=out_args,
        kwargs=kwargs
    )


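# Illustrative example (assumed output): for np.add (nin=2, nout=1, no gufunc
# signature) the formatter is expected to produce a first docstring line
# resembling
#
#     add(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K',
#         dtype=None, subok=True[, signature, extobj])
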
def npy_ctypes_check(cls):
    # determine if a class comes from ctypes, in order to work around
    # a bug in the buffer protocol for those objects, bpo-10746
    try:
        # ctypes classes are new-style, so have an __mro__. This probably fails
        # for ctypes classes with multiple inheritance.
        if IS_PYPY:
            # (..., _ctypes.basics._CData, Bufferable, object)
            ctype_base = cls.__mro__[-3]
        else:
            # (..., _ctypes._CData, object)
            ctype_base = cls.__mro__[-2]
        # right now, they're part of the _ctypes module
        return '_ctypes' in ctype_base.__module__
    except Exception:
        return False