Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.9/dist-packages/numpy/core/_internal.py: 23%


448 statements  

1""" 

2A place for internal code 

3 

4Some things are more easily handled Python. 

5 

6""" 

7import ast 

8import re 

9import sys 

10import warnings 

11 

12from ..exceptions import DTypePromotionError 

13from .multiarray import dtype, array, ndarray, promote_types 

14try: 

15 import ctypes 

16except ImportError: 

17 ctypes = None 

18 

19IS_PYPY = sys.implementation.name == 'pypy' 

20 

21if sys.byteorder == 'little': 

22 _nbo = '<' 

23else: 

24 _nbo = '>' 

25 

def _makenames_list(adict, align):
    allfields = []

    for fname, obj in adict.items():
        n = len(obj)
        if not isinstance(obj, tuple) or n not in (2, 3):
            raise ValueError("entry not a 2- or 3- tuple")
        if n > 2 and obj[2] == fname:
            continue
        num = int(obj[1])
        if num < 0:
            raise ValueError("invalid offset.")
        format = dtype(obj[0], align=align)
        if n > 2:
            title = obj[2]
        else:
            title = None
        allfields.append((fname, format, num, title))
    # sort by offsets
    allfields.sort(key=lambda x: x[2])
    names = [x[0] for x in allfields]
    formats = [x[1] for x in allfields]
    offsets = [x[2] for x in allfields]
    titles = [x[3] for x in allfields]

    return names, formats, offsets, titles

# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
    try:
        names = adict[-1]
    except KeyError:
        names = None
    if names is None:
        names, formats, offsets, titles = _makenames_list(adict, align)
    else:
        formats = []
        offsets = []
        titles = []
        for name in names:
            res = adict[name]
            formats.append(res[0])
            offsets.append(res[1])
            if len(res) > 2:
                titles.append(res[2])
            else:
                titles.append(None)

    return dtype({"names": names,
                  "formats": formats,
                  "offsets": offsets,
                  "titles": titles}, align)

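# Illustrative sketch (not part of the original source): the dict-without-names
# form handled above maps each field name to a (dtype, offset) tuple, so a
# descriptor such as the one below is equivalent to spelling out "names",
# "formats" and "offsets" explicitly (reprs assume a little-endian build).
#
#     >>> import numpy as np
#     >>> np.dtype({'x': ('i4', 0), 'y': ('f8', 4)})   # == dtype([('x', '<i4'), ('y', '<f8')])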

# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring

def _array_descr(descriptor):
    fields = descriptor.fields
    if fields is None:
        subdtype = descriptor.subdtype
        if subdtype is None:
            if descriptor.metadata is None:
                return descriptor.str
            else:
                new = descriptor.metadata.copy()
                if new:
                    return (descriptor.str, new)
                else:
                    return descriptor.str
        else:
            return (_array_descr(subdtype[0]), subdtype[1])

    names = descriptor.names
    ordered_fields = [fields[x] + (x,) for x in names]
    result = []
    offset = 0
    for field in ordered_fields:
        if field[1] > offset:
            num = field[1] - offset
            result.append(('', f'|V{num}'))
            offset += num
        elif field[1] < offset:
            raise ValueError(
                "dtype.descr is not defined for types with overlapping or "
                "out-of-order fields")
        if len(field) > 3:
            name = (field[2], field[3])
        else:
            name = field[2]
        if field[0].subdtype:
            tup = (name, _array_descr(field[0].subdtype[0]),
                   field[0].subdtype[1])
        else:
            tup = (name, _array_descr(field[0]))
        offset += field[0].itemsize
        result.append(tup)

    if descriptor.itemsize > offset:
        num = descriptor.itemsize - offset
        result.append(('', f'|V{num}'))

    return result
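# Illustrative sketch (not part of the original source): _array_descr backs
# ``dtype.descr`` and fills gaps with unnamed void fields, so a padded
# structured dtype on a little-endian build would come out roughly as:
#
#     >>> import numpy as np
#     >>> np.dtype({'names': ['a'], 'formats': ['<i4'],
#     ...           'offsets': [4], 'itemsize': 12}).descr
#     [('', '|V4'), ('a', '<i4'), ('', '|V4')]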

# Build a new array from the information in a pickle.
# Note that the name numpy.core._internal._reconstruct is embedded in
# pickles of ndarrays made with NumPy before release 1.0
# so don't remove the name here, or you'll
# break backward compatibility.
def _reconstruct(subtype, shape, dtype):
    return ndarray.__new__(subtype, shape, dtype)


# format_re was originally from numarray by J. Todd Miller

format_re = re.compile(r'(?P<order1>[<>|=]?)'
                       r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
                       r'(?P<order2>[<>|=]?)'
                       r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
sep_re = re.compile(r'\s*,\s*')
space_re = re.compile(r'\s+$')

# astr is a string (perhaps comma separated)

_convorder = {'=': _nbo}

def _commastring(astr):
    startindex = 0
    result = []
    while startindex < len(astr):
        mo = format_re.match(astr, pos=startindex)
        try:
            (order1, repeats, order2, dtype) = mo.groups()
        except (TypeError, AttributeError):
            raise ValueError(
                f'format number {len(result)+1} of "{astr}" is not recognized'
                ) from None
        startindex = mo.end()
        # Separator or ending padding
        if startindex < len(astr):
            if space_re.match(astr, pos=startindex):
                startindex = len(astr)
            else:
                mo = sep_re.match(astr, pos=startindex)
                if not mo:
                    raise ValueError(
                        'format number %d of "%s" is not recognized' %
                        (len(result)+1, astr))
                startindex = mo.end()

        if order2 == '':
            order = order1
        elif order1 == '':
            order = order2
        else:
            order1 = _convorder.get(order1, order1)
            order2 = _convorder.get(order2, order2)
            if (order1 != order2):
                raise ValueError(
                    'inconsistent byte-order specification %s and %s' %
                    (order1, order2))
            order = order1

        if order in ('|', '=', _nbo):
            order = ''
        dtype = order + dtype
        if (repeats == ''):
            newitem = dtype
        else:
            newitem = (dtype, ast.literal_eval(repeats))
        result.append(newitem)

    return result
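# Illustrative sketch (not part of the original source): _commastring turns a
# comma-separated type spec into the list form that dtype() accepts. Repeat
# counts become (dtype, shape) pairs, and byte orders that match the native
# order are dropped.
#
#     >>> _commastring('3f8, i4, S5')
#     [('f8', 3), 'i4', 'S5']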

class dummy_ctype:
    def __init__(self, cls):
        self._cls = cls

    def __mul__(self, other):
        return self

    def __call__(self, *other):
        return self._cls(other)

    def __eq__(self, other):
        return self._cls == other._cls

    def __ne__(self, other):
        return self._cls != other._cls

def _getintp_ctype():
    val = _getintp_ctype.cache
    if val is not None:
        return val
    if ctypes is None:
        import numpy as np
        val = dummy_ctype(np.intp)
    else:
        char = dtype('p').char
        if char == 'i':
            val = ctypes.c_int
        elif char == 'l':
            val = ctypes.c_long
        elif char == 'q':
            val = ctypes.c_longlong
        else:
            val = ctypes.c_long
    _getintp_ctype.cache = val
    return val
_getintp_ctype.cache = None
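# Illustrative note (not part of the original source): on a typical 64-bit
# Linux build dtype('p').char is 'l', so the cached type would be
# ctypes.c_long; 64-bit Windows builds usually map to ctypes.c_longlong.
#
#     >>> _getintp_ctype() is ctypes.c_long   # typical result on 64-bit Linux
#     True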

# Used for .ctypes attribute of ndarray

class _missing_ctypes:
    def cast(self, num, obj):
        return num.value

    class c_void_p:
        def __init__(self, ptr):
            self.value = ptr


class _ctypes:
    def __init__(self, array, ptr=None):
        self._arr = array

        if ctypes:
            self._ctypes = ctypes
            self._data = self._ctypes.c_void_p(ptr)
        else:
            # fake a pointer-like object that holds onto the reference
            self._ctypes = _missing_ctypes()
            self._data = self._ctypes.c_void_p(ptr)
            self._data._objects = array

        if self._arr.ndim == 0:
            self._zerod = True
        else:
            self._zerod = False

    def data_as(self, obj):
        """
        Return the data pointer cast to a particular c-types object.
        For example, calling ``self._as_parameter_`` is equivalent to
        ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use the data
        as a pointer to a ctypes array of floating-point data:
        ``self.data_as(ctypes.POINTER(ctypes.c_double))``.

        The returned pointer will keep a reference to the array.
        """
        # _ctypes.cast function causes a circular reference of self._data in
        # self._data._objects. Attributes of self._data cannot be released
        # until gc.collect is called. Make a copy of the pointer first then
        # let it hold the array reference. This is a workaround to circumvent
        # the CPython bug https://bugs.python.org/issue12836
        ptr = self._ctypes.cast(self._data, obj)
        ptr._arr = self._arr
        return ptr

    def shape_as(self, obj):
        """
        Return the shape tuple as an array of some other c-types
        type. For example: ``self.shape_as(ctypes.c_short)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.shape)

    def strides_as(self, obj):
        """
        Return the strides tuple as an array of some other
        c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.strides)

    @property
    def data(self):
        """
        A pointer to the memory area of the array as a Python integer.
        This memory area may contain data that is not aligned, or not in
        correct byte-order. The memory area may not even be writeable. The
        array flags and data-type of this array should be respected when
        passing this attribute to arbitrary C-code to avoid trouble that can
        include Python crashing. User Beware! The value of this attribute is
        exactly the same as ``self.__array_interface__['data'][0]``.

        Note that unlike ``data_as``, a reference will not be kept to the
        array: code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result
        in a pointer to a deallocated array, and should be spelt
        ``(a + b).ctypes.data_as(ctypes.c_void_p)``
        """
        return self._data.value

    @property
    def shape(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the C-integer corresponding to ``dtype('p')`` on this
        platform (see `~numpy.ctypeslib.c_intp`). This base-type could be
        `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on
        the platform. The ctypes array contains the shape of
        the underlying array.
        """
        return self.shape_as(_getintp_ctype())

    @property
    def strides(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the same as for the shape attribute. This ctypes
        array contains the strides information from the underlying array.
        This strides information is important for showing how many bytes
        must be jumped to get to the next element in the array.
        """
        return self.strides_as(_getintp_ctype())

    @property
    def _as_parameter_(self):
        """
        Overrides the ctypes semi-magic method

        Enables `c_func(some_array.ctypes)`
        """
        return self.data_as(ctypes.c_void_p)

    # Numpy 1.21.0, 2021-05-18

    def get_data(self):
        """Deprecated getter for the `_ctypes.data` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_data" is deprecated. Use "data" instead',
                      DeprecationWarning, stacklevel=2)
        return self.data

    def get_shape(self):
        """Deprecated getter for the `_ctypes.shape` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_shape" is deprecated. Use "shape" instead',
                      DeprecationWarning, stacklevel=2)
        return self.shape

    def get_strides(self):
        """Deprecated getter for the `_ctypes.strides` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_strides" is deprecated. Use "strides" instead',
                      DeprecationWarning, stacklevel=2)
        return self.strides

    def get_as_parameter(self):
        """Deprecated getter for the `_ctypes._as_parameter_` property.

        .. deprecated:: 1.21
        """
        warnings.warn(
            '"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
            DeprecationWarning, stacklevel=2,
        )
        return self._as_parameter_

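# Illustrative sketch (not part of the original source): typical use of the
# ``.ctypes`` attribute backed by this class, assuming CPython with ctypes
# available.
#
#     >>> import numpy as np, ctypes
#     >>> a = np.arange(3.0)
#     >>> p = a.ctypes.data_as(ctypes.POINTER(ctypes.c_double))  # keeps `a` alive
#     >>> p[0]
#     0.0
#     >>> tuple(a.ctypes.shape)
#     (3,)
#
# ``a.ctypes.data`` returns a bare integer address and does not keep the
# array alive, which is why ``data_as`` is preferred for passing pointers.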

def _newnames(datatype, order):
    """
    Given a datatype and an order object, return a new names tuple, with the
    order indicated
    """
    oldnames = datatype.names
    nameslist = list(oldnames)
    if isinstance(order, str):
        order = [order]
    seen = set()
    if isinstance(order, (list, tuple)):
        for name in order:
            try:
                nameslist.remove(name)
            except ValueError:
                if name in seen:
                    raise ValueError(f"duplicate field name: {name}") from None
                else:
                    raise ValueError(f"unknown field name: {name}") from None
            seen.add(name)
        return tuple(list(order) + nameslist)
    raise ValueError(f"unsupported order value: {order}")
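# Illustrative sketch (not part of the original source): reordering the
# leading fields of a structured dtype's names tuple.
#
#     >>> import numpy as np
#     >>> dt = np.dtype([('a', 'i4'), ('b', 'f8'), ('c', 'u1')])
#     >>> _newnames(dt, ['c', 'a'])
#     ('c', 'a', 'b')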

def _copy_fields(ary):
    """Return copy of structured array with padding between fields removed.

    Parameters
    ----------
    ary : ndarray
        Structured array from which to remove padding bytes

    Returns
    -------
    ary_copy : ndarray
        Copy of ary with padding bytes removed
    """
    dt = ary.dtype
    copy_dtype = {'names': dt.names,
                  'formats': [dt.fields[name][0] for name in dt.names]}
    return array(ary, dtype=copy_dtype, copy=True)
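# Illustrative sketch (not part of the original source): dropping padding
# bytes from a structured array by rebuilding it with a packed dtype.
#
#     >>> import numpy as np
#     >>> dt = np.dtype({'names': ['a', 'b'], 'formats': ['u1', '<i4'],
#     ...                'offsets': [0, 4]})
#     >>> dt.itemsize
#     8
#     >>> _copy_fields(np.zeros(2, dt)).dtype.itemsize
#     5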

def _promote_fields(dt1, dt2):
    """ Perform type promotion for two structured dtypes.

    Parameters
    ----------
    dt1 : structured dtype
        First dtype.
    dt2 : structured dtype
        Second dtype.

    Returns
    -------
    out : dtype
        The promoted dtype

    Notes
    -----
    If one of the inputs is aligned, the result will be. The titles of
    both descriptors must match (point to the same field).
    """
    # Both must be structured and have the same names in the same order
    if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:
        raise DTypePromotionError(
            f"field names `{dt1.names}` and `{dt2.names}` mismatch.")

    # if both are identical, we can (maybe!) just return the same dtype.
    identical = dt1 is dt2
    new_fields = []
    for name in dt1.names:
        field1 = dt1.fields[name]
        field2 = dt2.fields[name]
        new_descr = promote_types(field1[0], field2[0])
        identical = identical and new_descr is field1[0]

        # Check that the titles match (if given):
        if field1[2:] != field2[2:]:
            raise DTypePromotionError(
                f"field titles of field '{name}' mismatch")
        if len(field1) == 2:
            new_fields.append((name, new_descr))
        else:
            new_fields.append(((field1[2], name), new_descr))

    res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)

    # Might as well preserve identity (and metadata) if the dtype is identical
    # and the itemsize, offsets are also unmodified. This could probably be
    # sped up, but also probably just be removed entirely.
    if identical and res.itemsize == dt1.itemsize:
        for name in dt1.names:
            if dt1.fields[name][1] != res.fields[name][1]:
                return res  # the dtype changed.
        return dt1

    return res

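# Illustrative sketch (not part of the original source): field-wise promotion
# of two structured dtypes with matching names (reprs shown for a
# little-endian build).
#
#     >>> import numpy as np
#     >>> dt1 = np.dtype([('a', 'i4'), ('b', 'f4')])
#     >>> dt2 = np.dtype([('a', 'i8'), ('b', 'f4')])
#     >>> _promote_fields(dt1, dt2)
#     dtype([('a', '<i8'), ('b', '<f4')])
#
# Mismatched field names raise DTypePromotionError instead of silently
# promoting.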

def _getfield_is_safe(oldtype, newtype, offset):
    """ Checks safety of getfield for object arrays.

    As in _view_is_safe, we need to check that memory containing objects is
    not reinterpreted as a non-object datatype and vice versa.

    Parameters
    ----------
    oldtype : data-type
        Data type of the original ndarray.
    newtype : data-type
        Data type of the field being accessed by ndarray.getfield
    offset : int
        Offset of the field being accessed by ndarray.getfield

    Raises
    ------
    TypeError
        If the field access is invalid

    """
    if newtype.hasobject or oldtype.hasobject:
        if offset == 0 and newtype == oldtype:
            return
        if oldtype.names is not None:
            for name in oldtype.names:
                if (oldtype.fields[name][1] == offset and
                        oldtype.fields[name][0] == newtype):
                    return
        raise TypeError("Cannot get/set field of an object array")
    return

def _view_is_safe(oldtype, newtype):
    """ Checks safety of a view involving object arrays, for example when
    doing::

        np.zeros(10, dtype=oldtype).view(newtype)

    Parameters
    ----------
    oldtype : data-type
        Data type of original ndarray
    newtype : data-type
        Data type of the view

    Raises
    ------
    TypeError
        If the new type is incompatible with the old type.

    """

    # if the types are equivalent, there is no problem.
    # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
    if oldtype == newtype:
        return

    if newtype.hasobject or oldtype.hasobject:
        raise TypeError("Cannot change data-type for object array.")
    return
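# Illustrative sketch (not part of the original source): the only hard
# failure in _view_is_safe is mixing object and non-object memory.
#
#     >>> import numpy as np
#     >>> _view_is_safe(np.dtype('f8'), np.dtype('i8'))   # returns None, OK
#     >>> _view_is_safe(np.dtype('O'), np.dtype('i8'))
#     Traceback (most recent call last):
#         ...
#     TypeError: Cannot change data-type for object array.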

# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype

_pep3118_native_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'h',
    'H': 'H',
    'i': 'i',
    'I': 'I',
    'l': 'l',
    'L': 'L',
    'q': 'q',
    'Q': 'Q',
    'e': 'e',
    'f': 'f',
    'd': 'd',
    'g': 'g',
    'Zf': 'F',
    'Zd': 'D',
    'Zg': 'G',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())

_pep3118_standard_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'i2',
    'H': 'u2',
    'i': 'i4',
    'I': 'u4',
    'l': 'i4',
    'L': 'u4',
    'q': 'i8',
    'Q': 'u8',
    'e': 'f2',
    'f': 'f',
    'd': 'd',
    'Zf': 'F',
    'Zd': 'D',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())

_pep3118_unsupported_map = {
    'u': 'UCS-2 strings',
    '&': 'pointers',
    't': 'bitfields',
    'X': 'function pointers',
}

class _Stream:
    def __init__(self, s):
        self.s = s
        self.byteorder = '@'

    def advance(self, n):
        res = self.s[:n]
        self.s = self.s[n:]
        return res

    def consume(self, c):
        if self.s[:len(c)] == c:
            self.advance(len(c))
            return True
        return False

    def consume_until(self, c):
        if callable(c):
            i = 0
            while i < len(self.s) and not c(self.s[i]):
                i = i + 1
            return self.advance(i)
        else:
            i = self.s.index(c)
            res = self.advance(i)
            self.advance(len(c))
            return res

    @property
    def next(self):
        return self.s[0]

    def __bool__(self):
        return bool(self.s)


def _dtype_from_pep3118(spec):
    stream = _Stream(spec)
    dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
    return dtype

def __dtype_from_pep3118(stream, is_subdtype):
    field_spec = dict(
        names=[],
        formats=[],
        offsets=[],
        itemsize=0
    )
    offset = 0
    common_alignment = 1
    is_padding = False

    # Parse spec
    while stream:
        value = None

        # End of structure, bail out to upper level
        if stream.consume('}'):
            break

        # Sub-arrays (1)
        shape = None
        if stream.consume('('):
            shape = stream.consume_until(')')
            shape = tuple(map(int, shape.split(',')))

        # Byte order
        if stream.next in ('@', '=', '<', '>', '^', '!'):
            byteorder = stream.advance(1)
            if byteorder == '!':
                byteorder = '>'
            stream.byteorder = byteorder

        # Byte order characters also control native vs. standard type sizes
        if stream.byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars

        # Item sizes
        itemsize_str = stream.consume_until(lambda c: not c.isdigit())
        if itemsize_str:
            itemsize = int(itemsize_str)
        else:
            itemsize = 1

        # Data types
        is_padding = False

        if stream.consume('T{'):
            value, align = __dtype_from_pep3118(
                stream, is_subdtype=True)
        elif stream.next in type_map_chars:
            if stream.next == 'Z':
                typechar = stream.advance(2)
            else:
                typechar = stream.advance(1)

            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {'@': '=', '^': '='}.get(
                stream.byteorder, stream.byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        elif stream.next in _pep3118_unsupported_map:
            desc = _pep3118_unsupported_map[stream.next]
            raise NotImplementedError(
                "Unrepresentable PEP 3118 data type {!r} ({})"
                .format(stream.next, desc))
        else:
            raise ValueError("Unknown PEP 3118 data type specifier %r" % stream.s)

        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly
        # implies that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if stream.byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align

            offset += start_padding

            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding

            # Update common alignment
            common_alignment = _lcm(align, common_alignment)

        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize,)))

        # Sub-arrays (2)
        if shape is not None:
            value = dtype((value, shape))

        # Field name
        if stream.consume(':'):
            name = stream.consume_until(':')
        else:
            name = None

        if not (is_padding and name is None):
            if name is not None and name in field_spec['names']:
                raise RuntimeError(
                    f"Duplicate field name '{name}' in PEP3118 format")
            field_spec['names'].append(name)
            field_spec['formats'].append(value)
            field_spec['offsets'].append(offset)

        offset += value.itemsize
        offset += extra_offset

    field_spec['itemsize'] = offset

    # extra final padding for aligned types
    if stream.byteorder == '@':
        field_spec['itemsize'] += (-offset) % common_alignment

    # Check if this was a simple 1-item type, and unwrap it
    if (field_spec['names'] == [None]
            and field_spec['offsets'][0] == 0
            and field_spec['itemsize'] == field_spec['formats'][0].itemsize
            and not is_subdtype):
        ret = field_spec['formats'][0]
    else:
        _fix_names(field_spec)
        ret = dtype(field_spec)

    # Finished
    return ret, common_alignment
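# Illustrative sketch (not part of the original source): parsing a couple of
# PEP 3118 format strings. The struct layout assumes a typical platform where
# 'i' is 4 bytes and 'd' is 8 bytes with native alignment, and the first repr
# assumes a little-endian build.
#
#     >>> _dtype_from_pep3118('<l')          # standard-size little-endian long
#     dtype('int32')
#     >>> dt = _dtype_from_pep3118('T{i:x:d:y:}')
#     >>> dt.names, dt.fields['y'][1], dt.itemsize
#     (('x', 'y'), 8, 16)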

def _fix_names(field_spec):
    """ Replace names which are None with the next unused f%d name """
    names = field_spec['names']
    for i, name in enumerate(names):
        if name is not None:
            continue

        j = 0
        while True:
            name = f'f{j}'
            if name not in names:
                break
            j = j + 1
        names[i] = name

def _add_trailing_padding(value, padding):
    """Inject the specified number of padding bytes at the end of a dtype"""
    if value.fields is None:
        field_spec = dict(
            names=['f0'],
            formats=[value],
            offsets=[0],
            itemsize=value.itemsize
        )
    else:
        fields = value.fields
        names = value.names
        field_spec = dict(
            names=names,
            formats=[fields[name][0] for name in names],
            offsets=[fields[name][1] for name in names],
            itemsize=value.itemsize
        )

    field_spec['itemsize'] += padding
    return dtype(field_spec)
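# Illustrative sketch (not part of the original source): padding a scalar
# dtype out to a larger itemsize wraps it in a single-field struct.
#
#     >>> import numpy as np
#     >>> padded = _add_trailing_padding(np.dtype('i4'), 4)
#     >>> padded.names, padded.itemsize
#     (('f0',), 8)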

def _prod(a):
    p = 1
    for x in a:
        p *= x
    return p

def _gcd(a, b):
    """Calculate the greatest common divisor of a and b"""
    while b:
        a, b = b, a % b
    return a

def _lcm(a, b):
    return a // _gcd(a, b) * b
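# Illustrative sketch (not part of the original source): these small helpers
# back the padding and alignment arithmetic in the PEP 3118 parser above.
#
#     >>> _prod((2, 3, 4)), _gcd(12, 18), _lcm(4, 6)
#     (24, 6, 12)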

def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
    """ Format the error message for when __array_ufunc__ gives up. """
    args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
                            ['{}={!r}'.format(k, v)
                             for k, v in kwargs.items()])
    args = inputs + kwargs.get('out', ())
    types_string = ', '.join(repr(type(arg).__name__) for arg in args)
    return ('operand type(s) all returned NotImplemented from '
            '__array_ufunc__({!r}, {!r}, {}): {}'
            .format(ufunc, method, args_string, types_string))


def array_function_errmsg_formatter(public_api, types):
    """ Format the error message for when __array_function__ gives up. """
    func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
    return ("no implementation found for '{}' on types that implement "
            '__array_function__: {}'.format(func_name, list(types)))


def _ufunc_doc_signature_formatter(ufunc):
    """
    Builds a signature string which resembles PEP 457

    This is used to construct the first line of the docstring
    """

    # input arguments are simple
    if ufunc.nin == 1:
        in_args = 'x'
    else:
        in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin))

    # output arguments are both keyword or positional
    if ufunc.nout == 0:
        out_args = ', /, out=()'
    elif ufunc.nout == 1:
        out_args = ', /, out=None'
    else:
        out_args = '[, {positional}], / [, out={default}]'.format(
            positional=', '.join(
                'out{}'.format(i+1) for i in range(ufunc.nout)),
            default=repr((None,)*ufunc.nout)
        )

    # keyword only args depend on whether this is a gufunc
    kwargs = (
        ", casting='same_kind'"
        ", order='K'"
        ", dtype=None"
        ", subok=True"
    )

    # NOTE: gufuncs may or may not support the `axis` parameter
    if ufunc.signature is None:
        kwargs = f", where=True{kwargs}[, signature, extobj]"
    else:
        kwargs += "[, signature, extobj, axes, axis]"

    # join all the parts together
    return '{name}({in_args}{out_args}, *{kwargs})'.format(
        name=ufunc.__name__,
        in_args=in_args,
        out_args=out_args,
        kwargs=kwargs
    )

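# Illustrative sketch (not part of the original source): the kind of first
# docstring line this produces for a plain (non-generalized) binary ufunc;
# the exact keyword list can differ between NumPy versions.
#
#     >>> import numpy as np
#     >>> _ufunc_doc_signature_formatter(np.add)
#     "add(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature, extobj])"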

def npy_ctypes_check(cls):
    # determine if a class comes from ctypes, in order to work around
    # a bug in the buffer protocol for those objects, bpo-10746
    try:
        # ctypes classes are new-style, so have an __mro__. This probably
        # fails for ctypes classes with multiple inheritance.
        if IS_PYPY:
            # (..., _ctypes.basics._CData, Bufferable, object)
            ctype_base = cls.__mro__[-3]
        else:
            # (..., _ctypes._CData, object)
            ctype_base = cls.__mro__[-2]
        # right now, they're part of the _ctypes module
        return '_ctypes' in ctype_base.__module__
    except Exception:
        return False
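# Illustrative sketch (not part of the original source): how the check
# behaves on CPython for a ctypes array type versus a plain builtin.
#
#     >>> import ctypes
#     >>> npy_ctypes_check(ctypes.c_double * 4)
#     True
#     >>> npy_ctypes_check(float)
#     False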