Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.10/site-packages/numpy/_core/_internal.py: 17%


458 statements  

1""" 

2A place for internal code 

3 

4Some things are more easily handled Python. 

5 

6""" 

7import ast 

8import math 

9import re 

10import sys 

11import warnings 

12 

13from ..exceptions import DTypePromotionError 

14from .multiarray import dtype, array, ndarray, promote_types, StringDType 

15from numpy import _NoValue 

16try: 

17 import ctypes 

18except ImportError: 

19 ctypes = None 

20 

21IS_PYPY = sys.implementation.name == 'pypy' 

22 

23if sys.byteorder == 'little': 

24 _nbo = '<' 

25else: 

26 _nbo = '>' 

27 

28def _makenames_list(adict, align): 

29 allfields = [] 

30 

31 for fname, obj in adict.items(): 

32 n = len(obj) 

33 if not isinstance(obj, tuple) or n not in (2, 3): 

34 raise ValueError("entry not a 2- or 3- tuple") 

35 if n > 2 and obj[2] == fname: 

36 continue 

37 num = int(obj[1]) 

38 if num < 0: 

39 raise ValueError("invalid offset.") 

40 format = dtype(obj[0], align=align) 

41 if n > 2: 

42 title = obj[2] 

43 else: 

44 title = None 

45 allfields.append((fname, format, num, title)) 

46 # sort by offsets 

47 allfields.sort(key=lambda x: x[2]) 

48 names = [x[0] for x in allfields] 

49 formats = [x[1] for x in allfields] 

50 offsets = [x[2] for x in allfields] 

51 titles = [x[3] for x in allfields] 

52 

53 return names, formats, offsets, titles 

54 

55# Called in PyArray_DescrConverter function when 

56# a dictionary without "names" and "formats" 

57# fields is used as a data-type descriptor. 

58def _usefields(adict, align): 

59 try: 

60 names = adict[-1] 

61 except KeyError: 

62 names = None 

63 if names is None: 

64 names, formats, offsets, titles = _makenames_list(adict, align) 

65 else: 

66 formats = [] 

67 offsets = [] 

68 titles = [] 

69 for name in names: 

70 res = adict[name] 

71 formats.append(res[0]) 

72 offsets.append(res[1]) 

73 if len(res) > 2: 

74 titles.append(res[2]) 

75 else: 

76 titles.append(None) 

77 

78 return dtype({"names": names, 

79 "formats": formats, 

80 "offsets": offsets, 

81 "titles": titles}, align) 

# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring

def _array_descr(descriptor):
    fields = descriptor.fields
    if fields is None:
        subdtype = descriptor.subdtype
        if subdtype is None:
            if descriptor.metadata is None:
                return descriptor.str
            else:
                new = descriptor.metadata.copy()
                if new:
                    return (descriptor.str, new)
                else:
                    return descriptor.str
        else:
            return (_array_descr(subdtype[0]), subdtype[1])

    names = descriptor.names
    ordered_fields = [fields[x] + (x,) for x in names]
    result = []
    offset = 0
    for field in ordered_fields:
        if field[1] > offset:
            num = field[1] - offset
            result.append(('', f'|V{num}'))
            offset += num
        elif field[1] < offset:
            raise ValueError(
                "dtype.descr is not defined for types with overlapping or "
                "out-of-order fields")
        if len(field) > 3:
            name = (field[2], field[3])
        else:
            name = field[2]
        if field[0].subdtype:
            tup = (name, _array_descr(field[0].subdtype[0]),
                   field[0].subdtype[1])
        else:
            tup = (name, _array_descr(field[0]))
        offset += field[0].itemsize
        result.append(tup)

    if descriptor.itemsize > offset:
        num = descriptor.itemsize - offset
        result.append(('', f'|V{num}'))

    return result
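
# Illustrative usage (editor's sketch, not from the upstream file): for a
# plain structured dtype this mirrors the public ``dtype.descr`` attribute,
# e.g.
#
#     _array_descr(dtype([('a', '<i4'), ('b', '<f8')]))
#
# is expected to give [('a', '<i4'), ('b', '<f8')], while internal or
# trailing padding in an aligned dtype shows up as ('', '|V<n>') entries.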

# format_re was originally from numarray by J. Todd Miller

format_re = re.compile(r'(?P<order1>[<>|=]?)'
                       r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
                       r'(?P<order2>[<>|=]?)'
                       r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
sep_re = re.compile(r'\s*,\s*')
space_re = re.compile(r'\s+$')

# astr is a string (perhaps comma separated)

_convorder = {'=': _nbo}

def _commastring(astr):
    startindex = 0
    result = []
    islist = False
    while startindex < len(astr):
        mo = format_re.match(astr, pos=startindex)
        try:
            (order1, repeats, order2, dtype) = mo.groups()
        except (TypeError, AttributeError):
            raise ValueError(
                f'format number {len(result)+1} of "{astr}" is not recognized'
            ) from None
        startindex = mo.end()
        # Separator or ending padding
        if startindex < len(astr):
            if space_re.match(astr, pos=startindex):
                startindex = len(astr)
            else:
                mo = sep_re.match(astr, pos=startindex)
                if not mo:
                    raise ValueError(
                        'format number %d of "%s" is not recognized' %
                        (len(result)+1, astr))
                startindex = mo.end()
                islist = True

        if order2 == '':
            order = order1
        elif order1 == '':
            order = order2
        else:
            order1 = _convorder.get(order1, order1)
            order2 = _convorder.get(order2, order2)
            if (order1 != order2):
                raise ValueError(
                    'inconsistent byte-order specification %s and %s' %
                    (order1, order2))
            order = order1

        if order in ('|', '=', _nbo):
            order = ''
        dtype = order + dtype
        if repeats == '':
            newitem = dtype
        else:
            if (repeats[0] == "(" and repeats[-1] == ")"
                    and repeats[1:-1].strip() != ""
                    and "," not in repeats):
                warnings.warn(
                    'Passing in a parenthesized single number for repeats '
                    'is deprecated; pass either a single number or indicate '
                    'a tuple with a comma, like "(2,)".', DeprecationWarning,
                    stacklevel=2)
            newitem = (dtype, ast.literal_eval(repeats))

        result.append(newitem)

    return result if islist else result[0]
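
# Illustrative usage (editor's sketch, not from the upstream file): on a
# little-endian build one would expect
#
#     _commastring("i4, (2,3)f8")   ->  ['i4', ('f8', (2, 3))]
#     _commastring(">u2")           ->  '>u2'
#
# i.e. a single field comes back as a bare item, several fields as a list.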

class dummy_ctype:

    def __init__(self, cls):
        self._cls = cls

    def __mul__(self, other):
        return self

    def __call__(self, *other):
        return self._cls(other)

    def __eq__(self, other):
        return self._cls == other._cls

    def __ne__(self, other):
        return self._cls != other._cls

def _getintp_ctype():
    val = _getintp_ctype.cache
    if val is not None:
        return val
    if ctypes is None:
        import numpy as np
        val = dummy_ctype(np.intp)
    else:
        char = dtype('n').char
        if char == 'i':
            val = ctypes.c_int
        elif char == 'l':
            val = ctypes.c_long
        elif char == 'q':
            val = ctypes.c_longlong
        else:
            val = ctypes.c_long
    _getintp_ctype.cache = val
    return val


_getintp_ctype.cache = None
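
# Illustrative usage (editor's sketch, not from the upstream file): on a
# typical 64-bit Linux CPython build, dtype('n').char is 'l', so one would
# expect
#
#     _getintp_ctype() is ctypes.c_long   ->  True
#     (_getintp_ctype() * 2)(3, 4)[:]     ->  [3, 4]
#
# The dummy_ctype fallback only mimics this "multiply by ndim, then call"
# pattern used by the shape/strides properties below.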

# Used for .ctypes attribute of ndarray

class _missing_ctypes:
    def cast(self, num, obj):
        return num.value

    class c_void_p:
        def __init__(self, ptr):
            self.value = ptr


class _ctypes:
    def __init__(self, array, ptr=None):
        self._arr = array

        if ctypes:
            self._ctypes = ctypes
            self._data = self._ctypes.c_void_p(ptr)
        else:
            # fake a pointer-like object that holds onto the reference
            self._ctypes = _missing_ctypes()
            self._data = self._ctypes.c_void_p(ptr)
            self._data._objects = array

        if self._arr.ndim == 0:
            self._zerod = True
        else:
            self._zerod = False

    def data_as(self, obj):
        """
        Return the data pointer cast to a particular c-types object.
        For example, calling ``self._as_parameter_`` is equivalent to
        ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use
        the data as a pointer to a ctypes array of floating-point data:
        ``self.data_as(ctypes.POINTER(ctypes.c_double))``.

        The returned pointer will keep a reference to the array.
        """
        # _ctypes.cast function causes a circular reference of self._data in
        # self._data._objects. Attributes of self._data cannot be released
        # until gc.collect is called. Make a copy of the pointer first then
        # let it hold the array reference. This is a workaround to circumvent
        # the CPython bug https://bugs.python.org/issue12836.
        ptr = self._ctypes.cast(self._data, obj)
        ptr._arr = self._arr
        return ptr

    def shape_as(self, obj):
        """
        Return the shape tuple as an array of some other c-types
        type. For example: ``self.shape_as(ctypes.c_short)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.shape)

    def strides_as(self, obj):
        """
        Return the strides tuple as an array of some other
        c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.strides)

    @property
    def data(self):
        """
        A pointer to the memory area of the array as a Python integer.
        This memory area may contain data that is not aligned, or not in
        correct byte-order. The memory area may not even be writeable.
        The array flags and data-type of this array should be respected
        when passing this attribute to arbitrary C code to avoid trouble
        that can include Python crashing. User Beware! The value of this
        attribute is exactly the same as:
        ``self._array_interface_['data'][0]``.

        Note that unlike ``data_as``, a reference won't be kept to the array:
        code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
        pointer to a deallocated array, and should be spelt
        ``(a + b).ctypes.data_as(ctypes.c_void_p)``.
        """
        return self._data.value

    @property
    def shape(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the C-integer corresponding to ``dtype('p')`` on this
        platform (see `~numpy.ctypeslib.c_intp`). This base-type could be
        `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on
        the platform. The ctypes array contains the shape of
        the underlying array.
        """
        return self.shape_as(_getintp_ctype())

    @property
    def strides(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the same as for the shape attribute. This ctypes
        array contains the strides information from the underlying array.
        This strides information is important for showing how many bytes
        must be jumped to get to the next element in the array.
        """
        return self.strides_as(_getintp_ctype())

    @property
    def _as_parameter_(self):
        """
        Overrides the ctypes semi-magic method

        Enables `c_func(some_array.ctypes)`
        """
        return self.data_as(ctypes.c_void_p)

    # Numpy 1.21.0, 2021-05-18

    def get_data(self):
        """Deprecated getter for the `_ctypes.data` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_data" is deprecated. Use "data" instead',
                      DeprecationWarning, stacklevel=2)
        return self.data

    def get_shape(self):
        """Deprecated getter for the `_ctypes.shape` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_shape" is deprecated. Use "shape" instead',
                      DeprecationWarning, stacklevel=2)
        return self.shape

    def get_strides(self):
        """Deprecated getter for the `_ctypes.strides` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_strides" is deprecated. Use "strides" instead',
                      DeprecationWarning, stacklevel=2)
        return self.strides

    def get_as_parameter(self):
        """Deprecated getter for the `_ctypes._as_parameter_` property.

        .. deprecated:: 1.21
        """
        warnings.warn(
            '"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
            DeprecationWarning, stacklevel=2,
        )
        return self._as_parameter_
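
# Illustrative usage (editor's sketch, not from the upstream file): this
# class backs the public ``ndarray.ctypes`` attribute, so for a 2-D array
# ``a`` of shape (2, 3), and assuming the ctypes import above succeeded,
# one would expect
#
#     a.ctypes.shape[:]                                   ->  [2, 3]
#     a.ctypes.data == a.__array_interface__['data'][0]   ->  True
#     a.ctypes.data_as(ctypes.c_void_p)                   # keeps `a` alive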

def _newnames(datatype, order):
    """
    Given a datatype and an order object, return a new names tuple, with the
    order indicated
    """
    oldnames = datatype.names
    nameslist = list(oldnames)
    if isinstance(order, str):
        order = [order]
    seen = set()
    if isinstance(order, (list, tuple)):
        for name in order:
            try:
                nameslist.remove(name)
            except ValueError:
                if name in seen:
                    raise ValueError(f"duplicate field name: {name}") from None
                else:
                    raise ValueError(f"unknown field name: {name}") from None
            seen.add(name)
        return tuple(list(order) + nameslist)
    raise ValueError(f"unsupported order value: {order}")
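
# Illustrative usage (editor's sketch, not from the upstream file): used
# when sorting a structured array by a subset of its fields, e.g.
#
#     _newnames(dtype([('a', 'i4'), ('b', 'f8'), ('c', 'u1')]), 'b')
#
# is expected to return ('b', 'a', 'c'): the requested fields first,
# followed by the remaining names in their original order.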

def _copy_fields(ary):
    """Return copy of structured array with padding between fields removed.

    Parameters
    ----------
    ary : ndarray
        Structured array from which to remove padding bytes

    Returns
    -------
    ary_copy : ndarray
        Copy of ary with padding bytes removed
    """
    dt = ary.dtype
    copy_dtype = {'names': dt.names,
                  'formats': [dt.fields[name][0] for name in dt.names]}
    return array(ary, dtype=copy_dtype, copy=True)
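
# Illustrative example (editor's sketch, not from the upstream file): a
# struct such as dtype({'names': ['a', 'b'], 'formats': ['u1', '<i4'],
# 'offsets': [0, 4]}) has itemsize 8; after _copy_fields the copy is
# expected to use the packed dtype [('a', 'u1'), ('b', '<i4')] with
# itemsize 5, identical field values, and no padding bytes.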

def _promote_fields(dt1, dt2):
    """ Perform type promotion for two structured dtypes.

    Parameters
    ----------
    dt1 : structured dtype
        First dtype.
    dt2 : structured dtype
        Second dtype.

    Returns
    -------
    out : dtype
        The promoted dtype

    Notes
    -----
    If one of the inputs is aligned, the result will be.  The titles of
    both descriptors must match (point to the same field).
    """
    # Both must be structured and have the same names in the same order
    if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:
        raise DTypePromotionError(
            f"field names `{dt1.names}` and `{dt2.names}` mismatch.")

    # if both are identical, we can (maybe!) just return the same dtype.
    identical = dt1 is dt2
    new_fields = []
    for name in dt1.names:
        field1 = dt1.fields[name]
        field2 = dt2.fields[name]
        new_descr = promote_types(field1[0], field2[0])
        identical = identical and new_descr is field1[0]

        # Check that the titles match (if given):
        if field1[2:] != field2[2:]:
            raise DTypePromotionError(
                f"field titles of field '{name}' mismatch")
        if len(field1) == 2:
            new_fields.append((name, new_descr))
        else:
            new_fields.append(((field1[2], name), new_descr))

    res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)

    # Might as well preserve identity (and metadata) if the dtype is identical
    # and the itemsize, offsets are also unmodified.  This could probably be
    # sped up, but also probably just be removed entirely.
    if identical and res.itemsize == dt1.itemsize:
        for name in dt1.names:
            if dt1.fields[name][1] != res.fields[name][1]:
                return res  # the dtype changed.
        return dt1

    return res
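
# Illustrative usage (editor's sketch, not from the upstream file):
# promotion happens field by field, so one would expect
#
#     _promote_fields(dtype([('a', 'i4')]), dtype([('a', 'f4')]))
#
# to return dtype([('a', '<f8')]) on a little-endian build, since
# promote_types(int32, float32) is float64; mismatched field names raise
# DTypePromotionError.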

def _getfield_is_safe(oldtype, newtype, offset):
    """ Checks safety of getfield for object arrays.

    As in _view_is_safe, we need to check that memory containing objects is not
    reinterpreted as a non-object datatype and vice versa.

    Parameters
    ----------
    oldtype : data-type
        Data type of the original ndarray.
    newtype : data-type
        Data type of the field being accessed by ndarray.getfield
    offset : int
        Offset of the field being accessed by ndarray.getfield

    Raises
    ------
    TypeError
        If the field access is invalid

    """
    if newtype.hasobject or oldtype.hasobject:
        if offset == 0 and newtype == oldtype:
            return
        if oldtype.names is not None:
            for name in oldtype.names:
                if (oldtype.fields[name][1] == offset and
                        oldtype.fields[name][0] == newtype):
                    return
        raise TypeError("Cannot get/set field of an object array")
    return
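
# Illustrative usage (editor's sketch, not from the upstream file):
#
#     _getfield_is_safe(dtype('i8'), dtype('i4'), 4)   # returns None, OK
#     _getfield_is_safe(dtype('O'), dtype('i8'), 0)    # raises TypeError
#
# i.e. plain numeric reinterpretation is allowed, but object pointers may
# only be read back as the exact field they were stored as.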

def _view_is_safe(oldtype, newtype):
    """ Checks safety of a view involving object arrays, for example when
    doing::

        np.zeros(10, dtype=oldtype).view(newtype)

    Parameters
    ----------
    oldtype : data-type
        Data type of original ndarray
    newtype : data-type
        Data type of the view

    Raises
    ------
    TypeError
        If the new type is incompatible with the old type.

    """

    # if the types are equivalent, there is no problem.
    # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
    if oldtype == newtype:
        return

    if newtype.hasobject or oldtype.hasobject:
        raise TypeError("Cannot change data-type for array of references.")
    return
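
# Illustrative usage (editor's sketch, not from the upstream file):
#
#     _view_is_safe(dtype('i4'), dtype('f4'))   # returns None, view allowed
#     _view_is_safe(dtype('i8'), dtype('O'))    # raises TypeError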

# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype

_pep3118_native_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'h',
    'H': 'H',
    'i': 'i',
    'I': 'I',
    'l': 'l',
    'L': 'L',
    'q': 'q',
    'Q': 'Q',
    'e': 'e',
    'f': 'f',
    'd': 'd',
    'g': 'g',
    'Zf': 'F',
    'Zd': 'D',
    'Zg': 'G',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())

_pep3118_standard_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'i2',
    'H': 'u2',
    'i': 'i4',
    'I': 'u4',
    'l': 'i4',
    'L': 'u4',
    'q': 'i8',
    'Q': 'u8',
    'e': 'f2',
    'f': 'f',
    'd': 'd',
    'Zf': 'F',
    'Zd': 'D',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())

_pep3118_unsupported_map = {
    'u': 'UCS-2 strings',
    '&': 'pointers',
    't': 'bitfields',
    'X': 'function pointers',
}

class _Stream:
    def __init__(self, s):
        self.s = s
        self.byteorder = '@'

    def advance(self, n):
        res = self.s[:n]
        self.s = self.s[n:]
        return res

    def consume(self, c):
        if self.s[:len(c)] == c:
            self.advance(len(c))
            return True
        return False

    def consume_until(self, c):
        if callable(c):
            i = 0
            while i < len(self.s) and not c(self.s[i]):
                i = i + 1
            return self.advance(i)
        else:
            i = self.s.index(c)
            res = self.advance(i)
            self.advance(len(c))
            return res

    @property
    def next(self):
        return self.s[0]

    def __bool__(self):
        return bool(self.s)


def _dtype_from_pep3118(spec):
    stream = _Stream(spec)
    dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
    return dtype

def __dtype_from_pep3118(stream, is_subdtype):
    field_spec = dict(
        names=[],
        formats=[],
        offsets=[],
        itemsize=0
    )
    offset = 0
    common_alignment = 1
    is_padding = False

    # Parse spec
    while stream:
        value = None

        # End of structure, bail out to upper level
        if stream.consume('}'):
            break

        # Sub-arrays (1)
        shape = None
        if stream.consume('('):
            shape = stream.consume_until(')')
            shape = tuple(map(int, shape.split(',')))

        # Byte order
        if stream.next in ('@', '=', '<', '>', '^', '!'):
            byteorder = stream.advance(1)
            if byteorder == '!':
                byteorder = '>'
            stream.byteorder = byteorder

        # Byte order characters also control native vs. standard type sizes
        if stream.byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars

        # Item sizes
        itemsize_str = stream.consume_until(lambda c: not c.isdigit())
        if itemsize_str:
            itemsize = int(itemsize_str)
        else:
            itemsize = 1

        # Data types
        is_padding = False

        if stream.consume('T{'):
            value, align = __dtype_from_pep3118(
                stream, is_subdtype=True)
        elif stream.next in type_map_chars:
            if stream.next == 'Z':
                typechar = stream.advance(2)
            else:
                typechar = stream.advance(1)

            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {'@': '=', '^': '='}.get(
                stream.byteorder, stream.byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        elif stream.next in _pep3118_unsupported_map:
            desc = _pep3118_unsupported_map[stream.next]
            raise NotImplementedError(
                "Unrepresentable PEP 3118 data type {!r} ({})"
                .format(stream.next, desc))
        else:
            raise ValueError(
                "Unknown PEP 3118 data type specifier %r" % stream.s
            )

        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly
        # implies that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if stream.byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align

            offset += start_padding

            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding

            # Update common alignment
            common_alignment = _lcm(align, common_alignment)

        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize,)))

        # Sub-arrays (2)
        if shape is not None:
            value = dtype((value, shape))

        # Field name
        if stream.consume(':'):
            name = stream.consume_until(':')
        else:
            name = None

        if not (is_padding and name is None):
            if name is not None and name in field_spec['names']:
                raise RuntimeError(
                    f"Duplicate field name '{name}' in PEP3118 format"
                )
            field_spec['names'].append(name)
            field_spec['formats'].append(value)
            field_spec['offsets'].append(offset)

        offset += value.itemsize
        offset += extra_offset

    field_spec['itemsize'] = offset

    # extra final padding for aligned types
    if stream.byteorder == '@':
        field_spec['itemsize'] += (-offset) % common_alignment

    # Check if this was a simple 1-item type, and unwrap it
    if (field_spec['names'] == [None]
            and field_spec['offsets'][0] == 0
            and field_spec['itemsize'] == field_spec['formats'][0].itemsize
            and not is_subdtype):
        ret = field_spec['formats'][0]
    else:
        _fix_names(field_spec)
        ret = dtype(field_spec)

    # Finished
    return ret, common_alignment
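
# Illustrative usage (editor's sketch, not from the upstream file): on a
# little-endian build one would expect
#
#     _dtype_from_pep3118("<i")         ->  dtype('<i4')
#     _dtype_from_pep3118("d:x:d:y:")   ->  dtype([('x', '<f8'), ('y', '<f8')])
#
# the second form being roughly how a buffer exposing a C struct with two
# doubles named x and y would describe its fields.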

def _fix_names(field_spec):
    """ Replace names which are None with the next unused f%d name """
    names = field_spec['names']
    for i, name in enumerate(names):
        if name is not None:
            continue

        j = 0
        while True:
            name = f'f{j}'
            if name not in names:
                break
            j = j + 1
        names[i] = name

def _add_trailing_padding(value, padding):
    """Inject the specified number of padding bytes at the end of a dtype"""
    if value.fields is None:
        field_spec = dict(
            names=['f0'],
            formats=[value],
            offsets=[0],
            itemsize=value.itemsize
        )
    else:
        fields = value.fields
        names = value.names
        field_spec = dict(
            names=names,
            formats=[fields[name][0] for name in names],
            offsets=[fields[name][1] for name in names],
            itemsize=value.itemsize
        )

    field_spec['itemsize'] += padding
    return dtype(field_spec)

def _prod(a):
    p = 1
    for x in a:
        p *= x
    return p

def _gcd(a, b):
    """Calculate the greatest common divisor of a and b"""
    if not (math.isfinite(a) and math.isfinite(b)):
        raise ValueError('Can only find greatest common divisor of '
                         f'finite arguments, found "{a}" and "{b}"')
    while b:
        a, b = b, a % b
    return a

def _lcm(a, b):
    return a // _gcd(a, b) * b
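
# Illustrative usage (editor's sketch, not from the upstream file): small
# helpers used by the PEP 3118 parser above, e.g.
#
#     _prod((2, 3, 4))   ->  24
#     _gcd(12, 8)        ->  4
#     _lcm(4, 6)         ->  12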

def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
    """ Format the error message for when __array_ufunc__ gives up. """
    args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
                            ['{}={!r}'.format(k, v)
                             for k, v in kwargs.items()])
    args = inputs + kwargs.get('out', ())
    types_string = ', '.join(repr(type(arg).__name__) for arg in args)
    return ('operand type(s) all returned NotImplemented from '
            '__array_ufunc__({!r}, {!r}, {}): {}'
            .format(ufunc, method, args_string, types_string))


def array_function_errmsg_formatter(public_api, types):
    """ Format the error message for when __array_function__ gives up. """
    func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
    return ("no implementation found for '{}' on types that implement "
            '__array_function__: {}'.format(func_name, list(types)))


def _ufunc_doc_signature_formatter(ufunc):
    """
    Builds a signature string which resembles PEP 457

    This is used to construct the first line of the docstring
    """

    # input arguments are simple
    if ufunc.nin == 1:
        in_args = 'x'
    else:
        in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin))

    # output arguments may be passed either positionally or by keyword
    if ufunc.nout == 0:
        out_args = ', /, out=()'
    elif ufunc.nout == 1:
        out_args = ', /, out=None'
    else:
        out_args = '[, {positional}], / [, out={default}]'.format(
            positional=', '.join(
                'out{}'.format(i+1) for i in range(ufunc.nout)),
            default=repr((None,)*ufunc.nout)
        )

    # keyword only args depend on whether this is a gufunc
    kwargs = (
        ", casting='same_kind'"
        ", order='K'"
        ", dtype=None"
        ", subok=True"
    )

    # NOTE: gufuncs may or may not support the `axis` parameter
    if ufunc.signature is None:
        kwargs = f", where=True{kwargs}[, signature]"
    else:
        kwargs += "[, signature, axes, axis]"

    # join all the parts together
    return '{name}({in_args}{out_args}, *{kwargs})'.format(
        name=ufunc.__name__,
        in_args=in_args,
        out_args=out_args,
        kwargs=kwargs
    )
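
# Illustrative usage (editor's sketch, not from the upstream file): for a
# plain binary ufunc such as np.add this is expected to produce (shown
# wrapped here)
#
#     add(x1, x2, /, out=None, *, where=True, casting='same_kind',
#     order='K', dtype=None, subok=True[, signature])
#
# while gufuncs (ufunc.signature is not None) get the
# "[, signature, axes, axis]" tail instead of the where= keyword.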

def npy_ctypes_check(cls):
    # determine if a class comes from ctypes, in order to work around
    # a bug in the buffer protocol for those objects, bpo-10746
    try:
        # ctypes classes are new-style, so have an __mro__. This probably
        # fails for ctypes classes with multiple inheritance.
        if IS_PYPY:
            # (..., _ctypes.basics._CData, Bufferable, object)
            ctype_base = cls.__mro__[-3]
        else:
            # (..., _ctypes._CData, object)
            ctype_base = cls.__mro__[-2]
        # right now, they're part of the _ctypes module
        return '_ctypes' in ctype_base.__module__
    except Exception:
        return False
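
# Illustrative usage (editor's sketch, not from the upstream file): on
# CPython, and assuming the ctypes import above succeeded,
#
#     npy_ctypes_check(ctypes.c_int)   ->  True
#     npy_ctypes_check(int)            ->  False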

# used to handle the _NoValue default argument for na_object
# in the C implementation of the __reduce__ method for stringdtype
def _convert_to_stringdtype_kwargs(coerce, na_object=_NoValue):
    if na_object is _NoValue:
        return StringDType(coerce=coerce)
    return StringDType(coerce=coerce, na_object=na_object)