Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.9/dist-packages/numpy/_core/_internal.py: 17%

454 statements  

coverage.py v7.4.4, created at 2024-04-09 06:12 +0000

1""" 

2A place for internal code 

3 

4Some things are more easily handled Python. 

5 

6""" 

7import ast 

8import re 

9import sys 

10import warnings 

11 

12from ..exceptions import DTypePromotionError 

13from .multiarray import dtype, array, ndarray, promote_types, StringDType 

14from numpy import _NoValue 

15try: 

16 import ctypes 

17except ImportError: 

18 ctypes = None 

19 

20IS_PYPY = sys.implementation.name == 'pypy' 

21 

22if sys.byteorder == 'little': 

23 _nbo = '<' 

24else: 

25 _nbo = '>' 

def _makenames_list(adict, align):
    allfields = []

    for fname, obj in adict.items():
        n = len(obj)
        if not isinstance(obj, tuple) or n not in (2, 3):
            raise ValueError("entry not a 2- or 3- tuple")
        if n > 2 and obj[2] == fname:
            continue
        num = int(obj[1])
        if num < 0:
            raise ValueError("invalid offset.")
        format = dtype(obj[0], align=align)
        if n > 2:
            title = obj[2]
        else:
            title = None
        allfields.append((fname, format, num, title))

    # sort by offsets
    allfields.sort(key=lambda x: x[2])
    names = [x[0] for x in allfields]
    formats = [x[1] for x in allfields]
    offsets = [x[2] for x in allfields]
    titles = [x[3] for x in allfields]

    return names, formats, offsets, titles

# Called in PyArray_DescrConverter function when
# a dictionary without "names" and "formats"
# fields is used as a data-type descriptor.
def _usefields(adict, align):
    try:
        names = adict[-1]
    except KeyError:
        names = None
    if names is None:
        names, formats, offsets, titles = _makenames_list(adict, align)
    else:
        formats = []
        offsets = []
        titles = []
        for name in names:
            res = adict[name]
            formats.append(res[0])
            offsets.append(res[1])
            if len(res) > 2:
                titles.append(res[2])
            else:
                titles.append(None)

    return dtype({"names": names,
                  "formats": formats,
                  "offsets": offsets,
                  "titles": titles}, align)
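
# Editor's note: an illustrative sketch (not part of the original module) of
# the "fields dict" form that PyArray_DescrConverter hands to _usefields.
# The offsets and itemsize shown assume a packed little-endian layout.
#
#     >>> import numpy as np
#     >>> dt = np.dtype({'x': ('<i4', 0), 'y': ('<f8', 4)})
#     >>> dt.names, dt.itemsize
#     (('x', 'y'), 12)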

# construct an array_protocol descriptor list
# from the fields attribute of a descriptor
# This calls itself recursively but should eventually hit
# a descriptor that has no fields and then return
# a simple typestring

def _array_descr(descriptor):
    fields = descriptor.fields
    if fields is None:
        subdtype = descriptor.subdtype
        if subdtype is None:
            if descriptor.metadata is None:
                return descriptor.str
            else:
                new = descriptor.metadata.copy()
                if new:
                    return (descriptor.str, new)
                else:
                    return descriptor.str
        else:
            return (_array_descr(subdtype[0]), subdtype[1])

    names = descriptor.names
    ordered_fields = [fields[x] + (x,) for x in names]
    result = []
    offset = 0
    for field in ordered_fields:
        if field[1] > offset:
            num = field[1] - offset
            result.append(('', f'|V{num}'))
            offset += num
        elif field[1] < offset:
            raise ValueError(
                "dtype.descr is not defined for types with overlapping or "
                "out-of-order fields")
        if len(field) > 3:
            name = (field[2], field[3])
        else:
            name = field[2]
        if field[0].subdtype:
            tup = (name, _array_descr(field[0].subdtype[0]),
                   field[0].subdtype[1])
        else:
            tup = (name, _array_descr(field[0]))
        offset += field[0].itemsize
        result.append(tup)

    if descriptor.itemsize > offset:
        num = descriptor.itemsize - offset
        result.append(('', f'|V{num}'))

    return result
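
# Editor's note: an illustrative sketch (not part of the original module).
# _array_descr backs the public ``dtype.descr`` attribute, e.g.:
#
#     >>> import numpy as np
#     >>> np.dtype([('a', '<i4'), ('b', '<f8')]).descr
#     [('a', '<i4'), ('b', '<f8')]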

# format_re was originally from numarray by J. Todd Miller

format_re = re.compile(r'(?P<order1>[<>|=]?)'
                       r'(?P<repeats> *[(]?[ ,0-9]*[)]? *)'
                       r'(?P<order2>[<>|=]?)'
                       r'(?P<dtype>[A-Za-z0-9.?]*(?:\[[a-zA-Z0-9,.]+\])?)')
sep_re = re.compile(r'\s*,\s*')
space_re = re.compile(r'\s+$')

# astr is a string (perhaps comma separated)

_convorder = {'=': _nbo}

def _commastring(astr):
    startindex = 0
    result = []
    islist = False
    while startindex < len(astr):
        mo = format_re.match(astr, pos=startindex)
        try:
            (order1, repeats, order2, dtype) = mo.groups()
        except (TypeError, AttributeError):
            raise ValueError(
                f'format number {len(result)+1} of "{astr}" is not recognized'
            ) from None
        startindex = mo.end()
        # Separator or ending padding
        if startindex < len(astr):
            if space_re.match(astr, pos=startindex):
                startindex = len(astr)
            else:
                mo = sep_re.match(astr, pos=startindex)
                if not mo:
                    raise ValueError(
                        'format number %d of "%s" is not recognized' %
                        (len(result)+1, astr))
                startindex = mo.end()
                islist = True

        if order2 == '':
            order = order1
        elif order1 == '':
            order = order2
        else:
            order1 = _convorder.get(order1, order1)
            order2 = _convorder.get(order2, order2)
            if (order1 != order2):
                raise ValueError(
                    'inconsistent byte-order specification %s and %s' %
                    (order1, order2))
            order = order1

        if order in ('|', '=', _nbo):
            order = ''
        dtype = order + dtype
        if repeats == '':
            newitem = dtype
        else:
            if (repeats[0] == "(" and repeats[-1] == ")"
                    and repeats[1:-1].strip() != ""
                    and "," not in repeats):
                warnings.warn(
                    'Passing in a parenthesized single number for repeats '
                    'is deprecated; pass either a single number or indicate '
                    'a tuple with a comma, like "(2,)".', DeprecationWarning,
                    stacklevel=2)
            newitem = (dtype, ast.literal_eval(repeats))

        result.append(newitem)

    return result if islist else result[0]
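
# Editor's note: an illustrative sketch (not part of the original module).
# _commastring parses comma-separated typestrings such as the ones accepted
# by ``np.dtype('i4, (2,3)f8')``; for the input below it should yield
# something like the following (item shapes derived from the code above):
#
#     >>> _commastring('>i4, (2,3)f8')
#     ['>i4', ('f8', (2, 3))]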

class dummy_ctype:

    def __init__(self, cls):
        self._cls = cls

    def __mul__(self, other):
        return self

    def __call__(self, *other):
        return self._cls(other)

    def __eq__(self, other):
        return self._cls == other._cls

    def __ne__(self, other):
        return self._cls != other._cls

def _getintp_ctype():
    val = _getintp_ctype.cache
    if val is not None:
        return val
    if ctypes is None:
        import numpy as np
        val = dummy_ctype(np.intp)
    else:
        char = dtype('n').char
        if char == 'i':
            val = ctypes.c_int
        elif char == 'l':
            val = ctypes.c_long
        elif char == 'q':
            val = ctypes.c_longlong
        else:
            val = ctypes.c_long
    _getintp_ctype.cache = val
    return val


_getintp_ctype.cache = None
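
# Editor's note: an illustrative sketch (not part of the original module).
# ``dtype('n')`` is the platform intp type; on a typical 64-bit Linux build
# its char is 'l', so the cached value there is usually ``ctypes.c_long``
# (this is platform-dependent, hence the char-based dispatch above):
#
#     >>> import ctypes
#     >>> _getintp_ctype() in (ctypes.c_int, ctypes.c_long, ctypes.c_longlong)
#     True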

# Used for .ctypes attribute of ndarray

class _missing_ctypes:
    def cast(self, num, obj):
        return num.value

    class c_void_p:
        def __init__(self, ptr):
            self.value = ptr


class _ctypes:
    def __init__(self, array, ptr=None):
        self._arr = array

        if ctypes:
            self._ctypes = ctypes
            self._data = self._ctypes.c_void_p(ptr)
        else:
            # fake a pointer-like object that holds onto the reference
            self._ctypes = _missing_ctypes()
            self._data = self._ctypes.c_void_p(ptr)
            self._data._objects = array

        if self._arr.ndim == 0:
            self._zerod = True
        else:
            self._zerod = False

    def data_as(self, obj):
        """
        Return the data pointer cast to a particular c-types object.
        For example, calling ``self._as_parameter_`` is equivalent to
        ``self.data_as(ctypes.c_void_p)``. Perhaps you want to use
        the data as a pointer to a ctypes array of floating-point data:
        ``self.data_as(ctypes.POINTER(ctypes.c_double))``.

        The returned pointer will keep a reference to the array.
        """
        # _ctypes.cast function causes a circular reference of self._data in
        # self._data._objects. Attributes of self._data cannot be released
        # until gc.collect is called. Make a copy of the pointer first then
        # let it hold the array reference. This is a workaround to circumvent
        # the CPython bug https://bugs.python.org/issue12836.
        ptr = self._ctypes.cast(self._data, obj)
        ptr._arr = self._arr
        return ptr

    def shape_as(self, obj):
        """
        Return the shape tuple as an array of some other c-types
        type. For example: ``self.shape_as(ctypes.c_short)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.shape)

    def strides_as(self, obj):
        """
        Return the strides tuple as an array of some other
        c-types type. For example: ``self.strides_as(ctypes.c_longlong)``.
        """
        if self._zerod:
            return None
        return (obj*self._arr.ndim)(*self._arr.strides)

    @property
    def data(self):
        """
        A pointer to the memory area of the array as a Python integer.
        This memory area may contain data that is not aligned, or not in
        correct byte-order. The memory area may not even be writeable.
        The array flags and data-type of this array should be respected
        when passing this attribute to arbitrary C-code to avoid trouble
        that can include Python crashing. User Beware! The value of this
        attribute is exactly the same as:
        ``self.__array_interface__['data'][0]``.

        Note that unlike ``data_as``, a reference won't be kept to the array:
        code like ``ctypes.c_void_p((a + b).ctypes.data)`` will result in a
        pointer to a deallocated array, and should be spelt
        ``(a + b).ctypes.data_as(ctypes.c_void_p)``
        """
        return self._data.value

    @property
    def shape(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the C-integer corresponding to ``dtype('p')`` on this
        platform (see `~numpy.ctypeslib.c_intp`). This base-type could be
        `ctypes.c_int`, `ctypes.c_long`, or `ctypes.c_longlong` depending on
        the platform. The ctypes array contains the shape of
        the underlying array.
        """
        return self.shape_as(_getintp_ctype())

    @property
    def strides(self):
        """
        (c_intp*self.ndim): A ctypes array of length self.ndim where
        the basetype is the same as for the shape attribute. This ctypes
        array contains the strides information from the underlying array.
        This strides information is important for showing how many bytes
        must be jumped to get to the next element in the array.
        """
        return self.strides_as(_getintp_ctype())

    @property
    def _as_parameter_(self):
        """
        Overrides the ctypes semi-magic method

        Enables `c_func(some_array.ctypes)`
        """
        return self.data_as(ctypes.c_void_p)

    # Numpy 1.21.0, 2021-05-18

    def get_data(self):
        """Deprecated getter for the `_ctypes.data` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_data" is deprecated. Use "data" instead',
                      DeprecationWarning, stacklevel=2)
        return self.data

    def get_shape(self):
        """Deprecated getter for the `_ctypes.shape` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_shape" is deprecated. Use "shape" instead',
                      DeprecationWarning, stacklevel=2)
        return self.shape

    def get_strides(self):
        """Deprecated getter for the `_ctypes.strides` property.

        .. deprecated:: 1.21
        """
        warnings.warn('"get_strides" is deprecated. Use "strides" instead',
                      DeprecationWarning, stacklevel=2)
        return self.strides

    def get_as_parameter(self):
        """Deprecated getter for the `_ctypes._as_parameter_` property.

        .. deprecated:: 1.21
        """
        warnings.warn(
            '"get_as_parameter" is deprecated. Use "_as_parameter_" instead',
            DeprecationWarning, stacklevel=2,
        )
        return self._as_parameter_
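
# Editor's note: an illustrative sketch (not part of the original module)
# of how this helper is reached through the public ``.ctypes`` attribute;
# the raw pointer value is of course platform- and run-specific.
#
#     >>> import ctypes, numpy as np
#     >>> a = np.arange(3, dtype=np.float64)
#     >>> p = a.ctypes.data_as(ctypes.POINTER(ctypes.c_double))
#     >>> p[0], tuple(a.ctypes.shape)
#     (0.0, (3,))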

def _newnames(datatype, order):
    """
    Given a datatype and an order object, return a new names tuple, with the
    order indicated
    """
    oldnames = datatype.names
    nameslist = list(oldnames)
    if isinstance(order, str):
        order = [order]
    seen = set()
    if isinstance(order, (list, tuple)):
        for name in order:
            try:
                nameslist.remove(name)
            except ValueError:
                if name in seen:
                    raise ValueError(f"duplicate field name: {name}") from None
                else:
                    raise ValueError(f"unknown field name: {name}") from None
            seen.add(name)
        return tuple(list(order) + nameslist)
    raise ValueError(f"unsupported order value: {order}")
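
# Editor's note: an illustrative sketch (not part of the original module);
# this helper backs reordering of structured-dtype fields (for example the
# ``order`` argument of sorting routines on structured arrays):
#
#     >>> _newnames(dtype([('a', 'i4'), ('b', 'f8')]), 'b')
#     ('b', 'a')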

def _copy_fields(ary):
    """Return copy of structured array with padding between fields removed.

    Parameters
    ----------
    ary : ndarray
        Structured array from which to remove padding bytes

    Returns
    -------
    ary_copy : ndarray
        Copy of ary with padding bytes removed
    """
    dt = ary.dtype
    copy_dtype = {'names': dt.names,
                  'formats': [dt.fields[name][0] for name in dt.names]}
    return array(ary, dtype=copy_dtype, copy=True)
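
# Editor's note: an illustrative sketch (not part of the original module).
# For an aligned struct dtype such as ``dtype('u1,f8', align=True)``
# (itemsize 16 on common platforms), the copy made here is packed, so its
# itemsize should drop to 9:
#
#     >>> a = ndarray(2, dtype=dtype('u1,f8', align=True))
#     >>> _copy_fields(a).dtype.itemsize
#     9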

def _promote_fields(dt1, dt2):
    """ Perform type promotion for two structured dtypes.

    Parameters
    ----------
    dt1 : structured dtype
        First dtype.
    dt2 : structured dtype
        Second dtype.

    Returns
    -------
    out : dtype
        The promoted dtype

    Notes
    -----
    If one of the inputs is aligned, the result will be. The titles of
    both descriptors must match (point to the same field).
    """
    # Both must be structured and have the same names in the same order
    if (dt1.names is None or dt2.names is None) or dt1.names != dt2.names:
        raise DTypePromotionError(
            f"field names `{dt1.names}` and `{dt2.names}` mismatch.")

    # if both are identical, we can (maybe!) just return the same dtype.
    identical = dt1 is dt2
    new_fields = []
    for name in dt1.names:
        field1 = dt1.fields[name]
        field2 = dt2.fields[name]
        new_descr = promote_types(field1[0], field2[0])
        identical = identical and new_descr is field1[0]

        # Check that the titles match (if given):
        if field1[2:] != field2[2:]:
            raise DTypePromotionError(
                f"field titles of field '{name}' mismatch")
        if len(field1) == 2:
            new_fields.append((name, new_descr))
        else:
            new_fields.append(((field1[2], name), new_descr))

    res = dtype(new_fields, align=dt1.isalignedstruct or dt2.isalignedstruct)

    # Might as well preserve identity (and metadata) if the dtype is identical
    # and the itemsize, offsets are also unmodified. This could probably be
    # sped up, but also probably just be removed entirely.
    if identical and res.itemsize == dt1.itemsize:
        for name in dt1.names:
            if dt1.fields[name][1] != res.fields[name][1]:
                return res  # the dtype changed.
        return dt1

    return res
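
# Editor's note: an illustrative sketch (not part of the original module).
# _promote_fields backs promotion of structured dtypes, reachable through
# ``promote_types`` (imported above); i4 and f4 promote to f8, so on a
# little-endian build:
#
#     >>> promote_types(dtype([('a', 'i4')]), dtype([('a', 'f4')]))
#     dtype([('a', '<f8')])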

def _getfield_is_safe(oldtype, newtype, offset):
    """ Checks safety of getfield for object arrays.

    As in _view_is_safe, we need to check that memory containing objects is not
    reinterpreted as a non-object datatype and vice versa.

    Parameters
    ----------
    oldtype : data-type
        Data type of the original ndarray.
    newtype : data-type
        Data type of the field being accessed by ndarray.getfield
    offset : int
        Offset of the field being accessed by ndarray.getfield

    Raises
    ------
    TypeError
        If the field access is invalid

    """
    if newtype.hasobject or oldtype.hasobject:
        if offset == 0 and newtype == oldtype:
            return
        if oldtype.names is not None:
            for name in oldtype.names:
                if (oldtype.fields[name][1] == offset and
                        oldtype.fields[name][0] == newtype):
                    return
        raise TypeError("Cannot get/set field of an object array")
    return

def _view_is_safe(oldtype, newtype):
    """ Checks safety of a view involving object arrays, for example when
    doing::

        np.zeros(10, dtype=oldtype).view(newtype)

    Parameters
    ----------
    oldtype : data-type
        Data type of original ndarray
    newtype : data-type
        Data type of the view

    Raises
    ------
    TypeError
        If the new type is incompatible with the old type.

    """

    # if the types are equivalent, there is no problem.
    # for example: dtype((np.record, 'i4,i4')) == dtype((np.void, 'i4,i4'))
    if oldtype == newtype:
        return

    if newtype.hasobject or oldtype.hasobject:
        raise TypeError("Cannot change data-type for array of references.")
    return
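
# Editor's note: an illustrative sketch (not part of the original module).
# Viewing memory that holds object pointers as a plain integer dtype is
# rejected by the check above; roughly:
#
#     >>> import numpy as np
#     >>> np.array([None, None]).view('i8')
#     Traceback (most recent call last):
#         ...
#     TypeError: Cannot change data-type for array of references.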

# Given a string containing a PEP 3118 format specifier,
# construct a NumPy dtype

_pep3118_native_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'h',
    'H': 'H',
    'i': 'i',
    'I': 'I',
    'l': 'l',
    'L': 'L',
    'q': 'q',
    'Q': 'Q',
    'e': 'e',
    'f': 'f',
    'd': 'd',
    'g': 'g',
    'Zf': 'F',
    'Zd': 'D',
    'Zg': 'G',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_native_typechars = ''.join(_pep3118_native_map.keys())

_pep3118_standard_map = {
    '?': '?',
    'c': 'S1',
    'b': 'b',
    'B': 'B',
    'h': 'i2',
    'H': 'u2',
    'i': 'i4',
    'I': 'u4',
    'l': 'i4',
    'L': 'u4',
    'q': 'i8',
    'Q': 'u8',
    'e': 'f2',
    'f': 'f',
    'd': 'd',
    'Zf': 'F',
    'Zd': 'D',
    's': 'S',
    'w': 'U',
    'O': 'O',
    'x': 'V',  # padding
}
_pep3118_standard_typechars = ''.join(_pep3118_standard_map.keys())

_pep3118_unsupported_map = {
    'u': 'UCS-2 strings',
    '&': 'pointers',
    't': 'bitfields',
    'X': 'function pointers',
}

class _Stream:
    def __init__(self, s):
        self.s = s
        self.byteorder = '@'

    def advance(self, n):
        res = self.s[:n]
        self.s = self.s[n:]
        return res

    def consume(self, c):
        if self.s[:len(c)] == c:
            self.advance(len(c))
            return True
        return False

    def consume_until(self, c):
        if callable(c):
            i = 0
            while i < len(self.s) and not c(self.s[i]):
                i = i + 1
            return self.advance(i)
        else:
            i = self.s.index(c)
            res = self.advance(i)
            self.advance(len(c))
            return res

    @property
    def next(self):
        return self.s[0]

    def __bool__(self):
        return bool(self.s)

def _dtype_from_pep3118(spec):
    stream = _Stream(spec)
    dtype, align = __dtype_from_pep3118(stream, is_subdtype=False)
    return dtype
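
# Editor's note: an illustrative sketch (not part of the original module).
# With the '<' prefix the standard-size table applies and no alignment
# padding is inserted, so a two-field struct spec should come out packed;
# the exact repr below is an assumption based on the maps above:
#
#     >>> _dtype_from_pep3118('<i:x:d:y:')
#     dtype([('x', '<i4'), ('y', '<f8')])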

def __dtype_from_pep3118(stream, is_subdtype):
    field_spec = dict(
        names=[],
        formats=[],
        offsets=[],
        itemsize=0
    )
    offset = 0
    common_alignment = 1
    is_padding = False

    # Parse spec
    while stream:
        value = None

        # End of structure, bail out to upper level
        if stream.consume('}'):
            break

        # Sub-arrays (1)
        shape = None
        if stream.consume('('):
            shape = stream.consume_until(')')
            shape = tuple(map(int, shape.split(',')))

        # Byte order
        if stream.next in ('@', '=', '<', '>', '^', '!'):
            byteorder = stream.advance(1)
            if byteorder == '!':
                byteorder = '>'
            stream.byteorder = byteorder

        # Byte order characters also control native vs. standard type sizes
        if stream.byteorder in ('@', '^'):
            type_map = _pep3118_native_map
            type_map_chars = _pep3118_native_typechars
        else:
            type_map = _pep3118_standard_map
            type_map_chars = _pep3118_standard_typechars

        # Item sizes
        itemsize_str = stream.consume_until(lambda c: not c.isdigit())
        if itemsize_str:
            itemsize = int(itemsize_str)
        else:
            itemsize = 1

        # Data types
        is_padding = False

        if stream.consume('T{'):
            value, align = __dtype_from_pep3118(
                stream, is_subdtype=True)
        elif stream.next in type_map_chars:
            if stream.next == 'Z':
                typechar = stream.advance(2)
            else:
                typechar = stream.advance(1)

            is_padding = (typechar == 'x')
            dtypechar = type_map[typechar]
            if dtypechar in 'USV':
                dtypechar += '%d' % itemsize
                itemsize = 1
            numpy_byteorder = {'@': '=', '^': '='}.get(
                stream.byteorder, stream.byteorder)
            value = dtype(numpy_byteorder + dtypechar)
            align = value.alignment
        elif stream.next in _pep3118_unsupported_map:
            desc = _pep3118_unsupported_map[stream.next]
            raise NotImplementedError(
                "Unrepresentable PEP 3118 data type {!r} ({})"
                .format(stream.next, desc))
        else:
            raise ValueError(
                "Unknown PEP 3118 data type specifier %r" % stream.s
            )

        #
        # Native alignment may require padding
        #
        # Here we assume that the presence of a '@' character implicitly
        # implies that the start of the array is *already* aligned.
        #
        extra_offset = 0
        if stream.byteorder == '@':
            start_padding = (-offset) % align
            intra_padding = (-value.itemsize) % align

            offset += start_padding

            if intra_padding != 0:
                if itemsize > 1 or (shape is not None and _prod(shape) > 1):
                    # Inject internal padding to the end of the sub-item
                    value = _add_trailing_padding(value, intra_padding)
                else:
                    # We can postpone the injection of internal padding,
                    # as the item appears at most once
                    extra_offset += intra_padding

            # Update common alignment
            common_alignment = _lcm(align, common_alignment)

        # Convert itemsize to sub-array
        if itemsize != 1:
            value = dtype((value, (itemsize,)))

        # Sub-arrays (2)
        if shape is not None:
            value = dtype((value, shape))

        # Field name
        if stream.consume(':'):
            name = stream.consume_until(':')
        else:
            name = None

        if not (is_padding and name is None):
            if name is not None and name in field_spec['names']:
                raise RuntimeError(
                    f"Duplicate field name '{name}' in PEP3118 format"
                )
            field_spec['names'].append(name)
            field_spec['formats'].append(value)
            field_spec['offsets'].append(offset)

        offset += value.itemsize
        offset += extra_offset

    field_spec['itemsize'] = offset

    # extra final padding for aligned types
    if stream.byteorder == '@':
        field_spec['itemsize'] += (-offset) % common_alignment

    # Check if this was a simple 1-item type, and unwrap it
    if (field_spec['names'] == [None]
            and field_spec['offsets'][0] == 0
            and field_spec['itemsize'] == field_spec['formats'][0].itemsize
            and not is_subdtype):
        ret = field_spec['formats'][0]
    else:
        _fix_names(field_spec)
        ret = dtype(field_spec)

    # Finished
    return ret, common_alignment

def _fix_names(field_spec):
    """ Replace names which are None with the next unused f%d name """
    names = field_spec['names']
    for i, name in enumerate(names):
        if name is not None:
            continue

        j = 0
        while True:
            name = f'f{j}'
            if name not in names:
                break
            j = j + 1
        names[i] = name

def _add_trailing_padding(value, padding):
    """Inject the specified number of padding bytes at the end of a dtype"""
    if value.fields is None:
        field_spec = dict(
            names=['f0'],
            formats=[value],
            offsets=[0],
            itemsize=value.itemsize
        )
    else:
        fields = value.fields
        names = value.names
        field_spec = dict(
            names=names,
            formats=[fields[name][0] for name in names],
            offsets=[fields[name][1] for name in names],
            itemsize=value.itemsize
        )

    field_spec['itemsize'] += padding
    return dtype(field_spec)

def _prod(a):
    p = 1
    for x in a:
        p *= x
    return p

def _gcd(a, b):
    """Calculate the greatest common divisor of a and b"""
    while b:
        a, b = b, a % b
    return a

def _lcm(a, b):
    return a // _gcd(a, b) * b
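
# Editor's note: an illustrative sketch (not part of the original module).
# The struct's overall alignment is the least common multiple of its
# members' alignments, e.g. members aligned to 4 and 8 bytes give:
#
#     >>> _lcm(4, 8)
#     8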

def array_ufunc_errmsg_formatter(dummy, ufunc, method, *inputs, **kwargs):
    """ Format the error message for when __array_ufunc__ gives up. """
    args_string = ', '.join(['{!r}'.format(arg) for arg in inputs] +
                            ['{}={!r}'.format(k, v)
                             for k, v in kwargs.items()])
    args = inputs + kwargs.get('out', ())
    types_string = ', '.join(repr(type(arg).__name__) for arg in args)
    return ('operand type(s) all returned NotImplemented from '
            '__array_ufunc__({!r}, {!r}, {}): {}'
            .format(ufunc, method, args_string, types_string))


def array_function_errmsg_formatter(public_api, types):
    """ Format the error message for when __array_function__ gives up. """
    func_name = '{}.{}'.format(public_api.__module__, public_api.__name__)
    return ("no implementation found for '{}' on types that implement "
            '__array_function__: {}'.format(func_name, list(types)))

def _ufunc_doc_signature_formatter(ufunc):
    """
    Builds a signature string which resembles PEP 457

    This is used to construct the first line of the docstring
    """

    # input arguments are simple
    if ufunc.nin == 1:
        in_args = 'x'
    else:
        in_args = ', '.join(f'x{i+1}' for i in range(ufunc.nin))

    # output arguments are both keyword or positional
    if ufunc.nout == 0:
        out_args = ', /, out=()'
    elif ufunc.nout == 1:
        out_args = ', /, out=None'
    else:
        out_args = '[, {positional}], / [, out={default}]'.format(
            positional=', '.join(
                'out{}'.format(i+1) for i in range(ufunc.nout)),
            default=repr((None,)*ufunc.nout)
        )

    # keyword only args depend on whether this is a gufunc
    kwargs = (
        ", casting='same_kind'"
        ", order='K'"
        ", dtype=None"
        ", subok=True"
    )

    # NOTE: gufuncs may or may not support the `axis` parameter
    if ufunc.signature is None:
        kwargs = f", where=True{kwargs}[, signature]"
    else:
        kwargs += "[, signature, axes, axis]"

    # join all the parts together
    return '{name}({in_args}{out_args}, *{kwargs})'.format(
        name=ufunc.__name__,
        in_args=in_args,
        out_args=out_args,
        kwargs=kwargs
    )
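
# Editor's note: an illustrative sketch (not part of the original module),
# tracing the branches above for a plain binary ufunc such as ``np.add``
# (nin=2, nout=1, signature=None):
#
#     >>> import numpy as np
#     >>> _ufunc_doc_signature_formatter(np.add)
#     "add(x1, x2, /, out=None, *, where=True, casting='same_kind', order='K', dtype=None, subok=True[, signature])"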

def npy_ctypes_check(cls):
    # determine if a class comes from ctypes, in order to work around
    # a bug in the buffer protocol for those objects, bpo-10746
    try:
        # ctypes classes are new-style, so they have an __mro__. This probably
        # fails for ctypes classes with multiple inheritance.
        if IS_PYPY:
            # (..., _ctypes.basics._CData, Bufferable, object)
            ctype_base = cls.__mro__[-3]
        else:
            # (..., _ctypes._CData, object)
            ctype_base = cls.__mro__[-2]
        # right now, they're part of the _ctypes module
        return '_ctypes' in ctype_base.__module__
    except Exception:
        return False
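
# Editor's note: an illustrative sketch (not part of the original module)
# of the intended behaviour on CPython (the PyPy branch above differs):
#
#     >>> import ctypes
#     >>> npy_ctypes_check(ctypes.c_int), npy_ctypes_check(int)
#     (True, False)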

# used to handle the _NoValue default argument for na_object
# in the C implementation of the __reduce__ method for stringdtype
def _convert_to_stringdtype_kwargs(coerce, na_object=_NoValue):
    if na_object is _NoValue:
        return StringDType(coerce=coerce)
    return StringDType(coerce=coerce, na_object=na_object)