Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/numpy/lib/npyio.py: 9%


786 statements  

import os
import re
import functools
import itertools
import warnings
import weakref
import contextlib
import operator
from operator import itemgetter, index as opindex, methodcaller
from collections.abc import Mapping

import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core import overrides
from numpy.core.multiarray import packbits, unpackbits
from numpy.core._multiarray_umath import _load_from_filelike
from numpy.core.overrides import set_array_function_like_doc, set_module
from ._iotools import (
    LineSplitter, NameValidator, StringConverter, ConverterError,
    ConverterLockError, ConversionWarning, _is_string_like,
    has_nested_fields, flatten_dtype, easy_dtype, _decode_line
    )

from numpy.compat import (
    asbytes, asstr, asunicode, os_fspath, os_PathLike,
    pickle
    )


__all__ = [
    'savetxt', 'loadtxt', 'genfromtxt',
    'recfromtxt', 'recfromcsv', 'load', 'save', 'savez',
    'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
    ]


array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')


class BagObj:
    """
    BagObj(obj)

    Convert attribute look-ups to getitems on the object passed in.

    Parameters
    ----------
    obj : class instance
        Object on which attribute look-up is performed.

    Examples
    --------
    >>> from numpy.lib.npyio import BagObj as BO
    >>> class BagDemo:
    ...     def __getitem__(self, key):  # An instance of BagObj(BagDemo)
    ...                                  # will call this method when any
    ...                                  # attribute look-up is required
    ...         result = "Doesn't matter what you want, "
    ...         return result + "you're gonna get this"
    ...
    >>> demo_obj = BagDemo()
    >>> bagobj = BO(demo_obj)
    >>> bagobj.hello_there
    "Doesn't matter what you want, you're gonna get this"
    >>> bagobj.I_can_be_anything
    "Doesn't matter what you want, you're gonna get this"

    """

    def __init__(self, obj):
        # Use weakref to make NpzFile objects collectable by refcount
        self._obj = weakref.proxy(obj)

    def __getattribute__(self, key):
        try:
            return object.__getattribute__(self, '_obj')[key]
        except KeyError:
            raise AttributeError(key) from None

    def __dir__(self):
        """
        Enables dir(bagobj) to list the files in an NpzFile.

        This also enables tab-completion in an interpreter or IPython.
        """
        return list(object.__getattribute__(self, '_obj').keys())

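# Added example (not part of the original numpy source): a minimal sketch of
# how BagObj redirects attribute access to __getitem__ on the wrapped object.
# The helper below is illustrative only; note that the wrapped object must be
# weak-referenceable, which plain dict instances are not (hence the subclass).
def _example_bagobj():
    class _WeakDict(dict):
        pass  # dict subclasses support weak references

    d = _WeakDict(alpha=1, beta=2)
    bag = BagObj(d)
    assert bag.alpha == 1                         # attribute access -> d['alpha']
    assert sorted(dir(bag)) == ['alpha', 'beta']  # __dir__ lists the keys
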

def zipfile_factory(file, *args, **kwargs):
    """
    Create a ZipFile.

    Allows for Zip64, and the `file` argument can accept file, str, or
    pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
    constructor.
    """
    if not hasattr(file, 'read'):
        file = os_fspath(file)
    import zipfile
    kwargs['allowZip64'] = True
    return zipfile.ZipFile(file, *args, **kwargs)

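# Added example (not part of the original numpy source): zipfile_factory is a
# thin wrapper that forces ``allowZip64=True`` so archives larger than 2 GiB
# can be written. Using io.BytesIO keeps this sketch self-contained.
def _example_zipfile_factory():
    import io
    buf = io.BytesIO()
    with zipfile_factory(buf, mode='w') as zf:
        zf.writestr('hello.txt', b'hello')
    buf.seek(0)
    with zipfile_factory(buf) as zf:
        assert zf.read('hello.txt') == b'hello'
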

class NpzFile(Mapping):
    """
    NpzFile(fid)

    A dictionary-like object with lazy-loading of files in the zipped
    archive provided on construction.

    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension;
    other files are ignored.

    The arrays and file strings are lazily loaded on either
    getitem access using ``obj['key']`` or attribute lookup using
    ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
    be obtained with ``obj.files`` and the ZipFile object itself using
    ``obj.zip``.

    Attributes
    ----------
    files : list of str
        List of all files in the archive with a ``.npy`` extension.
    zip : ZipFile instance
        The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        An object on which attribute lookup can be performed as an
        alternative to getitem access on the `NpzFile` instance itself.
    allow_pickle : bool, optional
        Allow loading pickled data. Default: False

        .. versionchanged:: 1.16.3
            Made default False in response to CVE-2019-6446.

    pickle_kwargs : dict, optional
        Additional keyword arguments to pass on to pickle.load.
        These are only useful when loading object arrays saved on
        Python 2 when using Python 3.
    max_header_size : int, optional
        Maximum allowed size of the header. Large headers may not be safe
        to load securely and thus require explicitly passing a larger value.
        See :py:meth:`ast.literal_eval()` for details.
        This option is ignored when `allow_pickle` is passed. In that case
        the file is by definition trusted and the limit is unnecessary.

    Parameters
    ----------
    fid : file or str
        The zipped archive to open. This is either a file-like object
        or a string containing the path to the archive.
    own_fid : bool, optional
        Whether NpzFile should close the file handle.
        Requires that `fid` is a file-like object.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez(outfile, x=x, y=y)
    >>> _ = outfile.seek(0)

    >>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.npyio.NpzFile)
    True
    >>> sorted(npz.files)
    ['x', 'y']
    >>> npz['x']  # getitem access
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> npz.f.x  # attribute lookup
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # Make __exit__ safe if zipfile_factory raises an exception
    zip = None
    fid = None

    def __init__(self, fid, own_fid=False, allow_pickle=False,
                 pickle_kwargs=None, *,
                 max_header_size=format._MAX_HEADER_SIZE):
        # Import is postponed to here since zipfile depends on gzip, an
        # optional component of the so-called standard library.
        _zip = zipfile_factory(fid)
        self._files = _zip.namelist()
        self.files = []
        self.allow_pickle = allow_pickle
        self.max_header_size = max_header_size
        self.pickle_kwargs = pickle_kwargs
        for x in self._files:
            if x.endswith('.npy'):
                self.files.append(x[:-4])
            else:
                self.files.append(x)
        self.zip = _zip
        self.f = BagObj(self)
        if own_fid:
            self.fid = fid

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """
        Close the file.

        """
        if self.zip is not None:
            self.zip.close()
            self.zip = None
        if self.fid is not None:
            self.fid.close()
            self.fid = None
        self.f = None  # break reference cycle

    def __del__(self):
        self.close()

    # Implement the Mapping ABC
    def __iter__(self):
        return iter(self.files)

    def __len__(self):
        return len(self.files)

    def __getitem__(self, key):
        # FIXME: This seems like it will copy strings around
        #   more than is strictly necessary. The zipfile
        #   will read the string and then
        #   the format.read_array will copy the string
        #   to another place in memory.
        #   It would be better if the zipfile could read
        #   (or at least uncompress) the data
        #   directly into the array memory.
        member = False
        if key in self._files:
            member = True
        elif key in self.files:
            member = True
            key += '.npy'
        if member:
            bytes = self.zip.open(key)
            magic = bytes.read(len(format.MAGIC_PREFIX))
            bytes.close()
            if magic == format.MAGIC_PREFIX:
                bytes = self.zip.open(key)
                return format.read_array(bytes,
                                         allow_pickle=self.allow_pickle,
                                         pickle_kwargs=self.pickle_kwargs,
                                         max_header_size=self.max_header_size)
            else:
                return self.zip.read(key)
        else:
            raise KeyError("%s is not a file in the archive" % key)

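# Added example (not part of the original numpy source): because NpzFile
# implements the Mapping ABC (__iter__, __len__, __getitem__), dict-style
# idioms work on it, and each member is only decoded when accessed.
def _example_npzfile_mapping():
    import io
    buf = io.BytesIO()
    np.savez(buf, x=np.arange(3), y=np.zeros(2))
    buf.seek(0)
    with np.load(buf) as npz:                          # context manager support
        names = sorted(npz)                            # iteration yields the keys
        arrays = {name: npz[name] for name in names}   # lazy per-key reads
    return names, arrays
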

@set_module('numpy')
def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
         encoding='ASCII', *, max_header_size=format._MAX_HEADER_SIZE):
    """
    Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.

    .. warning:: Loading files that contain object arrays uses the ``pickle``
                 module, which is not secure against erroneous or maliciously
                 constructed data. Consider passing ``allow_pickle=False`` to
                 load data that is known not to contain object arrays for the
                 safer handling of untrusted sources.

    Parameters
    ----------
    file : file-like object, string, or pathlib.Path
        The file to read. File-like objects must support the
        ``seek()`` and ``read()`` methods and must always
        be opened in binary mode. Pickled files require that the
        file-like object support the ``readline()`` method as well.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, then memory-map the file, using the given mode (see
        `numpy.memmap` for a detailed description of the modes). A
        memory-mapped array is kept on disk. However, it can be accessed
        and sliced like any ndarray. Memory mapping is especially useful
        for accessing small fragments of large files without reading the
        entire file into memory.
    allow_pickle : bool, optional
        Allow loading pickled object arrays stored in npy files. Reasons for
        disallowing pickles include security, as loading pickled data can
        execute arbitrary code. If pickles are disallowed, loading object
        arrays will fail. Default: False

        .. versionchanged:: 1.16.3
            Made default False in response to CVE-2019-6446.

    fix_imports : bool, optional
        Only useful when loading Python 2 generated pickled files on Python 3,
        which includes npy/npz files containing object arrays. If `fix_imports`
        is True, pickle will try to map the old Python 2 names to the new names
        used in Python 3.
    encoding : str, optional
        What encoding to use when reading Python 2 strings. Only useful when
        loading Python 2 generated pickled files in Python 3, which includes
        npy/npz files containing object arrays. Values other than 'latin1',
        'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
        data. Default: 'ASCII'
    max_header_size : int, optional
        Maximum allowed size of the header. Large headers may not be safe
        to load securely and thus require explicitly passing a larger value.
        See :py:meth:`ast.literal_eval()` for details.
        This option is ignored when `allow_pickle` is passed. In that case
        the file is by definition trusted and the limit is unnecessary.

    Returns
    -------
    result : array, tuple, dict, etc.
        Data stored in the file. For ``.npz`` files, the returned instance
        of NpzFile class must be closed to avoid leaking file descriptors.

    Raises
    ------
    OSError
        If the input file does not exist or cannot be read.
    UnpicklingError
        If ``allow_pickle=True``, but the file cannot be loaded as a pickle.
    ValueError
        The file contains an object array, but ``allow_pickle=False`` given.

    See Also
    --------
    save, savez, savez_compressed, loadtxt
    memmap : Create a memory-map to an array stored in a file on disk.
    lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.

    Notes
    -----
    - If the file contains pickle data, then whatever object is stored
      in the pickle is returned.
    - If the file is a ``.npy`` file, then a single array is returned.
    - If the file is a ``.npz`` file, then a dictionary-like object is
      returned, containing ``{filename: array}`` key-value pairs, one for
      each file in the archive.
    - If the file is a ``.npz`` file, the returned value supports the
      context manager protocol in a similar fashion to the open function::

        with load('foo.npz') as data:
            a = data['a']

      The underlying file descriptor is closed when exiting the 'with'
      block.

    Examples
    --------
    Store data to disk, and load it again:

    >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
    >>> np.load('/tmp/123.npy')
    array([[1, 2, 3],
           [4, 5, 6]])

    Store compressed data to disk, and load it again:

    >>> a=np.array([[1, 2, 3], [4, 5, 6]])
    >>> b=np.array([1, 2])
    >>> np.savez('/tmp/123.npz', a=a, b=b)
    >>> data = np.load('/tmp/123.npz')
    >>> data['a']
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> data['b']
    array([1, 2])
    >>> data.close()

    Mem-map the stored array, and then access the second row
    directly from disk:

    >>> X = np.load('/tmp/123.npy', mmap_mode='r')
    >>> X[1, :]
    memmap([4, 5, 6])

    """
    if encoding not in ('ASCII', 'latin1', 'bytes'):
        # The 'encoding' value for pickle also affects what encoding
        # the serialized binary data of NumPy arrays is loaded
        # in. Pickle does not pass on the encoding information to
        # NumPy. The unpickling code in numpy.core.multiarray is
        # written to assume that unicode data appearing where binary
        # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
        #
        # Other encoding values can corrupt binary data, and we
        # purposefully disallow them. For the same reason, the errors=
        # argument is not exposed, as values other than 'strict' can
        # similarly silently corrupt numerical data.
        raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")

    pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)

    with contextlib.ExitStack() as stack:
        if hasattr(file, 'read'):
            fid = file
            own_fid = False
        else:
            fid = stack.enter_context(open(os_fspath(file), "rb"))
            own_fid = True

        # Code to distinguish between NumPy binary files and pickles.
        _ZIP_PREFIX = b'PK\x03\x04'
        _ZIP_SUFFIX = b'PK\x05\x06'  # empty zip files start with this
        N = len(format.MAGIC_PREFIX)
        magic = fid.read(N)
        # If the file size is less than N, we need to make sure not
        # to seek past the beginning of the file
        fid.seek(-min(N, len(magic)), 1)  # back-up
        if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
            # zip-file (assume .npz)
            # Potentially transfer file ownership to NpzFile
            stack.pop_all()
            ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
                          pickle_kwargs=pickle_kwargs,
                          max_header_size=max_header_size)
            return ret
        elif magic == format.MAGIC_PREFIX:
            # .npy file
            if mmap_mode:
                if allow_pickle:
                    max_header_size = 2**64
                return format.open_memmap(file, mode=mmap_mode,
                                          max_header_size=max_header_size)
            else:
                return format.read_array(fid, allow_pickle=allow_pickle,
                                         pickle_kwargs=pickle_kwargs,
                                         max_header_size=max_header_size)
        else:
            # Try a pickle
            if not allow_pickle:
                raise ValueError("Cannot load file containing pickled data "
                                 "when allow_pickle=False")
            try:
                return pickle.load(fid, **pickle_kwargs)
            except Exception as e:
                raise pickle.UnpicklingError(
                    f"Failed to interpret file {file!r} as a pickle") from e

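# Added example (not part of the original numpy source): a sketch of the
# magic-byte dispatch performed by `load` above. Given the first bytes of a
# stream, classify the payload the same way `load` does.
def _example_classify_payload(prefix_bytes):
    if prefix_bytes.startswith((b'PK\x03\x04', b'PK\x05\x06')):
        return 'npz (zip archive)'
    if prefix_bytes.startswith(format.MAGIC_PREFIX):
        return 'npy'
    return 'pickle (or unknown)'
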

def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
    return (arr,)


@array_function_dispatch(_save_dispatcher)
def save(file, arr, allow_pickle=True, fix_imports=True):
    """
    Save an array to a binary file in NumPy ``.npy`` format.

    Parameters
    ----------
    file : file, str, or pathlib.Path
        File or filename to which the data is saved. If file is a file-object,
        then the filename is unchanged. If file is a string or Path, a ``.npy``
        extension will be appended to the filename if it does not already
        have one.
    arr : array_like
        Array data to be saved.
    allow_pickle : bool, optional
        Allow saving object arrays using Python pickles. Reasons for disallowing
        pickles include security (loading pickled data can execute arbitrary
        code) and portability (pickled objects may not be loadable on different
        Python installations, for example if the stored objects require libraries
        that are not available, and not all pickled data is compatible between
        Python 2 and Python 3).
        Default: True
    fix_imports : bool, optional
        Only useful in forcing objects in object arrays on Python 3 to be
        pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
        will try to map the new Python 3 names to the old module names used in
        Python 2, so that the pickle data stream is readable with Python 2.

    See Also
    --------
    savez : Save several arrays into a ``.npz`` archive
    savetxt, load

    Notes
    -----
    For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.

    Any data saved to the file is appended to the end of the file.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()

    >>> x = np.arange(10)
    >>> np.save(outfile, x)

    >>> _ = outfile.seek(0)  # Only needed here to simulate closing & reopening file
    >>> np.load(outfile)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])


    >>> with open('test.npy', 'wb') as f:
    ...     np.save(f, np.array([1, 2]))
    ...     np.save(f, np.array([1, 3]))
    >>> with open('test.npy', 'rb') as f:
    ...     a = np.load(f)
    ...     b = np.load(f)
    >>> print(a, b)
    # [1 2] [1 3]
    """
    if hasattr(file, 'write'):
        file_ctx = contextlib.nullcontext(file)
    else:
        file = os_fspath(file)
        if not file.endswith('.npy'):
            file = file + '.npy'
        file_ctx = open(file, "wb")

    with file_ctx as fid:
        arr = np.asanyarray(arr)
        format.write_array(fid, arr, allow_pickle=allow_pickle,
                           pickle_kwargs=dict(fix_imports=fix_imports))

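# Added example (not part of the original numpy source): a small round-trip
# sketch. `save` appends '.npy' only to path inputs, so an io.BytesIO target
# is written to as-is and can be rewound and loaded back.
def _example_save_roundtrip():
    import io
    buf = io.BytesIO()
    np.save(buf, np.arange(4))
    buf.seek(0)
    return np.load(buf)   # array([0, 1, 2, 3])
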

def _savez_dispatcher(file, *args, **kwds):
    yield from args
    yield from kwds.values()


@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, **kwds):
    """Save several arrays into a single file in uncompressed ``.npz`` format.

    Provide arrays as keyword arguments to store them under the
    corresponding name in the output file: ``savez(fn, x=x, y=y)``.

    If arrays are specified as positional arguments, i.e., ``savez(fn,
    x, y)``, their names will be `arr_0`, `arr_1`, etc.

    Parameters
    ----------
    file : str or file
        Either the filename (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the filename if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Please use keyword arguments (see
        `kwds` below) to assign names to arrays. Arrays specified as
        args will be named "arr_0", "arr_1", and so on.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Each array will be saved to the
        output file with its corresponding keyword name.

    Returns
    -------
    None

    See Also
    --------
    save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    savez_compressed : Save several arrays into a compressed ``.npz`` archive

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain. The archive is not compressed and each file
    in the archive contains one variable in ``.npy`` format. For a
    description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Keys passed in `kwds` are used as filenames inside the ZIP archive.
    Therefore, keys should be valid filenames; e.g., avoid keys that begin with
    ``/`` or contain ``.``.

    When naming variables with keyword arguments, it is not possible to name a
    variable ``file``, as this would cause the ``file`` argument to be defined
    twice in the call to ``savez``.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)

    Using `savez` with \\*args, the arrays are saved with default names.

    >>> np.savez(outfile, x, y)
    >>> _ = outfile.seek(0)  # Only needed here to simulate closing & reopening file
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['arr_0', 'arr_1']
    >>> npzfile['arr_0']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    Using `savez` with \\**kwds, the arrays are saved with the keyword names.

    >>> outfile = TemporaryFile()
    >>> np.savez(outfile, x=x, y=y)
    >>> _ = outfile.seek(0)
    >>> npzfile = np.load(outfile)
    >>> sorted(npzfile.files)
    ['x', 'y']
    >>> npzfile['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    _savez(file, args, kwds, False)


def _savez_compressed_dispatcher(file, *args, **kwds):
    yield from args
    yield from kwds.values()


@array_function_dispatch(_savez_compressed_dispatcher)
def savez_compressed(file, *args, **kwds):
    """
    Save several arrays into a single file in compressed ``.npz`` format.

    Provide arrays as keyword arguments to store them under the
    corresponding name in the output file: ``savez(fn, x=x, y=y)``.

    If arrays are specified as positional arguments, i.e., ``savez(fn,
    x, y)``, their names will be `arr_0`, `arr_1`, etc.

    Parameters
    ----------
    file : str or file
        Either the filename (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the filename if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Please use keyword arguments (see
        `kwds` below) to assign names to arrays. Arrays specified as
        args will be named "arr_0", "arr_1", and so on.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Each array will be saved to the
        output file with its corresponding keyword name.

    Returns
    -------
    None

    See Also
    --------
    numpy.save : Save a single array to a binary file in NumPy format.
    numpy.savetxt : Save an array to a file as plain text.
    numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
    numpy.load : Load the files created by savez_compressed.

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain. The archive is compressed with
    ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
    in ``.npy`` format. For a description of the ``.npy`` format, see
    :py:mod:`numpy.lib.format`.

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Examples
    --------
    >>> test_array = np.random.rand(3, 2)
    >>> test_vector = np.random.rand(4)
    >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
    >>> loaded = np.load('/tmp/123.npz')
    >>> print(np.array_equal(test_array, loaded['a']))
    True
    >>> print(np.array_equal(test_vector, loaded['b']))
    True

    """
    _savez(file, args, kwds, True)


def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile

    if not hasattr(file, 'write'):
        file = os_fspath(file)
        if not file.endswith('.npz'):
            file = file + '.npz'

    namedict = kwds
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        if key in namedict.keys():
            raise ValueError(
                "Cannot use un-named variables and keyword %s" % key)
        namedict[key] = val

    if compress:
        compression = zipfile.ZIP_DEFLATED
    else:
        compression = zipfile.ZIP_STORED

    zipf = zipfile_factory(file, mode="w", compression=compression)

    for key, val in namedict.items():
        fname = key + '.npy'
        val = np.asanyarray(val)
        # always force zip64, gh-10776
        with zipf.open(fname, 'w', force_zip64=True) as fid:
            format.write_array(fid, val,
                               allow_pickle=allow_pickle,
                               pickle_kwargs=pickle_kwargs)

    zipf.close()

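# Added example (not part of the original numpy source): a sketch of what
# _savez produces. Each array becomes a '<name>.npy' member inside the zip
# archive, which is how NpzFile later recovers the keys.
def _example_savez_members():
    import io
    import zipfile
    buf = io.BytesIO()
    np.savez(buf, np.arange(2), beta=np.zeros(3))   # positional -> 'arr_0'
    buf.seek(0)
    with zipfile.ZipFile(buf) as zf:
        return sorted(zf.namelist())   # ['arr_0.npy', 'beta.npy']
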

def _ensure_ndmin_ndarray_check_param(ndmin):
    """Just checks if the param ndmin is supported on
    _ensure_ndmin_ndarray. It is intended to be used as
    verification before running anything expensive.
    e.g. loadtxt, genfromtxt
    """
    # Check correctness of the values of `ndmin`
    if ndmin not in [0, 1, 2]:
        raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")


def _ensure_ndmin_ndarray(a, *, ndmin: int):
    """This is a helper function of loadtxt and genfromtxt to ensure
    proper minimum dimension as requested

    ndmin : int. Supported values 0, 1, 2
    ^^ whenever this changes, keep in sync with
       _ensure_ndmin_ndarray_check_param
    """
    # Verify that the array has at least dimensions `ndmin`.
    # Tweak the size and shape of the arrays - remove extraneous dimensions
    if a.ndim > ndmin:
        a = np.squeeze(a)
    # and ensure we have the minimum number of dimensions asked for
    # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0
    if a.ndim < ndmin:
        if ndmin == 1:
            a = np.atleast_1d(a)
        elif ndmin == 2:
            a = np.atleast_2d(a).T

    return a

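# Added example (not part of the original numpy source): a sketch of the
# ndmin contract. Extraneous dimensions are squeezed first, then the result
# is padded back up; ndmin=2 promotes a 0-D or 1-D result to a column.
def _example_ndmin():
    a = np.array([[1., 2., 3.]])                              # shape (1, 3)
    assert _ensure_ndmin_ndarray(a, ndmin=1).shape == (3,)
    assert _ensure_ndmin_ndarray(np.array(5.0), ndmin=2).shape == (1, 1)
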

# amount of lines loadtxt reads in one chunk, can be overridden for testing
_loadtxt_chunksize = 50000


def _loadtxt_dispatcher(
        fname, dtype=None, comments=None, delimiter=None,
        converters=None, skiprows=None, usecols=None, unpack=None,
        ndmin=None, encoding=None, max_rows=None, *, like=None):
    return (like,)


def _check_nonneg_int(value, name="argument"):
    try:
        operator.index(value)
    except TypeError:
        raise TypeError(f"{name} must be an integer") from None
    if value < 0:
        raise ValueError(f"{name} must be nonnegative")


def _preprocess_comments(iterable, comments, encoding):
    """
    Generator that consumes an iterable of lines and strips out the
    multiple (or multi-character) comments from those lines.
    This is a pre-processing step to achieve feature parity with loadtxt
    (we assume that this is a niche feature).
    """
    for line in iterable:
        if isinstance(line, bytes):
            # Need to handle conversion here, or the splitting would fail
            line = line.decode(encoding)

        for c in comments:
            line = line.split(c, 1)[0]

        yield line

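# Added example (not part of the original numpy source): every comment token
# truncates a line at its first occurrence, which is how multiple and
# multi-character comment markers are emulated before the fast parser runs.
def _example_preprocess_comments():
    lines = ['1 2 // trailing', '3 4 # other style', '5 6']
    return list(_preprocess_comments(lines, ['//', '#'], encoding=None))
    # -> ['1 2 ', '3 4 ', '5 6']
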

# The number of rows we read in one go if confronted with a parametric dtype
_loadtxt_chunksize = 50000


def _read(fname, *, delimiter=',', comment='#', quote='"',
          imaginary_unit='j', usecols=None, skiplines=0,
          max_rows=None, converters=None, ndmin=None, unpack=False,
          dtype=np.float64, encoding="bytes"):
    r"""
    Read a NumPy array from a text file.

    Parameters
    ----------
    fname : str or file object
        The filename or the file to be read.
    delimiter : str, optional
        Field delimiter of the fields in a line of the file.
        Default is a comma, ','. If None any sequence of whitespace is
        considered a delimiter.
    comment : str or sequence of str or None, optional
        Character that begins a comment. All text from the comment
        character to the end of the line is ignored.
        Multiple comments or multiple-character comment strings are supported,
        but may be slower and `quote` must be empty if used.
        Use None to disable all use of comments.
    quote : str or None, optional
        Character that is used to quote string fields. Default is '"'
        (a double quote). Use None to disable quote support.
    imaginary_unit : str, optional
        Character that represents the imaginary unit `sqrt(-1)`.
        Default is 'j'.
    usecols : array_like, optional
        A one-dimensional array of integer column numbers. These are the
        columns from the file to be included in the array. If this value
        is not given, all the columns are used.
    skiplines : int, optional
        Number of lines to skip before interpreting the data in the file.
    max_rows : int, optional
        Maximum number of rows of data to read. Default is to read the
        entire file.
    converters : dict or callable, optional
        A function to parse all columns strings into the desired value, or
        a dictionary mapping column number to a parser function.
        E.g. if column 0 is a date string: ``converters = {0: datestr2num}``.
        Converters can also be used to provide a default value for missing
        data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will
        convert empty fields to 0.
        Default: None
    ndmin : int, optional
        Minimum dimension of the array returned.
        Allowed values are 0, 1 or 2. Default is 0.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = read(...)``. When used with a structured
        data-type, arrays are returned for each field. Default is False.
    dtype : numpy data type
        A NumPy dtype instance, can be a structured dtype to map to the
        columns of the file.
    encoding : str, optional
        Encoding used to decode the input file. The special value 'bytes'
        (the default) enables backwards-compatible behavior for `converters`,
        ensuring that inputs to the converter functions are encoded
        bytes objects. The special value 'bytes' has no additional effect if
        ``converters=None``. If encoding is ``'bytes'`` or ``None``, the
        default system encoding is used.

    Returns
    -------
    ndarray
        NumPy array.

    Examples
    --------
    First we create a file for the example. Note that `_read` requires an
    explicit `ndmin` and (unlike `loadtxt`) does no dtype discovery, so the
    structured dtype in the second example is given explicitly.

    >>> s1 = '1.0,2.0,3.0\n4.0,5.0,6.0\n'
    >>> with open('example1.csv', 'w') as f:
    ...     f.write(s1)
    >>> a1 = _read('example1.csv', ndmin=0)
    >>> a1
    array([[1., 2., 3.],
           [4., 5., 6.]])

    The second example has columns with different data types, so a
    one-dimensional array with a structured data type is returned.
    The tab character is used as the field delimiter.

    >>> s2 = '1.0\t10\talpha\n2.3\t25\tbeta\n4.5\t16\tgamma\n'
    >>> with open('example2.tsv', 'w') as f:
    ...     f.write(s2)
    >>> a2 = _read('example2.tsv', delimiter='\t', ndmin=0,
    ...            dtype=np.dtype("f8, u1, S5"))
    >>> a2
    array([(1. , 10, b'alpha'), (2.3, 25, b'beta'), (4.5, 16, b'gamma')],
          dtype=[('f0', '<f8'), ('f1', 'u1'), ('f2', 'S5')])
    """
    # Handle special 'bytes' keyword for encoding
    byte_converters = False
    if encoding == 'bytes':
        encoding = None
        byte_converters = True

    if dtype is None:
        raise TypeError("a dtype must be provided.")
    dtype = np.dtype(dtype)

    read_dtype_via_object_chunks = None
    if dtype.kind in 'SUM' and (
            dtype == "S0" or dtype == "U0" or dtype == "M8" or dtype == 'm8'):
        # This is a legacy "flexible" dtype. We do not truly support
        # parametric dtypes currently (no dtype discovery step in the core),
        # but have to support these for backward compatibility.
        read_dtype_via_object_chunks = dtype
        dtype = np.dtype(object)

    if usecols is not None:
        # Allow usecols to be a single int or a sequence of ints, the C-code
        # handles the rest
        try:
            usecols = list(usecols)
        except TypeError:
            usecols = [usecols]

    _ensure_ndmin_ndarray_check_param(ndmin)

    if comment is None:
        comments = None
    else:
        # assume comments are a sequence of strings
        if "" in comment:
            raise ValueError(
                "comments cannot be an empty string. Use comments=None to "
                "disable comments."
            )
        comments = tuple(comment)
        comment = None
        if len(comments) == 0:
            comments = None  # No comments at all
        elif len(comments) == 1:
            # If there is only one comment, and that comment has one character,
            # the normal parsing can deal with it just fine.
            if isinstance(comments[0], str) and len(comments[0]) == 1:
                comment = comments[0]
                comments = None
        else:
            # Input validation if there are multiple comment characters
            if delimiter in comments:
                raise TypeError(
                    f"Comment characters '{comments}' cannot include the "
                    f"delimiter '{delimiter}'"
                )

    # comment is now either a 1 or 0 character string or a tuple:
    if comments is not None:
        # Note: An earlier version supported two-character comments (and could
        # have been extended to multiple characters); we assume this is
        # rare enough to not optimize for.
        if quote is not None:
            raise ValueError(
                "when multiple comments or a multi-character comment is "
                "given, quotes are not supported. In this case quotechar "
                "must be set to None.")

    if len(imaginary_unit) != 1:
        raise ValueError('len(imaginary_unit) must be 1.')

    _check_nonneg_int(skiplines)
    if max_rows is not None:
        _check_nonneg_int(max_rows)
    else:
        # Passing -1 to the C code means "read the entire file".
        max_rows = -1

    fh_closing_ctx = contextlib.nullcontext()
    filelike = False
    try:
        if isinstance(fname, os.PathLike):
            fname = os.fspath(fname)
        if isinstance(fname, str):
            fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
            if encoding is None:
                encoding = getattr(fh, 'encoding', 'latin1')

            fh_closing_ctx = contextlib.closing(fh)
            data = fh
            filelike = True
        else:
            if encoding is None:
                encoding = getattr(fname, 'encoding', 'latin1')
            data = iter(fname)
    except TypeError as e:
        raise ValueError(
            f"fname must be a string, filehandle, list of strings,\n"
            f"or generator. Got {type(fname)} instead.") from e

    with fh_closing_ctx:
        if comments is not None:
            if filelike:
                data = iter(data)
                filelike = False
            data = _preprocess_comments(data, comments, encoding)

        if read_dtype_via_object_chunks is None:
            arr = _load_from_filelike(
                data, delimiter=delimiter, comment=comment, quote=quote,
                imaginary_unit=imaginary_unit,
                usecols=usecols, skiplines=skiplines, max_rows=max_rows,
                converters=converters, dtype=dtype,
                encoding=encoding, filelike=filelike,
                byte_converters=byte_converters)

        else:
            # This branch reads the file into chunks of object arrays and then
            # casts them to the desired actual dtype. This ensures correct
            # string-length and datetime-unit discovery (like `arr.astype()`).
            # Due to chunking, certain error reports are less clear, currently.
            if filelike:
                data = iter(data)  # cannot chunk when reading from file

            c_byte_converters = False
            if read_dtype_via_object_chunks == "S":
                c_byte_converters = True  # Use latin1 rather than ascii

            chunks = []
            while max_rows != 0:
                if max_rows < 0:
                    chunk_size = _loadtxt_chunksize
                else:
                    chunk_size = min(_loadtxt_chunksize, max_rows)

                next_arr = _load_from_filelike(
                    data, delimiter=delimiter, comment=comment, quote=quote,
                    imaginary_unit=imaginary_unit,
                    usecols=usecols, skiplines=skiplines, max_rows=max_rows,
                    converters=converters, dtype=dtype,
                    encoding=encoding, filelike=filelike,
                    byte_converters=byte_converters,
                    c_byte_converters=c_byte_converters)
                # Cast here already. We hope that this is better even for
                # large files because the storage is more compact. It could
                # be adapted (in principle the concatenate could cast).
                chunks.append(next_arr.astype(read_dtype_via_object_chunks))

                skiplines = 0  # Only have to skip for first chunk
                if max_rows >= 0:
                    max_rows -= chunk_size
                if len(next_arr) < chunk_size:
                    # There was less data than requested, so we are done.
                    break

            # Need at least one chunk, but if empty, the last one may have
            # the wrong shape.
            if len(chunks) > 1 and len(chunks[-1]) == 0:
                del chunks[-1]
            if len(chunks) == 1:
                arr = chunks[0]
            else:
                arr = np.concatenate(chunks, axis=0)

    # NOTE: ndmin works as advertised for structured dtypes, but normally
    #       these would return a 1D result plus the structured dimension,
    #       so ndmin=2 adds a third dimension even when no squeezing occurs.
    #       A `squeeze=False` could be a better solution (pandas uses squeeze).
    arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin)

    if arr.shape:
        if arr.shape[0] == 0:
            warnings.warn(
                f'loadtxt: input contained no data: "{fname}"',
                category=UserWarning,
                stacklevel=3
            )

    if unpack:
        # Unpack structured dtypes if requested:
        dt = arr.dtype
        if dt.names is not None:
            # For structured arrays, return an array for each field.
            return [arr[field] for field in dt.names]
        else:
            return arr.T
    else:
        return arr

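# Added example (not part of the original numpy source): a sketch of the
# chunked path above. A flexible dtype such as "S" (no itemsize) is read as
# object chunks and then cast, so the final string width is discovered from
# the data, as `arr.astype("S")` would do.
def _example_flexible_dtype():
    import io
    s = io.StringIO("apple 1\nbanana 2\n")
    return np.loadtxt(s, dtype="S", encoding=None)
    # -> array([[b'apple', b'1'], [b'banana', b'2']], dtype='|S6')
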

@set_array_function_like_doc
@set_module('numpy')
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
            converters=None, skiprows=0, usecols=None, unpack=False,
            ndmin=0, encoding='bytes', max_rows=None, *, quotechar=None,
            like=None):
    r"""
    Load data from a text file.

    Parameters
    ----------
    fname : file, str, pathlib.Path, list of str, generator
        File, filename, list, or generator to read. If the filename
        extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
        that generators must return bytes or strings. The strings
        in a list or produced by a generator are treated as lines.
    dtype : data-type, optional
        Data-type of the resulting array; default: float. If this is a
        structured data-type, the resulting array will be 1-dimensional, and
        each row will be interpreted as an element of the array. In this
        case, the number of columns used must match the number of fields in
        the data-type.
    comments : str or sequence of str or None, optional
        The characters or list of characters used to indicate the start of a
        comment. None implies no comments. For backwards compatibility, byte
        strings will be decoded as 'latin1'. The default is '#'.
    delimiter : str, optional
        The character used to separate the values. For backwards compatibility,
        byte strings will be decoded as 'latin1'. The default is whitespace.

        .. versionchanged:: 1.23.0
           Only single character delimiters are supported. Newline characters
           cannot be used as the delimiter.

    converters : dict or callable, optional
        Converter functions to customize value parsing. If `converters` is
        callable, the function is applied to all columns, else it must be a
        dict that maps column number to a parser function.
        See examples for further details.
        Default: None.

        .. versionchanged:: 1.23.0
           The ability to pass a single callable to be applied to all columns
           was added.

    skiprows : int, optional
        Skip the first `skiprows` lines, including comments; default: 0.
    usecols : int or sequence, optional
        Which columns to read, with 0 being the first. For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.

        .. versionchanged:: 1.11.0
            When a single column has to be read it is possible to use
            an integer instead of a tuple. E.g ``usecols = 3`` reads the
            fourth column the same way as ``usecols = (3,)`` would.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``. When used with a
        structured data-type, arrays are returned for each field.
        Default is False.
    ndmin : int, optional
        The returned array will have at least `ndmin` dimensions.
        Otherwise mono-dimensional axes will be squeezed.
        Legal values: 0 (default), 1 or 2.

        .. versionadded:: 1.6.0
    encoding : str, optional
        Encoding used to decode the input file. Does not apply to input
        streams. The special value 'bytes' enables backward compatibility
        workarounds that ensure you receive byte arrays as results if possible
        and pass 'latin1' encoded strings to converters. Override this value to
        receive unicode arrays and pass strings as input to converters. If set
        to None the system default is used. The default value is 'bytes'.

        .. versionadded:: 1.14.0
    max_rows : int, optional
        Read `max_rows` rows of content after `skiprows` lines. The default is
        to read all the rows. Note that empty rows containing no data such as
        empty lines and comment lines are not counted towards `max_rows`,
        while such lines are counted in `skiprows`.

        .. versionadded:: 1.16.0

        .. versionchanged:: 1.23.0
            Lines containing no data, including comment lines (e.g., lines
            starting with '#' or as specified via `comments`) are not counted
            towards `max_rows`.
    quotechar : unicode character or None, optional
        The character used to denote the start and end of a quoted item.
        Occurrences of the delimiter or comment characters are ignored within
        a quoted item. The default value is ``quotechar=None``, which means
        quoting support is disabled.

        If two consecutive instances of `quotechar` are found within a quoted
        field, the first is treated as an escape character. See examples.

        .. versionadded:: 1.23.0
    ${ARRAY_FUNCTION_LIKE}

        .. versionadded:: 1.20.0

    Returns
    -------
    out : ndarray
        Data read from the text file.

    See Also
    --------
    load, fromstring, fromregex
    genfromtxt : Load data with missing values handled as specified.
    scipy.io.loadmat : reads MATLAB data files

    Notes
    -----
    This function aims to be a fast reader for simply formatted files. The
    `genfromtxt` function provides more sophisticated handling of, e.g.,
    lines with missing values.

    Each row in the input text file must have the same number of values to be
    able to read all values. If all rows do not have same number of values, a
    subset of up to n columns (where n is the least number of values present
    in all rows) can be read by specifying the columns via `usecols`.

    .. versionadded:: 1.10.0

    The strings produced by the Python float.hex method can be used as
    input for floats.

    Examples
    --------
    >>> from io import StringIO   # StringIO behaves like a file object
    >>> c = StringIO("0 1\n2 3")
    >>> np.loadtxt(c)
    array([[0., 1.],
           [2., 3.]])

    >>> d = StringIO("M 21 72\nF 35 58")
    >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
    ...                      'formats': ('S1', 'i4', 'f4')})
    array([(b'M', 21, 72.), (b'F', 35, 58.)],
          dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])

    >>> c = StringIO("1,0,2\n3,0,4")
    >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
    >>> x
    array([1., 3.])
    >>> y
    array([2., 4.])

    The `converters` argument is used to specify functions to preprocess the
    text prior to parsing. `converters` can be a dictionary that maps
    preprocessing functions to each column:

    >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
    >>> conv = {
    ...     0: lambda x: np.floor(float(x)),  # conversion fn for column 0
    ...     1: lambda x: np.ceil(float(x)),   # conversion fn for column 1
    ... }
    >>> np.loadtxt(s, delimiter=",", converters=conv)
    array([[1., 3.],
           [3., 5.]])

    `converters` can be a callable instead of a dictionary, in which case it
    is applied to all columns:

    >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
    >>> import functools
    >>> conv = functools.partial(int, base=16)
    >>> np.loadtxt(s, converters=conv)
    array([[222., 173.],
           [192., 222.]])

    This example shows how `converters` can be used to convert a field
    with a trailing minus sign into a negative number.

    >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
    >>> def conv(fld):
    ...     return -float(fld[:-1]) if fld.endswith(b'-') else float(fld)
    ...
    >>> np.loadtxt(s, converters=conv)
    array([[ 10.01, -31.25],
           [ 19.22,  64.31],
           [-17.57,  63.94]])

    Using a callable as the converter can be particularly useful for handling
    values with different formatting, e.g. floats with underscores:

    >>> s = StringIO("1 2.7 100_000")
    >>> np.loadtxt(s, converters=float)
    array([1.e+00, 2.7e+00, 1.e+05])

    This idea can be extended to automatically handle values specified in
    many different formats:

    >>> def conv(val):
    ...     try:
    ...         return float(val)
    ...     except ValueError:
    ...         return float.fromhex(val)
    >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
    >>> np.loadtxt(s, delimiter=",", converters=conv, encoding=None)
    array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])

    Note that with the default ``encoding="bytes"``, the inputs to the
    converter function are latin-1 encoded byte strings. To deactivate the
    implicit encoding prior to conversion, use ``encoding=None``

    >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
    >>> conv = lambda x: -float(x[:-1]) if x.endswith('-') else float(x)
    >>> np.loadtxt(s, converters=conv, encoding=None)
    array([[ 10.01, -31.25],
           [ 19.22,  64.31],
           [-17.57,  63.94]])

    Support for quoted fields is enabled with the `quotechar` parameter.
    Comment and delimiter characters are ignored when they appear within a
    quoted item delineated by `quotechar`:

    >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')
    >>> dtype = np.dtype([("label", "U12"), ("value", float)])
    >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')
    array([('alpha, #42', 10.), ('beta, #64', 2.)],
          dtype=[('label', '<U12'), ('value', '<f8')])

    Quoted fields can be separated by multiple whitespace characters:

    >>> s = StringIO('"alpha, #42"       10.0\n"beta, #64" 2.0\n')
    >>> dtype = np.dtype([("label", "U12"), ("value", float)])
    >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')
    array([('alpha, #42', 10.), ('beta, #64', 2.)],
          dtype=[('label', '<U12'), ('value', '<f8')])

    Two consecutive quote characters within a quoted field are treated as a
    single escaped character:

    >>> s = StringIO('"Hello, my name is ""Monty""!"')
    >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
    array('Hello, my name is "Monty"!', dtype='<U26')

    Read subset of columns when all rows do not contain equal number of values:

    >>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20")
    >>> np.loadtxt(d, usecols=(0, 1))
    array([[ 1.,  2.],
           [ 2.,  4.],
           [ 3.,  9.],
           [ 4., 16.]])

    """

    if like is not None:
        return _loadtxt_with_like(
            fname, dtype=dtype, comments=comments, delimiter=delimiter,
            converters=converters, skiprows=skiprows, usecols=usecols,
            unpack=unpack, ndmin=ndmin, encoding=encoding,
            max_rows=max_rows, like=like
        )

    if isinstance(delimiter, bytes):
        # bind the decoded result; a bare ``delimiter.decode(...)`` is a no-op
        delimiter = delimiter.decode("latin1")

    if dtype is None:
        dtype = np.float64

    comment = comments
    # Control character type conversions for Py3 convenience
    if comment is not None:
        if isinstance(comment, (str, bytes)):
            comment = [comment]
        comment = [
            x.decode('latin1') if isinstance(x, bytes) else x for x in comment]
    if isinstance(delimiter, bytes):
        delimiter = delimiter.decode('latin1')

    arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
                converters=converters, skiplines=skiprows, usecols=usecols,
                unpack=unpack, ndmin=ndmin, encoding=encoding,
                max_rows=max_rows, quote=quotechar)

    return arr


_loadtxt_with_like = array_function_dispatch(
    _loadtxt_dispatcher, use_like=True
)(loadtxt)


1369def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None, 

1370 header=None, footer=None, comments=None, 

1371 encoding=None): 

1372 return (X,) 

1373 

1374 

1375@array_function_dispatch(_savetxt_dispatcher) 

1376def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='', 

1377 footer='', comments='# ', encoding=None): 

1378 """ 

1379 Save an array to a text file. 

1380 

1381 Parameters 

1382 ---------- 

1383 fname : filename or file handle 

1384 If the filename ends in ``.gz``, the file is automatically saved in 

1385 compressed gzip format. `loadtxt` understands gzipped files 

1386 transparently. 

1387 X : 1D or 2D array_like 

1388 Data to be saved to a text file. 

1389 fmt : str or sequence of strs, optional 

1390 A single format (%10.5f), a sequence of formats, or a 

1391 multi-format string, e.g. 'Iteration %d -- %10.5f', in which 

1392 case `delimiter` is ignored. For complex `X`, the legal options 

1393 for `fmt` are: 

1394 

1395 * a single specifier, `fmt='%.4e'`, resulting in numbers formatted 

1396 like `' (%s+%sj)' % (fmt, fmt)` 

1397 * a full string specifying every real and imaginary part, e.g. 

1398 `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns 

1399 * a list of specifiers, one per column - in this case, the real 

1400 and imaginary part must have separate specifiers, 

1401 e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns 

1402 delimiter : str, optional 

1403 String or character separating columns. 

1404 newline : str, optional 

1405 String or character separating lines. 

1406 

1407 .. versionadded:: 1.5.0 

1408 header : str, optional 

1409 String that will be written at the beginning of the file. 

1410 

1411 .. versionadded:: 1.7.0 

1412 footer : str, optional 

1413 String that will be written at the end of the file. 

1414 

1415 .. versionadded:: 1.7.0 

1416 comments : str, optional 

1417 String that will be prepended to the ``header`` and ``footer`` strings, 

1418 to mark them as comments. Default: '# ', as expected by e.g. 

1419 ``numpy.loadtxt``. 

1420 

1421 .. versionadded:: 1.7.0 

1422 encoding : {None, str}, optional 

1423 Encoding used to encode the outputfile. Does not apply to output 

1424 streams. If the encoding is something other than 'bytes' or 'latin1' 

1425 you will not be able to load the file in NumPy versions < 1.14. Default 

1426 is 'latin1'. 

1427 

1428 .. versionadded:: 1.14.0 

1429 

1430 

1431 See Also 

1432 -------- 

1433 save : Save an array to a binary file in NumPy ``.npy`` format 

1434 savez : Save several arrays into an uncompressed ``.npz`` archive 

1435 savez_compressed : Save several arrays into a compressed ``.npz`` archive 

1436 

1437 Notes 

1438 ----- 

1439 Further explanation of the `fmt` parameter 

1440 (``%[flag]width[.precision]specifier``): 

1441 

1442 flags: 

1443 ``-`` : left justify 

1444 

1445 ``+`` : Forces to precede result with + or -. 

1446 

1447 ``0`` : Left pad the number with zeros instead of space (see width). 

1448 

1449 width: 

1450 Minimum number of characters to be printed. The value is not truncated 

1451 if it has more characters. 

1452 

1453 precision: 

1454 - For integer specifiers (eg. ``d,i,o,x``), the minimum number of 

1455 digits. 

1456 - For ``e, E`` and ``f`` specifiers, the number of digits to print 

1457 after the decimal point. 

1458 - For ``g`` and ``G``, the maximum number of significant digits. 

1459 - For ``s``, the maximum number of characters. 

1460 

1461 specifiers: 

1462 ``c`` : character 

1463 

1464 ``d`` or ``i`` : signed decimal integer 

1465 

1466 ``e`` or ``E`` : scientific notation with ``e`` or ``E``. 

1467 

1468 ``f`` : decimal floating point 

1469 

1470 ``g,G`` : use the shorter of ``e,E`` or ``f`` 

1471 

1472 ``o`` : signed octal 

1473 

1474 ``s`` : string of characters 

1475 

1476 ``u`` : unsigned decimal integer 

1477 

1478 ``x,X`` : unsigned hexadecimal integer 

1479 

1480 This explanation of ``fmt`` is not complete, for an exhaustive 

1481 specification see [1]_. 

1482 

1483 References 

1484 ---------- 

1485 .. [1] `Format Specification Mini-Language 

1486 <https://docs.python.org/library/string.html#format-specification-mini-language>`_, 

1487 Python Documentation. 

1488 

1489 Examples 

1490 -------- 

1491 >>> x = y = z = np.arange(0.0,5.0,1.0) 

1492 >>> np.savetxt('test.out', x, delimiter=',') # X is an array 

1493 >>> np.savetxt('test.out', (x,y,z)) # x,y,z equal sized 1D arrays 

1494 >>> np.savetxt('test.out', x, fmt='%1.4e') # use exponential notation 

1495 

1496 """ 

1497 

1498 # Py3 conversions first 

1499 if isinstance(fmt, bytes): 

1500 fmt = asstr(fmt) 

1501 delimiter = asstr(delimiter) 

1502 

1503 class WriteWrap: 

1504 """Convert to bytes on bytestream inputs. 

1505 

1506 """ 

1507 def __init__(self, fh, encoding): 

1508 self.fh = fh 

1509 self.encoding = encoding 

1510 self.do_write = self.first_write 

1511 

1512 def close(self): 

1513 self.fh.close() 

1514 

1515 def write(self, v): 

1516 self.do_write(v) 

1517 

1518 def write_bytes(self, v): 

1519 if isinstance(v, bytes): 

1520 self.fh.write(v) 

1521 else: 

1522 self.fh.write(v.encode(self.encoding)) 

1523 

1524 def write_normal(self, v): 

1525 self.fh.write(asunicode(v)) 

1526 

1527 def first_write(self, v): 

1528 try: 

1529 self.write_normal(v) 

1530 self.write = self.write_normal 

1531 except TypeError: 

1532 # input is probably a bytestream 

1533 self.write_bytes(v) 

1534 self.write = self.write_bytes 

1535 

1536 own_fh = False 

1537 if isinstance(fname, os_PathLike): 

1538 fname = os_fspath(fname) 

1539 if _is_string_like(fname): 

1540 # datasource doesn't support creating a new file ... 

1541 open(fname, 'wt').close() 

1542 fh = np.lib._datasource.open(fname, 'wt', encoding=encoding) 

1543 own_fh = True 

1544 elif hasattr(fname, 'write'): 

1545 # wrap to handle byte output streams 

1546 fh = WriteWrap(fname, encoding or 'latin1') 

1547 else: 

1548 raise ValueError('fname must be a string or file handle') 

1549 

1550 try: 

1551 X = np.asarray(X) 

1552 

1553 # Check dimensionality; 1-d arrays are handled specially below 

1554 if X.ndim == 0 or X.ndim > 2: 

1555 raise ValueError( 

1556 "Expected 1D or 2D array, got %dD array instead" % X.ndim) 

1557 elif X.ndim == 1: 

1558 # Common case -- 1d array of numbers 

1559 if X.dtype.names is None: 

1560 X = np.atleast_2d(X).T 

1561 ncol = 1 

1562 

1563 # Complex dtype -- each field indicates a separate column 

1564 else: 

1565 ncol = len(X.dtype.names) 

1566 else: 

1567 ncol = X.shape[1] 

1568 

1569 iscomplex_X = np.iscomplexobj(X) 

1570 # `fmt` can be a string with multiple insertion points or a 

1571 # list of formats. E.g. '%10.5f\t%10d' or ('%10.5f', '%10d') 

1572 if type(fmt) in (list, tuple): 

1573 if len(fmt) != ncol: 

1574 raise AttributeError('fmt has wrong shape. %s' % str(fmt)) 

1575 format = asstr(delimiter).join(map(asstr, fmt)) 

1576 elif isinstance(fmt, str): 

1577 n_fmt_chars = fmt.count('%') 

1578 error = ValueError('fmt has wrong number of %% formats: %s' % fmt) 

1579 if n_fmt_chars == 1: 

1580 if iscomplex_X: 

1581 fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol 

1582 else: 

1583 fmt = [fmt, ] * ncol 

1584 format = delimiter.join(fmt) 

1585 elif iscomplex_X and n_fmt_chars != (2 * ncol): 

1586 raise error 

1587 elif ((not iscomplex_X) and n_fmt_chars != ncol): 

1588 raise error 

1589 else: 

1590 format = fmt 

1591 else: 

1592 raise ValueError('invalid fmt: %r' % (fmt,)) 

1593 

1594 if len(header) > 0: 

1595 header = header.replace('\n', '\n' + comments) 

1596 fh.write(comments + header + newline) 

1597 if iscomplex_X: 

1598 for row in X: 

1599 row2 = [] 

1600 for number in row: 

1601 row2.append(number.real) 

1602 row2.append(number.imag) 

1603 s = format % tuple(row2) + newline 
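
# a negative imaginary part formats as e.g. '(1+-2j)', so collapse 

# the '+-' into '-' before writing 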

1604 fh.write(s.replace('+-', '-')) 

1605 else: 

1606 for row in X: 

1607 try: 

1608 v = format % tuple(row) + newline 

1609 except TypeError as e: 

1610 raise TypeError("Mismatch between array dtype ('%s') and " 

1611 "format specifier ('%s')" 

1612 % (str(X.dtype), format)) from e 

1613 fh.write(v) 

1614 

1615 if len(footer) > 0: 

1616 footer = footer.replace('\n', '\n' + comments) 

1617 fh.write(comments + footer + newline) 

1618 finally: 

1619 if own_fh: 

1620 fh.close() 

1621 

1622 

1623@set_module('numpy') 

1624def fromregex(file, regexp, dtype, encoding=None): 

1625 r""" 

1626 Construct an array from a text file, using regular expression parsing. 

1627 

1628 The returned array is always a structured array, and is constructed from 

1629 all matches of the regular expression in the file. Groups in the regular 

1630 expression are converted to fields of the structured array. 

1631 

1632 Parameters 

1633 ---------- 

1634 file : path or file 

1635 Filename or file object to read. 

1636 

1637 .. versionchanged:: 1.22.0 

1638 Now accepts `os.PathLike` implementations. 

1639 regexp : str or regexp 

1640 Regular expression used to parse the file. 

1641 Groups in the regular expression correspond to fields in the dtype. 

1642 dtype : dtype or list of dtypes 

1643 Dtype for the structured array; must be a structured datatype. 

1644 encoding : str, optional 

1645 Encoding used to decode the input file. Does not apply to input streams. 

1646 

1647 .. versionadded:: 1.14.0 

1648 

1649 Returns 

1650 ------- 

1651 output : ndarray 

1652 The output array, containing the part of the content of `file` that 

1653 was matched by `regexp`. `output` is always a structured array. 

1654 

1655 Raises 

1656 ------ 

1657 TypeError 

1658 When `dtype` is not a valid dtype for a structured array. 

1659 

1660 See Also 

1661 -------- 

1662 fromstring, loadtxt 

1663 

1664 Notes 

1665 ----- 

1666 Dtypes for structured arrays can be specified in several forms, but all 

1667 forms specify at least the data type and field name. For details see 

1668 `basics.rec`. 

1669 

1670 Examples 

1671 -------- 

1672 >>> from io import StringIO 

1673 >>> text = StringIO("1312 foo\n1534 bar\n444 qux") 

1674 

1675 >>> regexp = r"(\d+)\s+(...)" # match [digits, whitespace, anything] 

1676 >>> output = np.fromregex(text, regexp, 

1677 ... [('num', np.int64), ('key', 'S3')]) 

1678 >>> output 

1679 array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')], 

1680 dtype=[('num', '<i8'), ('key', 'S3')]) 

1681 >>> output['num'] 

1682 array([1312, 1534, 444]) 
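
A single-group pattern is read with the field's base dtype and then 

viewed as the structured dtype (a sketch, reusing ``text``): 

>>> _ = text.seek(0) 

>>> out = np.fromregex(text, r"(\d+)", [('num', np.int64)]) 

>>> out.dtype 

dtype([('num', '<i8')]) 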

1683 

1684 """ 

1685 own_fh = False 

1686 if not hasattr(file, "read"): 

1687 file = os.fspath(file) 

1688 file = np.lib._datasource.open(file, 'rt', encoding=encoding) 

1689 own_fh = True 

1690 

1691 try: 

1692 if not isinstance(dtype, np.dtype): 

1693 dtype = np.dtype(dtype) 

1694 if dtype.names is None: 

1695 raise TypeError('dtype must be a structured datatype.') 

1696 

1697 content = file.read() 

1698 if isinstance(content, bytes) and isinstance(regexp, str): 

1699 regexp = asbytes(regexp) 

1700 elif isinstance(content, str) and isinstance(regexp, bytes): 

1701 regexp = asstr(regexp) 

1702 

1703 if not hasattr(regexp, 'match'): 

1704 regexp = re.compile(regexp) 

1705 seq = regexp.findall(content) 

1706 if seq and not isinstance(seq[0], tuple): 

1707 # Only one group is in the regexp. 

1708 # Create the new array as a single data-type and then 

1709 # re-interpret as a single-field structured array. 

1710 newdtype = np.dtype(dtype[dtype.names[0]]) 

1711 output = np.array(seq, dtype=newdtype) 

1712 output.dtype = dtype 

1713 else: 

1714 output = np.array(seq, dtype=dtype) 

1715 

1716 return output 

1717 finally: 

1718 if own_fh: 

1719 file.close() 

1720 

1721 

1722#####-------------------------------------------------------------------------- 

1723#---- --- ASCII functions --- 

1724#####-------------------------------------------------------------------------- 

1725 

1726 

1727def _genfromtxt_dispatcher(fname, dtype=None, comments=None, delimiter=None, 

1728 skip_header=None, skip_footer=None, converters=None, 

1729 missing_values=None, filling_values=None, usecols=None, 

1730 names=None, excludelist=None, deletechars=None, 

1731 replace_space=None, autostrip=None, case_sensitive=None, 

1732 defaultfmt=None, unpack=None, usemask=None, loose=None, 

1733 invalid_raise=None, max_rows=None, encoding=None, 

1734 *, ndmin=None, like=None): 

1735 return (like,) 

1736 

1737 

1738@set_array_function_like_doc 

1739@set_module('numpy') 

1740def genfromtxt(fname, dtype=float, comments='#', delimiter=None, 

1741 skip_header=0, skip_footer=0, converters=None, 

1742 missing_values=None, filling_values=None, usecols=None, 

1743 names=None, excludelist=None, 

1744 deletechars=''.join(sorted(NameValidator.defaultdeletechars)), 

1745 replace_space='_', autostrip=False, case_sensitive=True, 

1746 defaultfmt="f%i", unpack=None, usemask=False, loose=True, 

1747 invalid_raise=True, max_rows=None, encoding='bytes', 

1748 *, ndmin=0, like=None): 

1749 """ 

1750 Load data from a text file, with missing values handled as specified. 

1751 

1752 Each line past the first `skip_header` lines is split at the `delimiter` 

1753 character, and characters following the `comments` character are discarded. 

1754 

1755 Parameters 

1756 ---------- 

1757 fname : file, str, pathlib.Path, list of str, generator 

1758 File, filename, list, or generator to read. If the filename 

1759 extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note 

1760 that generators must return bytes or strings. The strings 

1761 in a list or produced by a generator are treated as lines. 

1762 dtype : dtype, optional 

1763 Data type of the resulting array. 

1764 If None, the dtypes will be determined by the contents of each 

1765 column, individually. 

1766 comments : str, optional 

1767 The character used to indicate the start of a comment. 

1768 All the characters occurring on a line after a comment are discarded. 

1769 delimiter : str, int, or sequence, optional 

1770 The string used to separate values. By default, any consecutive 

1771 whitespace acts as the delimiter. An integer or sequence of integers 

1772 can also be provided as width(s) of each field. 

1773 skiprows : int, optional 

1774 `skiprows` was removed in numpy 1.10. Please use `skip_header` instead. 

1775 skip_header : int, optional 

1776 The number of lines to skip at the beginning of the file. 

1777 skip_footer : int, optional 

1778 The number of lines to skip at the end of the file. 

1779 converters : variable, optional 

1780 The set of functions that convert the data of a column to a value. 

1781 The converters can also be used to provide a default value 

1782 for missing data: ``converters = {3: lambda s: float(s or 0)}``. 

1783 missing : variable, optional 

1784 `missing` was removed in numpy 1.10. Please use `missing_values` 

1785 instead. 

1786 missing_values : variable, optional 

1787 The set of strings corresponding to missing data. 

1788 filling_values : variable, optional 

1789 The set of values to be used as default when the data are missing. 

1790 usecols : sequence, optional 

1791 Which columns to read, with 0 being the first. For example, 

1792 ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns. 

1793 names : {None, True, str, sequence}, optional 

1794 If `names` is True, the field names are read from the first line after 

1795 the first `skip_header` lines. This line can optionally be preceded 

1796 by a comment delimiter. If `names` is a sequence or a single string of 

1797 comma-separated names, the names will be used to define the field names 

1798 in a structured dtype. If `names` is None, the names of the dtype 

1799 fields will be used, if any. 

1800 excludelist : sequence, optional 

1801 A list of names to exclude. This list is appended to the default list 

1802 ['return','file','print']. Excluded names are appended with an 

1803 underscore: for example, `file` would become `file_`. 

1804 deletechars : str, optional 

1805 A string combining invalid characters that must be deleted from the 

1806 names. 

1807 defaultfmt : str, optional 

1808 A format used to define default field names, such as "f%i" or "f_%02i". 

1809 autostrip : bool, optional 

1810 Whether to automatically strip whitespace from the variables. 

1811 replace_space : char, optional 

1812 Character(s) used to replace whitespace in the variable 

1813 names. By default, '_' is used. 

1814 case_sensitive : {True, False, 'upper', 'lower'}, optional 

1815 If True, field names are case sensitive. 

1816 If False or 'upper', field names are converted to upper case. 

1817 If 'lower', field names are converted to lower case. 

1818 unpack : bool, optional 

1819 If True, the returned array is transposed, so that arguments may be 

1820 unpacked using ``x, y, z = genfromtxt(...)``. When used with a 

1821 structured data-type, arrays are returned for each field. 

1822 Default is False. 

1823 usemask : bool, optional 

1824 If True, return a masked array. 

1825 If False, return a regular array. 

1826 loose : bool, optional 

1827 If True, do not raise errors for invalid values. 

1828 invalid_raise : bool, optional 

1829 If True, an exception is raised if an inconsistency is detected in the 

1830 number of columns. 

1831 If False, a warning is emitted and the offending lines are skipped. 

1832 max_rows : int, optional 

1833 The maximum number of rows to read. Must not be used with skip_footer 

1834 at the same time. If given, the value must be at least 1. Default is 

1835 to read the entire file. 

1836 

1837 .. versionadded:: 1.10.0 

1838 encoding : str, optional 

1839 Encoding used to decode the input file. Does not apply when `fname` 

1840 is a file object. The special value 'bytes' enables backward 

1841 compatibility workarounds that ensure you receive byte arrays when 

1842 possible and pass latin1-encoded strings to converters. Override this 

1843 value to receive unicode arrays and pass strings to converters. If 

1844 set to None, the system default is used. The default value is 'bytes'. 

1845 

1846 .. versionadded:: 1.14.0 

1847 ndmin : int, optional 

1848 Same parameter as in `loadtxt`. 

1849 

1850 .. versionadded:: 1.23.0 

1851 ${ARRAY_FUNCTION_LIKE} 

1852 

1853 .. versionadded:: 1.20.0 

1854 

1855 Returns 

1856 ------- 

1857 out : ndarray 

1858 Data read from the text file. If `usemask` is True, this is a 

1859 masked array. 

1860 

1861 See Also 

1862 -------- 

1863 numpy.loadtxt : equivalent function when no data is missing. 

1864 

1865 Notes 

1866 ----- 

1867 * When spaces are used as delimiters, or when no delimiter has been given 

1868 as input, there should not be any missing data between two fields. 

1869 * When the variables are named (either by a flexible dtype or with `names`), 

1870 there must not be any header in the file (else a ValueError 

1871 exception is raised). 

1872 * Individual values are not stripped of spaces by default. 

1873 When using a custom converter, make sure the function removes spaces. 

1874 

1875 References 

1876 ---------- 

1877 .. [1] NumPy User Guide, section `I/O with NumPy 

1878 <https://numpy.org/doc/stable/user/basics.io.genfromtxt.html>`_. 

1879 

1880 Examples 

1881 -------- 

1882 >>> from io import StringIO 

1883 >>> import numpy as np 

1884 

1885 Comma-delimited file with mixed dtype 

1886 

1887 >>> s = StringIO(u"1,1.3,abcde") 

1888 >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'), 

1889 ... ('mystring','S5')], delimiter=",") 

1890 >>> data 

1891 array((1, 1.3, b'abcde'), 

1892 dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')]) 

1893 

1894 Using dtype = None 

1895 

1896 >>> _ = s.seek(0) # needed for StringIO example only 

1897 >>> data = np.genfromtxt(s, dtype=None, 

1898 ... names = ['myint','myfloat','mystring'], delimiter=",") 

1899 >>> data 

1900 array((1, 1.3, b'abcde'), 

1901 dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')]) 

1902 

1903 Specifying dtype and names 

1904 

1905 >>> _ = s.seek(0) 

1906 >>> data = np.genfromtxt(s, dtype="i8,f8,S5", 

1907 ... names=['myint','myfloat','mystring'], delimiter=",") 

1908 >>> data 

1909 array((1, 1.3, b'abcde'), 

1910 dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')]) 

1911 

1912 An example with fixed-width columns 

1913 

1914 >>> s = StringIO(u"11.3abcde") 

1915 >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'], 

1916 ... delimiter=[1,3,5]) 

1917 >>> data 

1918 array((1, 1.3, b'abcde'), 

1919 dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')]) 

1920 

1921 An example to show comments 

1922 

1923 >>> f = StringIO(''' 

1924 ... text,# of chars 

1925 ... hello world,11 

1926 ... numpy,5''') 

1927 >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',') 

1928 array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')], 

1929 dtype=[('f0', 'S12'), ('f1', 'S12')]) 
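
An example of filling in missing values (a sketch; ``-1`` fills the 

missing field): 

>>> s = StringIO(u"1,,3") 

>>> np.genfromtxt(s, delimiter=",", filling_values=-1) 

array([ 1., -1.,  3.]) 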

1930 

1931 """ 

1932 

1933 if like is not None: 

1934 return _genfromtxt_with_like( 

1935 fname, dtype=dtype, comments=comments, delimiter=delimiter, 

1936 skip_header=skip_header, skip_footer=skip_footer, 

1937 converters=converters, missing_values=missing_values, 

1938 filling_values=filling_values, usecols=usecols, names=names, 

1939 excludelist=excludelist, deletechars=deletechars, 

1940 replace_space=replace_space, autostrip=autostrip, 

1941 case_sensitive=case_sensitive, defaultfmt=defaultfmt, 

1942 unpack=unpack, usemask=usemask, loose=loose, 

1943 invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding, 

1944 ndmin=ndmin, 

1945 like=like 

1946 ) 

1947 

1948 _ensure_ndmin_ndarray_check_param(ndmin) 

1949 

1950 if max_rows is not None: 

1951 if skip_footer: 

1952 raise ValueError( 

1953 "The keywords 'skip_footer' and 'max_rows' can not be " 

1954 "specified at the same time.") 

1955 if max_rows < 1: 

1956 raise ValueError("'max_rows' must be at least 1.") 

1957 

1958 if usemask: 

1959 from numpy.ma import MaskedArray, make_mask_descr 

1960 # Check the input dictionary of converters 

1961 user_converters = converters or {} 

1962 if not isinstance(user_converters, dict): 

1963 raise TypeError( 

1964 "The input argument 'converter' should be a valid dictionary " 

1965 "(got '%s' instead)" % type(user_converters)) 

1966 
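# 'bytes' is a backward-compatibility sentinel: lines are decoded as 

# latin1 and user converters receive byte strings (byte_converters) 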

1967 if encoding == 'bytes': 

1968 encoding = None 

1969 byte_converters = True 

1970 else: 

1971 byte_converters = False 

1972 

1973 # Initialize the filehandle, the LineSplitter and the NameValidator 

1974 if isinstance(fname, os_PathLike): 

1975 fname = os_fspath(fname) 

1976 if isinstance(fname, str): 

1977 fid = np.lib._datasource.open(fname, 'rt', encoding=encoding) 

1978 fid_ctx = contextlib.closing(fid) 

1979 else: 

1980 fid = fname 

1981 fid_ctx = contextlib.nullcontext(fid) 

1982 try: 

1983 fhd = iter(fid) 

1984 except TypeError as e: 

1985 raise TypeError( 

1986 "fname must be a string, a filehandle, a sequence of strings,\n" 

1987 f"or an iterator of strings. Got {type(fname)} instead." 

1988 ) from e 

1989 with fid_ctx: 

1990 split_line = LineSplitter(delimiter=delimiter, comments=comments, 

1991 autostrip=autostrip, encoding=encoding) 

1992 validate_names = NameValidator(excludelist=excludelist, 

1993 deletechars=deletechars, 

1994 case_sensitive=case_sensitive, 

1995 replace_space=replace_space) 

1996 

1997 # Skip the first `skip_header` rows 

1998 try: 

1999 for i in range(skip_header): 

2000 next(fhd) 

2001 

2002 # Keep on until we find the first valid values 

2003 first_values = None 

2004 

2005 while not first_values: 

2006 first_line = _decode_line(next(fhd), encoding) 

2007 if (names is True) and (comments is not None): 

2008 if comments in first_line: 

2009 first_line = ( 

2010 ''.join(first_line.split(comments)[1:])) 

2011 first_values = split_line(first_line) 

2012 except StopIteration: 

2013 # return an empty array if the datafile is empty 

2014 first_line = '' 

2015 first_values = [] 

2016 warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2) 

2017 

2018 # Should we take the first values as names? 

2019 if names is True: 

2020 fval = first_values[0].strip() 

2021 if comments is not None: 

2022 if fval in comments: 

2023 del first_values[0] 

2024 

2025 # Check the columns to use: make sure `usecols` is a list 

2026 if usecols is not None: 

2027 try: 

2028 usecols = [_.strip() for _ in usecols.split(",")] 

2029 except AttributeError: 

2030 try: 

2031 usecols = list(usecols) 

2032 except TypeError: 

2033 usecols = [usecols, ] 
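
# usecols may arrive as a "name1,name2" string, an iterable, or a 

# scalar; from here on it is always a list 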

2034 nbcols = len(usecols or first_values) 

2035 

2036 # Check the names and overwrite the dtype.names if needed 

2037 if names is True: 

2038 names = validate_names([str(_.strip()) for _ in first_values]) 

2039 first_line = '' 

2040 elif _is_string_like(names): 

2041 names = validate_names([_.strip() for _ in names.split(',')]) 

2042 elif names: 

2043 names = validate_names(names) 

2044 # Get the dtype 

2045 if dtype is not None: 

2046 dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names, 

2047 excludelist=excludelist, 

2048 deletechars=deletechars, 

2049 case_sensitive=case_sensitive, 

2050 replace_space=replace_space) 

2051 # Make sure names is a list (for Python 2.5 compatibility) 

2052 if names is not None: 

2053 names = list(names) 

2054 

2055 if usecols: 

2056 for (i, current) in enumerate(usecols): 

2057 # if usecols is a list of names, convert to a list of indices 

2058 if _is_string_like(current): 

2059 usecols[i] = names.index(current) 

2060 elif current < 0: 

2061 usecols[i] = current + len(first_values) 

2062 # If the dtype is not None, make sure we update it 

2063 if (dtype is not None) and (len(dtype) > nbcols): 

2064 descr = dtype.descr 

2065 dtype = np.dtype([descr[_] for _ in usecols]) 

2066 names = list(dtype.names) 

2067 # If `names` is not None, update the names 

2068 elif (names is not None) and (len(names) > nbcols): 

2069 names = [names[_] for _ in usecols] 

2070 elif (names is not None) and (dtype is not None): 

2071 names = list(dtype.names) 

2072 

2073 # Process the missing values ............................... 

2074 # Rename missing_values for convenience 

2075 user_missing_values = missing_values or () 

2076 if isinstance(user_missing_values, bytes): 

2077 user_missing_values = user_missing_values.decode('latin1') 

2078 

2079 # Define the list of missing_values (one column: one list) 

2080 missing_values = [list(['']) for _ in range(nbcols)] 
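
# e.g. with nbcols == 3 this starts as [[''], [''], ['']]; each 

# column accumulates its own missing-value markers below 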

2081 

2082 # We have a dictionary: process it field by field 

2083 if isinstance(user_missing_values, dict): 

2084 # Loop on the items 

2085 for (key, val) in user_missing_values.items(): 

2086 # Is the key a string? 

2087 if _is_string_like(key): 

2088 try: 

2089 # Transform it into an integer 

2090 key = names.index(key) 

2091 except ValueError: 

2092 # We couldn't find it: the name must have been dropped 

2093 continue 

2094 # Redefine the key as needed if it's a column number 

2095 if usecols: 

2096 try: 

2097 key = usecols.index(key) 

2098 except ValueError: 

2099 pass 

2100 # Transform the value into a list of strings 

2101 if isinstance(val, (list, tuple)): 

2102 val = [str(_) for _ in val] 

2103 else: 

2104 val = [str(val), ] 

2105 # Add the value(s) to the current list of missing 

2106 if key is None: 

2107 # None acts as default 

2108 for miss in missing_values: 

2109 miss.extend(val) 

2110 else: 

2111 missing_values[key].extend(val) 

2112 # We have a sequence: each item matches a column 

2113 elif isinstance(user_missing_values, (list, tuple)): 

2114 for (value, entry) in zip(user_missing_values, missing_values): 

2115 value = str(value) 

2116 if value not in entry: 

2117 entry.append(value) 

2118 # We have a string: apply it to all entries 

2119 elif isinstance(user_missing_values, str): 

2120 user_value = user_missing_values.split(",") 

2121 for entry in missing_values: 

2122 entry.extend(user_value) 

2123 # We have something else: apply it to all entries 

2124 else: 

2125 for entry in missing_values: 

2126 entry.extend([str(user_missing_values)]) 

2127 

2128 # Process the filling_values ............................... 

2129 # Rename the input for convenience 

2130 user_filling_values = filling_values 

2131 if user_filling_values is None: 

2132 user_filling_values = [] 

2133 # Define the default 

2134 filling_values = [None] * nbcols 

2135 # We have a dictionary: update each entry individually 

2136 if isinstance(user_filling_values, dict): 

2137 for (key, val) in user_filling_values.items(): 

2138 if _is_string_like(key): 

2139 try: 

2140 # Transform it into an integer 

2141 key = names.index(key) 

2142 except ValueError: 

2143 # We couldn't find it: the name must have been dropped. 

2144 continue 

2145 # Redefine the key if it's a column number and usecols is defined 

2146 if usecols: 

2147 try: 

2148 key = usecols.index(key) 

2149 except ValueError: 

2150 pass 

2151 # Add the value to the list 

2152 filling_values[key] = val 

2153 # We have a sequence: update on a one-to-one basis 

2154 elif isinstance(user_filling_values, (list, tuple)): 

2155 n = len(user_filling_values) 

2156 if (n <= nbcols): 

2157 filling_values[:n] = user_filling_values 

2158 else: 

2159 filling_values = user_filling_values[:nbcols] 

2160 # We have something else: use it for all entries 

2161 else: 

2162 filling_values = [user_filling_values] * nbcols 

2163 

2164 # Initialize the converters ................................ 

2165 if dtype is None: 

2166 # Note: we can't use [converter] * nbcols, as that would repeat 

2167 # a single converter instead of creating nbcols distinct ones. 

2168 converters = [StringConverter(None, missing_values=miss, default=fill) 

2169 for (miss, fill) in zip(missing_values, filling_values)] 

2170 else: 

2171 dtype_flat = flatten_dtype(dtype, flatten_base=True) 

2172 # Initialize the converters 

2173 if len(dtype_flat) > 1: 

2174 # Flexible type: get a converter from each dtype 

2175 zipit = zip(dtype_flat, missing_values, filling_values) 

2176 converters = [StringConverter(dt, locked=True, 

2177 missing_values=miss, default=fill) 

2178 for (dt, miss, fill) in zipit] 

2179 else: 

2180 # Set to a default converter (but w/ different missing values) 

2181 zipit = zip(missing_values, filling_values) 

2182 converters = [StringConverter(dtype, locked=True, 

2183 missing_values=miss, default=fill) 

2184 for (miss, fill) in zipit] 

2185 # Update the converters to use the user-defined ones 

2186 uc_update = [] 

2187 for (j, conv) in user_converters.items(): 

2188 # If the converter is specified by column names, use the index instead 

2189 if _is_string_like(j): 

2190 try: 

2191 j = names.index(j) 

2192 i = j 

2193 except ValueError: 

2194 continue 

2195 elif usecols: 

2196 try: 

2197 i = usecols.index(j) 

2198 except ValueError: 

2199 # Unused converter specified 

2200 continue 

2201 else: 

2202 i = j 

2203 # Find the value to test - first_line is not filtered by usecols: 

2204 if len(first_line): 

2205 testing_value = first_values[j] 

2206 else: 

2207 testing_value = None 

2208 if conv is bytes: 

2209 user_conv = asbytes 

2210 elif byte_converters: 

2211 # converters may use decode to work around numpy's old behaviour, 

2212 # so encode the string again before passing to the user converter 

2213 def tobytes_first(x, conv): 

2214 if type(x) is bytes: 

2215 return conv(x) 

2216 return conv(x.encode("latin1")) 

2217 user_conv = functools.partial(tobytes_first, conv=conv) 

2218 else: 

2219 user_conv = conv 

2220 converters[i].update(user_conv, locked=True, 

2221 testing_value=testing_value, 

2222 default=filling_values[i], 

2223 missing_values=missing_values[i],) 

2224 uc_update.append((i, user_conv)) 

2225 # Make sure we have the corrected keys in user_converters... 

2226 user_converters.update(uc_update) 

2227 

2228 # FIXME: possible error, as the following variable is never used. 

2229 # miss_chars = [_.missing_values for _ in converters] 

2230 

2231 # Initialize the output lists ... 

2232 # ... rows 

2233 rows = [] 

2234 append_to_rows = rows.append 

2235 # ... masks 

2236 if usemask: 

2237 masks = [] 

2238 append_to_masks = masks.append 

2239 # ... invalid 

2240 invalid = [] 

2241 append_to_invalid = invalid.append 

2242 

2243 # Parse each line 

2244 for (i, line) in enumerate(itertools.chain([first_line, ], fhd)): 
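
# first_line was already consumed while sniffing names; if it was 

# cleared to '', split_line returns [] and the empty-row check below 

# skips it 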

2245 values = split_line(line) 

2246 nbvalues = len(values) 

2247 # Skip an empty line 

2248 if nbvalues == 0: 

2249 continue 

2250 if usecols: 

2251 # Select only the columns we need 

2252 try: 

2253 values = [values[_] for _ in usecols] 

2254 except IndexError: 

2255 append_to_invalid((i + skip_header + 1, nbvalues)) 

2256 continue 

2257 elif nbvalues != nbcols: 

2258 append_to_invalid((i + skip_header + 1, nbvalues)) 

2259 continue 

2260 # Store the values 

2261 append_to_rows(tuple(values)) 

2262 if usemask: 

2263 append_to_masks(tuple([v.strip() in m 

2264 for (v, m) in zip(values, 

2265 missing_values)])) 

2266 if len(rows) == max_rows: 

2267 break 

2268 

2269 # Upgrade the converters (if needed) 

2270 if dtype is None: 

2271 for (i, converter) in enumerate(converters): 

2272 current_column = [itemgetter(i)(_m) for _m in rows] 

2273 try: 

2274 converter.iterupgrade(current_column) 

2275 except ConverterLockError: 

2276 errmsg = "Converter #%i is locked and cannot be upgraded: " % i 

2277 current_column = map(itemgetter(i), rows) 

2278 for (j, value) in enumerate(current_column): 

2279 try: 

2280 converter.upgrade(value) 

2281 except (ConverterError, ValueError): 

2282 errmsg += "(occurred line #%i for value '%s')" 

2283 errmsg %= (j + 1 + skip_header, value) 

2284 raise ConverterError(errmsg) 

2285 

2286 # Check that we don't have invalid values 

2287 nbinvalid = len(invalid) 

2288 if nbinvalid > 0: 

2289 nbrows = len(rows) + nbinvalid - skip_footer 

2290 # Construct the error message 

2291 template = " Line #%%i (got %%i columns instead of %i)" % nbcols 

2292 if skip_footer > 0: 

2293 nbinvalid_skipped = len([_ for _ in invalid 

2294 if _[0] > nbrows + skip_header]) 

2295 invalid = invalid[:nbinvalid - nbinvalid_skipped] 

2296 skip_footer -= nbinvalid_skipped 

2297# 

2298# nbrows -= skip_footer 

2299# errmsg = [template % (i, nb) 

2300# for (i, nb) in invalid if i < nbrows] 

2301# else: 

2302 errmsg = [template % (i, nb) 

2303 for (i, nb) in invalid] 

2304 if len(errmsg): 

2305 errmsg.insert(0, "Some errors were detected!") 

2306 errmsg = "\n".join(errmsg) 

2307 # Raise an exception? 

2308 if invalid_raise: 

2309 raise ValueError(errmsg) 

2310 # Issue a warning? 

2311 else: 

2312 warnings.warn(errmsg, ConversionWarning, stacklevel=2) 

2313 

2314 # Strip the last skip_footer data 

2315 if skip_footer > 0: 

2316 rows = rows[:-skip_footer] 

2317 if usemask: 

2318 masks = masks[:-skip_footer] 

2319 

2320 # Convert each value according to the converter: 

2321 # We want to modify the list in place to avoid creating a new one... 

2322 if loose: 

2323 rows = list( 

2324 zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)] 

2325 for (i, conv) in enumerate(converters)])) 

2326 else: 

2327 rows = list( 

2328 zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)] 

2329 for (i, conv) in enumerate(converters)])) 

2330 

2331 # Reset the dtype 

2332 data = rows 

2333 if dtype is None: 

2334 # Get the dtypes from the types of the converters 

2335 column_types = [conv.type for conv in converters] 

2336 # Find the columns with strings... 

2337 strcolidx = [i for (i, v) in enumerate(column_types) 

2338 if v == np.unicode_] 

2339 

2340 if byte_converters and strcolidx: 

2341 # convert strings back to bytes for backward compatibility 

2342 warnings.warn( 

2343 "Reading unicode strings without specifying the encoding " 

2344 "argument is deprecated. Set the encoding, use None for the " 

2345 "system default.", 

2346 np.VisibleDeprecationWarning, stacklevel=2) 

2347 def encode_unicode_cols(row_tup): 

2348 row = list(row_tup) 

2349 for i in strcolidx: 

2350 row[i] = row[i].encode('latin1') 

2351 return tuple(row) 

2352 

2353 try: 

2354 data = [encode_unicode_cols(r) for r in data] 

2355 except UnicodeEncodeError: 

2356 pass 

2357 else: 

2358 for i in strcolidx: 

2359 column_types[i] = np.bytes_ 

2360 

2361 # Update string types to be the right length 

2362 sized_column_types = column_types[:] 

2363 for i, col_type in enumerate(column_types): 

2364 if np.issubdtype(col_type, np.character): 

2365 n_chars = max(len(row[i]) for row in data) 

2366 sized_column_types[i] = (col_type, n_chars) 

2367 

2368 if names is None: 

2369 # If the dtype is uniform (before sizing strings) 

2370 base = { 

2371 c_type 

2372 for c, c_type in zip(converters, column_types) 

2373 if c._checked} 

2374 if len(base) == 1: 

2375 uniform_type, = base 

2376 (ddtype, mdtype) = (uniform_type, bool) 

2377 else: 

2378 ddtype = [(defaultfmt % i, dt) 

2379 for (i, dt) in enumerate(sized_column_types)] 

2380 if usemask: 

2381 mdtype = [(defaultfmt % i, bool) 

2382 for (i, dt) in enumerate(sized_column_types)] 

2383 else: 

2384 ddtype = list(zip(names, sized_column_types)) 

2385 mdtype = list(zip(names, [bool] * len(sized_column_types))) 

2386 output = np.array(data, dtype=ddtype) 

2387 if usemask: 

2388 outputmask = np.array(masks, dtype=mdtype) 

2389 else: 

2390 # Overwrite the initial dtype names if needed 

2391 if names and dtype.names is not None: 

2392 dtype.names = names 

2393 # Case 1. We have a structured type 

2394 if len(dtype_flat) > 1: 

2395 # Nested dtype, e.g. [('a', int), ('b', [('b0', int), ('b1', 'f4')])] 

2396 # First, create the array using a flattened dtype: 

2397 # [('a', int), ('b1', int), ('b2', float)] 

2398 # Then, view the array using the specified dtype. 

2399 if 'O' in (_.char for _ in dtype_flat): 

2400 if has_nested_fields(dtype): 

2401 raise NotImplementedError( 

2402 "Nested fields involving objects are not supported...") 

2403 else: 

2404 output = np.array(data, dtype=dtype) 

2405 else: 

2406 rows = np.array(data, dtype=[('', _) for _ in dtype_flat]) 

2407 output = rows.view(dtype) 

2408 # Now, process the rowmasks the same way 

2409 if usemask: 

2410 rowmasks = np.array( 

2411 masks, dtype=np.dtype([('', bool) for t in dtype_flat])) 

2412 # Construct the new dtype 

2413 mdtype = make_mask_descr(dtype) 

2414 outputmask = rowmasks.view(mdtype) 

2415 # Case #2. We have a basic dtype 

2416 else: 

2417 # We used some user-defined converters 

2418 if user_converters: 

2419 ishomogeneous = True 

2420 descr = [] 

2421 for i, ttype in enumerate([conv.type for conv in converters]): 

2422 # Keep the dtype of the current converter 

2423 if i in user_converters: 

2424 ishomogeneous &= (ttype == dtype.type) 

2425 if np.issubdtype(ttype, np.character): 

2426 ttype = (ttype, max(len(row[i]) for row in data)) 

2427 descr.append(('', ttype)) 

2428 else: 

2429 descr.append(('', dtype)) 

2430 # So we changed the dtype? 

2431 if not ishomogeneous: 

2432 # We have more than one field 

2433 if len(descr) > 1: 

2434 dtype = np.dtype(descr) 

2435 # We have only one field: drop the name if not needed. 

2436 else: 

2437 dtype = np.dtype(ttype) 

2438 # 

2439 output = np.array(data, dtype) 

2440 if usemask: 

2441 if dtype.names is not None: 

2442 mdtype = [(_, bool) for _ in dtype.names] 

2443 else: 

2444 mdtype = bool 

2445 outputmask = np.array(masks, dtype=mdtype) 

2446 # Mask any missing data that the converters let through 

2447 names = output.dtype.names 

2448 if usemask and names: 

2449 for (name, conv) in zip(names, converters): 

2450 missing_values = [conv(_) for _ in conv.missing_values 

2451 if _ != ''] 

2452 for mval in missing_values: 

2453 outputmask[name] |= (output[name] == mval) 

2454 # Construct the final array 

2455 if usemask: 

2456 output = output.view(MaskedArray) 

2457 output._mask = outputmask 

2458 

2459 output = _ensure_ndmin_ndarray(output, ndmin=ndmin) 

2460 

2461 if unpack: 

2462 if names is None: 

2463 return output.T 

2464 elif len(names) == 1: 

2465 # squeeze single-name dtypes too 

2466 return output[names[0]] 

2467 else: 

2468 # For structured arrays with multiple fields, 

2469 # return an array for each field. 

2470 return [output[field] for field in names] 

2471 return output 

2472 

2473 

2474_genfromtxt_with_like = array_function_dispatch( 

2475 _genfromtxt_dispatcher, use_like=True 

2476)(genfromtxt) 

2477 

2478 

2479def recfromtxt(fname, **kwargs): 

2480 """ 

2481 Load ASCII data from a file and return it in a record array. 

2482 

2483 If ``usemask=False`` a standard `recarray` is returned, 

2484 if ``usemask=True`` a MaskedRecords array is returned. 

2485 

2486 Parameters 

2487 ---------- 

2488 fname, kwargs : For a description of input parameters, see `genfromtxt`. 

2489 

2490 See Also 

2491 -------- 

2492 numpy.genfromtxt : generic function 

2493 

2494 Notes 

2495 ----- 

2496 By default, `dtype` is None, which means that the data-type of the output 

2497 array will be determined from the data. 
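
Examples 

-------- 

A minimal sketch (illustrative; with ``dtype=None`` the default 

field names are ``f0``, ``f1``, ...): 

>>> from io import StringIO 

>>> r = np.recfromtxt(StringIO(u"1 2.5 abc\n2 3.5 def"), encoding=None) 

>>> r.f1 

array([2.5, 3.5]) 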

2498 

2499 """ 

2500 kwargs.setdefault("dtype", None) 

2501 usemask = kwargs.get('usemask', False) 

2502 output = genfromtxt(fname, **kwargs) 

2503 if usemask: 

2504 from numpy.ma.mrecords import MaskedRecords 

2505 output = output.view(MaskedRecords) 

2506 else: 

2507 output = output.view(np.recarray) 

2508 return output 

2509 

2510 

2511def recfromcsv(fname, **kwargs): 

2512 """ 

2513 Load ASCII data stored in a comma-separated file. 

2514 

2515 The returned array is a record array (if ``usemask=False``, see 

2516 `recarray`) or a masked record array (if ``usemask=True``, 

2517 see `ma.mrecords.MaskedRecords`). 

2518 

2519 Parameters 

2520 ---------- 

2521 fname, kwargs : For a description of input parameters, see `genfromtxt`. 

2522 

2523 See Also 

2524 -------- 

2525 numpy.genfromtxt : generic function to load ASCII data. 

2526 

2527 Notes 

2528 ----- 

2529 By default, `dtype` is None, which means that the data-type of the output 

2530 array will be determined from the data. 
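
Examples 

-------- 

A minimal sketch (illustrative; the header row supplies the field 

names, lower-cased by default): 

>>> from io import StringIO 

>>> r = np.recfromcsv(StringIO(u"A,B\n1,2.5\n2,3.5"), encoding=None) 

>>> r.a 

array([1, 2]) 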

2531 

2532 """ 

2533 # Set default kwargs for genfromtxt as relevant to csv import. 

2534 kwargs.setdefault("case_sensitive", "lower") 

2535 kwargs.setdefault("names", True) 

2536 kwargs.setdefault("delimiter", ",") 

2537 kwargs.setdefault("dtype", None) 

2538 output = genfromtxt(fname, **kwargs) 

2539 

2540 usemask = kwargs.get("usemask", False) 

2541 if usemask: 

2542 from numpy.ma.mrecords import MaskedRecords 

2543 output = output.view(MaskedRecords) 

2544 else: 

2545 output = output.view(np.recarray) 

2546 return output