import os
import re
import functools
import itertools
import warnings
import weakref
import contextlib
import operator
from operator import itemgetter, index as opindex, methodcaller
from collections.abc import Mapping

import numpy as np
from . import format
from ._datasource import DataSource
from numpy.core import overrides
from numpy.core.multiarray import packbits, unpackbits
from numpy.core._multiarray_umath import _load_from_filelike
from numpy.core.overrides import set_array_function_like_doc, set_module
from ._iotools import (
    LineSplitter, NameValidator, StringConverter, ConverterError,
    ConverterLockError, ConversionWarning, _is_string_like,
    has_nested_fields, flatten_dtype, easy_dtype, _decode_line
    )

from numpy.compat import (
    asbytes, asstr, asunicode, os_fspath, os_PathLike,
    pickle
    )


__all__ = [
    'savetxt', 'loadtxt', 'genfromtxt',
    'recfromtxt', 'recfromcsv', 'load', 'save', 'savez',
    'savez_compressed', 'packbits', 'unpackbits', 'fromregex', 'DataSource'
    ]


array_function_dispatch = functools.partial(
    overrides.array_function_dispatch, module='numpy')


class BagObj:
    """
    BagObj(obj)

    Convert attribute look-ups to getitems on the object passed in.

    Parameters
    ----------
    obj : class instance
        Object on which attribute look-up is performed.

    Examples
    --------
    >>> from numpy.lib.npyio import BagObj as BO
    >>> class BagDemo:
    ...     def __getitem__(self, key): # An instance of BagObj(BagDemo)
    ...         # will call this method when any
    ...         # attribute look-up is required
    ...         result = "Doesn't matter what you want, "
    ...         return result + "you're gonna get this"
    ...
    >>> demo_obj = BagDemo()
    >>> bagobj = BO(demo_obj)
    >>> bagobj.hello_there
    "Doesn't matter what you want, you're gonna get this"
    >>> bagobj.I_can_be_anything
    "Doesn't matter what you want, you're gonna get this"

    """

    def __init__(self, obj):
        # Use weakref to make NpzFile objects collectable by refcount
        self._obj = weakref.proxy(obj)

    def __getattribute__(self, key):
        try:
            return object.__getattribute__(self, '_obj')[key]
        except KeyError:
            raise AttributeError(key) from None

    def __dir__(self):
        """
        Enables dir(bagobj) to list the files in an NpzFile.

        This also enables tab-completion in an interpreter or IPython.
        """
        return list(object.__getattribute__(self, '_obj').keys())


def zipfile_factory(file, *args, **kwargs):
    """
    Create a ZipFile.

    Allows for Zip64, and the `file` argument can accept file, str, or
    pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
    constructor.
    """
    if not hasattr(file, 'read'):
        file = os_fspath(file)
    import zipfile
    kwargs['allowZip64'] = True
    return zipfile.ZipFile(file, *args, **kwargs)
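

# Illustrative sketch (editorial example, not part of NumPy's API): the
# factory above accepts both paths and already-open file objects and always
# enables Zip64. `_example_zipfile_factory_roundtrip` is a hypothetical name.
def _example_zipfile_factory_roundtrip():
    import io
    buf = io.BytesIO()  # has a read() method, so it is passed through as-is
    with zipfile_factory(buf, mode="w") as zf:
        zf.writestr("greeting.txt", "hello")
    buf.seek(0)
    with zipfile_factory(buf, mode="r") as zf:
        return zf.read("greeting.txt")  # b'hello'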


class NpzFile(Mapping):
    """
    NpzFile(fid)

    A dictionary-like object with lazy-loading of files in the zipped
    archive provided on construction.

    `NpzFile` is used to load files in the NumPy ``.npz`` data archive
    format. It assumes that files in the archive have a ``.npy`` extension;
    other files are ignored.

    The arrays and file strings are lazily loaded on either
    getitem access using ``obj['key']`` or attribute lookup using
    ``obj.f.key``. A list of all files (without ``.npy`` extensions) can
    be obtained with ``obj.files`` and the ZipFile object itself using
    ``obj.zip``.

    Attributes
    ----------
    files : list of str
        List of all files in the archive with a ``.npy`` extension.
    zip : ZipFile instance
        The ZipFile object initialized with the zipped archive.
    f : BagObj instance
        An object on which attribute look-up can be performed as an
        alternative to getitem access on the `NpzFile` instance itself.
    allow_pickle : bool, optional
        Allow loading pickled data. Default: False

        .. versionchanged:: 1.16.3
            Made default False in response to CVE-2019-6446.

    pickle_kwargs : dict, optional
        Additional keyword arguments to pass on to pickle.load.
        These are only useful when loading object arrays saved on
        Python 2 when using Python 3.
    max_header_size : int, optional
        Maximum allowed size of the header.  Large headers may not be safe
        to load securely and thus require explicitly passing a larger value.
        See :py:func:`ast.literal_eval()` for details.
        This option is ignored when `allow_pickle` is passed.  In that case
        the file is by definition trusted and the limit is unnecessary.

    Parameters
    ----------
    fid : file or str
        The zipped archive to open. This is either a file-like object
        or a string containing the path to the archive.
    own_fid : bool, optional
        Whether NpzFile should close the file handle.
        Requires that `fid` is a file-like object.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)
    >>> np.savez(outfile, x=x, y=y)
    >>> _ = outfile.seek(0)

    >>> npz = np.load(outfile)
    >>> isinstance(npz, np.lib.npyio.NpzFile)
    True
    >>> npz
    NpzFile 'object' with keys: x, y
    >>> sorted(npz.files)
    ['x', 'y']
    >>> npz['x']  # getitem access
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])
    >>> npz.f.x  # attribute lookup
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    # Make __exit__ safe if zipfile_factory raises an exception
    zip = None
    fid = None
    _MAX_REPR_ARRAY_COUNT = 5

    def __init__(self, fid, own_fid=False, allow_pickle=False,
                 pickle_kwargs=None, *,
                 max_header_size=format._MAX_HEADER_SIZE):
        # Import is postponed to here since zipfile depends on gzip, an
        # optional component of the so-called standard library.
        _zip = zipfile_factory(fid)
        self._files = _zip.namelist()
        self.files = []
        self.allow_pickle = allow_pickle
        self.max_header_size = max_header_size
        self.pickle_kwargs = pickle_kwargs
        for x in self._files:
            if x.endswith('.npy'):
                self.files.append(x[:-4])
            else:
                self.files.append(x)
        self.zip = _zip
        self.f = BagObj(self)
        if own_fid:
            self.fid = fid

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_value, traceback):
        self.close()

    def close(self):
        """
        Close the file.

        """
        if self.zip is not None:
            self.zip.close()
            self.zip = None
        if self.fid is not None:
            self.fid.close()
            self.fid = None
        self.f = None  # break reference cycle

    def __del__(self):
        self.close()

    # Implement the Mapping ABC
    def __iter__(self):
        return iter(self.files)

    def __len__(self):
        return len(self.files)

    def __getitem__(self, key):
        # FIXME: This seems like it will copy strings around
        #   more than is strictly necessary.  The zipfile
        #   will read the string and then
        #   the format.read_array will copy the string
        #   to another place in memory.
        #   It would be better if the zipfile could read
        #   (or at least uncompress) the data
        #   directly into the array memory.
        member = False
        if key in self._files:
            member = True
        elif key in self.files:
            member = True
            key += '.npy'
        if member:
            bytes = self.zip.open(key)
            magic = bytes.read(len(format.MAGIC_PREFIX))
            bytes.close()
            if magic == format.MAGIC_PREFIX:
                bytes = self.zip.open(key)
                return format.read_array(bytes,
                                         allow_pickle=self.allow_pickle,
                                         pickle_kwargs=self.pickle_kwargs,
                                         max_header_size=self.max_header_size)
            else:
                return self.zip.read(key)
        else:
            raise KeyError(f"{key} is not a file in the archive")

    def __contains__(self, key):
        return (key in self._files or key in self.files)

    def __repr__(self):
        # Get filename or default to `object`
        if isinstance(self.fid, str):
            filename = self.fid
        else:
            filename = getattr(self.fid, "name", "object")

        # Get the name of arrays
        array_names = ', '.join(self.files[:self._MAX_REPR_ARRAY_COUNT])
        if len(self.files) > self._MAX_REPR_ARRAY_COUNT:
            array_names += "..."
        return f"NpzFile {filename!r} with keys: {array_names}"
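

# Illustrative sketch (editorial example, not part of NumPy's API): NpzFile
# supports mapping-style access and the context-manager protocol; an array is
# only decompressed when its key is first accessed. Hypothetical helper name.
def _example_npzfile_access():
    import io
    buf = io.BytesIO()
    np.savez(buf, x=np.arange(3), y=np.zeros(2))
    buf.seek(0)
    with np.load(buf) as npz:      # npz is an NpzFile
        names = sorted(npz.files)  # ['x', 'y']
        x = npz['x']               # getitem access triggers the actual read
        y = npz.f.y                # attribute access routed through BagObj
    return names, x, y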


@set_module('numpy')
def load(file, mmap_mode=None, allow_pickle=False, fix_imports=True,
         encoding='ASCII', *, max_header_size=format._MAX_HEADER_SIZE):
    """
    Load arrays or pickled objects from ``.npy``, ``.npz`` or pickled files.

    .. warning:: Loading files that contain object arrays uses the ``pickle``
                 module, which is not secure against erroneous or maliciously
                 constructed data. Consider passing ``allow_pickle=False`` to
                 load data that is known not to contain object arrays for the
                 safer handling of untrusted sources.

    Parameters
    ----------
    file : file-like object, string, or pathlib.Path
        The file to read. File-like objects must support the
        ``seek()`` and ``read()`` methods and must always
        be opened in binary mode.  Pickled files require that the
        file-like object support the ``readline()`` method as well.
    mmap_mode : {None, 'r+', 'r', 'w+', 'c'}, optional
        If not None, then memory-map the file, using the given mode (see
        `numpy.memmap` for a detailed description of the modes).  A
        memory-mapped array is kept on disk. However, it can be accessed
        and sliced like any ndarray.  Memory mapping is especially useful
        for accessing small fragments of large files without reading the
        entire file into memory.
    allow_pickle : bool, optional
        Allow loading pickled object arrays stored in npy files. Reasons for
        disallowing pickles include security, as loading pickled data can
        execute arbitrary code. If pickles are disallowed, loading object
        arrays will fail. Default: False

        .. versionchanged:: 1.16.3
            Made default False in response to CVE-2019-6446.

    fix_imports : bool, optional
        Only useful when loading Python 2 generated pickled files on Python 3,
        which includes npy/npz files containing object arrays. If `fix_imports`
        is True, pickle will try to map the old Python 2 names to the new names
        used in Python 3.
    encoding : str, optional
        What encoding to use when reading Python 2 strings. Only useful when
        loading Python 2 generated pickled files in Python 3, which includes
        npy/npz files containing object arrays. Values other than 'latin1',
        'ASCII', and 'bytes' are not allowed, as they can corrupt numerical
        data. Default: 'ASCII'
    max_header_size : int, optional
        Maximum allowed size of the header.  Large headers may not be safe
        to load securely and thus require explicitly passing a larger value.
        See :py:func:`ast.literal_eval()` for details.
        This option is ignored when `allow_pickle` is passed.  In that case
        the file is by definition trusted and the limit is unnecessary.

    Returns
    -------
    result : array, tuple, dict, etc.
        Data stored in the file. For ``.npz`` files, the returned instance
        of NpzFile class must be closed to avoid leaking file descriptors.

    Raises
    ------
    OSError
        If the input file does not exist or cannot be read.
    UnpicklingError
        If ``allow_pickle=True``, but the file cannot be loaded as a pickle.
    ValueError
        If the file contains an object array, but ``allow_pickle=False``
        was given.
    EOFError
        When calling ``np.load`` multiple times on the same file handle,
        if all data has already been read.

    See Also
    --------
    save, savez, savez_compressed, loadtxt
    memmap : Create a memory-map to an array stored in a file on disk.
    lib.format.open_memmap : Create or load a memory-mapped ``.npy`` file.

    Notes
    -----
    - If the file contains pickle data, then whatever object is stored
      in the pickle is returned.
    - If the file is a ``.npy`` file, then a single array is returned.
    - If the file is a ``.npz`` file, then a dictionary-like object is
      returned, containing ``{filename: array}`` key-value pairs, one for
      each file in the archive.
    - If the file is a ``.npz`` file, the returned value supports the
      context manager protocol in a similar fashion to the open function::

        with load('foo.npz') as data:
            a = data['a']

      The underlying file descriptor is closed when exiting the 'with'
      block.

    Examples
    --------
    Store data to disk, and load it again:

    >>> np.save('/tmp/123', np.array([[1, 2, 3], [4, 5, 6]]))
    >>> np.load('/tmp/123.npy')
    array([[1, 2, 3],
           [4, 5, 6]])

    Store compressed data to disk, and load it again:

    >>> a = np.array([[1, 2, 3], [4, 5, 6]])
    >>> b = np.array([1, 2])
    >>> np.savez('/tmp/123.npz', a=a, b=b)
    >>> data = np.load('/tmp/123.npz')
    >>> data['a']
    array([[1, 2, 3],
           [4, 5, 6]])
    >>> data['b']
    array([1, 2])
    >>> data.close()

    Mem-map the stored array, and then access the second row
    directly from disk:

    >>> X = np.load('/tmp/123.npy', mmap_mode='r')
    >>> X[1, :]
    memmap([4, 5, 6])

    """
    if encoding not in ('ASCII', 'latin1', 'bytes'):
        # The 'encoding' value for pickle also affects what encoding
        # the serialized binary data of NumPy arrays is loaded
        # in. Pickle does not pass on the encoding information to
        # NumPy. The unpickling code in numpy.core.multiarray is
        # written to assume that unicode data appearing where binary
        # should be is in 'latin1'. 'bytes' is also safe, as is 'ASCII'.
        #
        # Other encoding values can corrupt binary data, and we
        # purposefully disallow them. For the same reason, the errors=
        # argument is not exposed, as values other than 'strict'
        # can similarly silently corrupt numerical data.
        raise ValueError("encoding must be 'ASCII', 'latin1', or 'bytes'")

    pickle_kwargs = dict(encoding=encoding, fix_imports=fix_imports)

    with contextlib.ExitStack() as stack:
        if hasattr(file, 'read'):
            fid = file
            own_fid = False
        else:
            fid = stack.enter_context(open(os_fspath(file), "rb"))
            own_fid = True

        # Code to distinguish between NumPy binary files and pickles.
        _ZIP_PREFIX = b'PK\x03\x04'
        _ZIP_SUFFIX = b'PK\x05\x06'  # empty zip files start with this
        N = len(format.MAGIC_PREFIX)
        magic = fid.read(N)
        if not magic:
            raise EOFError("No data left in file")
        # If the file size is less than N, we need to make sure not
        # to seek past the beginning of the file
        fid.seek(-min(N, len(magic)), 1)  # back-up
        if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
            # zip-file (assume .npz)
            # Potentially transfer file ownership to NpzFile
            stack.pop_all()
            ret = NpzFile(fid, own_fid=own_fid, allow_pickle=allow_pickle,
                          pickle_kwargs=pickle_kwargs,
                          max_header_size=max_header_size)
            return ret
        elif magic == format.MAGIC_PREFIX:
            # .npy file
            if mmap_mode:
                if allow_pickle:
                    max_header_size = 2**64
                return format.open_memmap(file, mode=mmap_mode,
                                          max_header_size=max_header_size)
            else:
                return format.read_array(fid, allow_pickle=allow_pickle,
                                         pickle_kwargs=pickle_kwargs,
                                         max_header_size=max_header_size)
        else:
            # Try a pickle
            if not allow_pickle:
                raise ValueError("Cannot load file containing pickled data "
                                 "when allow_pickle=False")
            try:
                return pickle.load(fid, **pickle_kwargs)
            except Exception as e:
                raise pickle.UnpicklingError(
                    f"Failed to interpret file {file!r} as a pickle") from e


def _save_dispatcher(file, arr, allow_pickle=None, fix_imports=None):
    return (arr,)


@array_function_dispatch(_save_dispatcher)
def save(file, arr, allow_pickle=True, fix_imports=True):
    """
    Save an array to a binary file in NumPy ``.npy`` format.

    Parameters
    ----------
    file : file, str, or pathlib.Path
        File or filename to which the data is saved.  If file is a file-object,
        then the filename is unchanged.  If file is a string or Path, a ``.npy``
        extension will be appended to the filename if it does not already
        have one.
    arr : array_like
        Array data to be saved.
    allow_pickle : bool, optional
        Allow saving object arrays using Python pickles. Reasons for
        disallowing pickles include security (loading pickled data can execute
        arbitrary code) and portability (pickled objects may not be loadable
        on different Python installations, for example if the stored objects
        require libraries that are not available, and not all pickled data is
        compatible between Python 2 and Python 3).
        Default: True
    fix_imports : bool, optional
        Only useful in forcing objects in object arrays on Python 3 to be
        pickled in a Python 2 compatible way. If `fix_imports` is True, pickle
        will try to map the new Python 3 names to the old module names used in
        Python 2, so that the pickle data stream is readable with Python 2.

    See Also
    --------
    savez : Save several arrays into a ``.npz`` archive
    savetxt, load

    Notes
    -----
    For a description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.

    Any data saved to the file is appended to the end of the file.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()

    >>> x = np.arange(10)
    >>> np.save(outfile, x)

    >>> _ = outfile.seek(0)  # Only needed to simulate closing & reopening file
    >>> np.load(outfile)
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    >>> with open('test.npy', 'wb') as f:
    ...     np.save(f, np.array([1, 2]))
    ...     np.save(f, np.array([1, 3]))
    >>> with open('test.npy', 'rb') as f:
    ...     a = np.load(f)
    ...     b = np.load(f)
    >>> print(a, b)
    [1 2] [1 3]
    """
    if hasattr(file, 'write'):
        file_ctx = contextlib.nullcontext(file)
    else:
        file = os_fspath(file)
        if not file.endswith('.npy'):
            file = file + '.npy'
        file_ctx = open(file, "wb")

    with file_ctx as fid:
        arr = np.asanyarray(arr)
        format.write_array(fid, arr, allow_pickle=allow_pickle,
                           pickle_kwargs=dict(fix_imports=fix_imports))
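

# Illustrative sketch (editorial example, not part of NumPy's API):
# consecutive `save` calls on one open handle append records that `load`
# reads back in order, as the docstring above notes. Hypothetical helper name.
def _example_save_appends():
    import io
    buf = io.BytesIO()
    np.save(buf, np.array([1, 2]))
    np.save(buf, np.array([3, 4]))
    buf.seek(0)
    first, second = np.load(buf), np.load(buf)
    return first, second  # array([1, 2]), array([3, 4])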


def _savez_dispatcher(file, *args, **kwds):
    yield from args
    yield from kwds.values()


@array_function_dispatch(_savez_dispatcher)
def savez(file, *args, **kwds):
    """Save several arrays into a single file in uncompressed ``.npz`` format.

    Provide arrays as keyword arguments to store them under the
    corresponding name in the output file: ``savez(fn, x=x, y=y)``.

    If arrays are specified as positional arguments, i.e., ``savez(fn,
    x, y)``, their names will be `arr_0`, `arr_1`, etc.

    Parameters
    ----------
    file : str or file
        Either the filename (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the filename if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Please use keyword arguments (see
        `kwds` below) to assign names to arrays.  Arrays specified as
        args will be named "arr_0", "arr_1", and so on.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Each array will be saved to the
        output file with its corresponding keyword name.

    Returns
    -------
    None

    See Also
    --------
    save : Save a single array to a binary file in NumPy format.
    savetxt : Save an array to a file as plain text.
    savez_compressed : Save several arrays into a compressed ``.npz`` archive

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain.  The archive is not compressed and each file
    in the archive contains one variable in ``.npy`` format. For a
    description of the ``.npy`` format, see :py:mod:`numpy.lib.format`.

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Keys passed in `kwds` are used as filenames inside the ZIP archive.
    Therefore, keys should be valid filenames; e.g., avoid keys that begin with
    ``/`` or contain ``.``.

    When naming variables with keyword arguments, it is not possible to name a
    variable ``file``, as this would cause the ``file`` argument to be defined
    twice in the call to ``savez``.

    Examples
    --------
    >>> from tempfile import TemporaryFile
    >>> outfile = TemporaryFile()
    >>> x = np.arange(10)
    >>> y = np.sin(x)

    Using `savez` with \\*args, the arrays are saved with default names.

    >>> np.savez(outfile, x, y)
    >>> _ = outfile.seek(0)  # Only needed to simulate closing & reopening file
    >>> npzfile = np.load(outfile)
    >>> npzfile.files
    ['arr_0', 'arr_1']
    >>> npzfile['arr_0']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    Using `savez` with \\**kwds, the arrays are saved with the keyword names.

    >>> outfile = TemporaryFile()
    >>> np.savez(outfile, x=x, y=y)
    >>> _ = outfile.seek(0)
    >>> npzfile = np.load(outfile)
    >>> sorted(npzfile.files)
    ['x', 'y']
    >>> npzfile['x']
    array([0, 1, 2, 3, 4, 5, 6, 7, 8, 9])

    """
    _savez(file, args, kwds, False)


def _savez_compressed_dispatcher(file, *args, **kwds):
    yield from args
    yield from kwds.values()


@array_function_dispatch(_savez_compressed_dispatcher)
def savez_compressed(file, *args, **kwds):
    """
    Save several arrays into a single file in compressed ``.npz`` format.

    Provide arrays as keyword arguments to store them under the
    corresponding name in the output file: ``savez_compressed(fn, x=x, y=y)``.

    If arrays are specified as positional arguments, i.e.,
    ``savez_compressed(fn, x, y)``, their names will be `arr_0`, `arr_1`, etc.

    Parameters
    ----------
    file : str or file
        Either the filename (string) or an open file (file-like object)
        where the data will be saved. If file is a string or a Path, the
        ``.npz`` extension will be appended to the filename if it is not
        already there.
    args : Arguments, optional
        Arrays to save to the file. Please use keyword arguments (see
        `kwds` below) to assign names to arrays.  Arrays specified as
        args will be named "arr_0", "arr_1", and so on.
    kwds : Keyword arguments, optional
        Arrays to save to the file. Each array will be saved to the
        output file with its corresponding keyword name.

    Returns
    -------
    None

    See Also
    --------
    numpy.save : Save a single array to a binary file in NumPy format.
    numpy.savetxt : Save an array to a file as plain text.
    numpy.savez : Save several arrays into an uncompressed ``.npz`` file format
    numpy.load : Load the files created by savez_compressed.

    Notes
    -----
    The ``.npz`` file format is a zipped archive of files named after the
    variables they contain.  The archive is compressed with
    ``zipfile.ZIP_DEFLATED`` and each file in the archive contains one variable
    in ``.npy`` format. For a description of the ``.npy`` format, see
    :py:mod:`numpy.lib.format`.

    When opening the saved ``.npz`` file with `load` a `NpzFile` object is
    returned. This is a dictionary-like object which can be queried for
    its list of arrays (with the ``.files`` attribute), and for the arrays
    themselves.

    Examples
    --------
    >>> test_array = np.random.rand(3, 2)
    >>> test_vector = np.random.rand(4)
    >>> np.savez_compressed('/tmp/123', a=test_array, b=test_vector)
    >>> loaded = np.load('/tmp/123.npz')
    >>> print(np.array_equal(test_array, loaded['a']))
    True
    >>> print(np.array_equal(test_vector, loaded['b']))
    True

    """
    _savez(file, args, kwds, True)


def _savez(file, args, kwds, compress, allow_pickle=True, pickle_kwargs=None):
    # Import is postponed to here since zipfile depends on gzip, an optional
    # component of the so-called standard library.
    import zipfile

    if not hasattr(file, 'write'):
        file = os_fspath(file)
        if not file.endswith('.npz'):
            file = file + '.npz'

    namedict = kwds
    for i, val in enumerate(args):
        key = 'arr_%d' % i
        if key in namedict.keys():
            raise ValueError(
                "Cannot use un-named variables and keyword %s" % key)
        namedict[key] = val

    if compress:
        compression = zipfile.ZIP_DEFLATED
    else:
        compression = zipfile.ZIP_STORED

    zipf = zipfile_factory(file, mode="w", compression=compression)

    for key, val in namedict.items():
        fname = key + '.npy'
        val = np.asanyarray(val)
        # always force zip64, gh-10776
        with zipf.open(fname, 'w', force_zip64=True) as fid:
            format.write_array(fid, val,
                               allow_pickle=allow_pickle,
                               pickle_kwargs=pickle_kwargs)

    zipf.close()
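

# Illustrative sketch (editorial example, not part of NumPy's API): the
# naming loop above keys positional arrays as 'arr_0', 'arr_1', ...; reusing
# such a key as a keyword raises ValueError. Hypothetical helper name.
def _example_savez_name_collision():
    import io
    buf = io.BytesIO()
    try:
        np.savez(buf, np.arange(2), arr_0=np.arange(3))
    except ValueError as exc:
        return str(exc)  # "Cannot use un-named variables and keyword arr_0"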


def _ensure_ndmin_ndarray_check_param(ndmin):
    """Check whether the `ndmin` parameter is supported by
    `_ensure_ndmin_ndarray`.  Intended as cheap validation before
    running anything expensive, e.g. in loadtxt and genfromtxt.
    """
    # Check correctness of the values of `ndmin`
    if ndmin not in [0, 1, 2]:
        raise ValueError(f"Illegal value of ndmin keyword: {ndmin}")


def _ensure_ndmin_ndarray(a, *, ndmin: int):
    """This is a helper function of loadtxt and genfromtxt to ensure
    proper minimum dimension as requested

    ndmin : int. Supported values: 0, 1, 2
    ^^ whenever this changes, keep in sync with
       _ensure_ndmin_ndarray_check_param
    """
    # Verify that the array has at least dimensions `ndmin`.
    # Tweak the size and shape of the arrays - remove extraneous dimensions
    if a.ndim > ndmin:
        a = np.squeeze(a)
    # and ensure we have the minimum number of dimensions asked for
    # - has to be in this order for the odd case ndmin=1, a.squeeze().ndim=0
    if a.ndim < ndmin:
        if ndmin == 1:
            a = np.atleast_1d(a)
        elif ndmin == 2:
            a = np.atleast_2d(a).T

    return a
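

# Illustrative sketch (editorial example, not part of NumPy's API): how the
# helper above first squeezes and then pads dimensions; note the transpose
# that turns 1-D input into a column for ndmin=2. Hypothetical helper name.
def _example_ndmin_behavior():
    squeezed = _ensure_ndmin_ndarray(np.zeros((1, 3)), ndmin=1)  # shape (3,)
    column = _ensure_ndmin_ndarray(np.zeros(3), ndmin=2)         # shape (3, 1)
    return squeezed.shape, column.shape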


# amount of lines loadtxt reads in one chunk, can be overridden for testing
_loadtxt_chunksize = 50000


def _check_nonneg_int(value, name="argument"):
    try:
        operator.index(value)
    except TypeError:
        raise TypeError(f"{name} must be an integer") from None
    if value < 0:
        raise ValueError(f"{name} must be nonnegative")


def _preprocess_comments(iterable, comments, encoding):
    """
    Generator that consumes a line-iterable and strips out the
    multiple (or multi-character) comments from lines.
    This is a pre-processing step to achieve feature parity with loadtxt
    (we assume that this is a niche feature).
    """
    for line in iterable:
        if isinstance(line, bytes):
            # Need to handle conversion here, or the splitting would fail
            line = line.decode(encoding)

        for c in comments:
            line = line.split(c, 1)[0]

        yield line
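

# Illustrative sketch (editorial example, not part of NumPy's API): stripping
# two different comment markers from an in-memory line iterable using the
# generator above. Hypothetical helper name.
def _example_preprocess_comments():
    lines = ["1 2 # trailing", "// whole-line comment", "3 4"]
    return list(_preprocess_comments(lines, comments=("#", "//"),
                                     encoding="latin1"))
    # -> ['1 2 ', '', '3 4']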


def _read(fname, *, delimiter=',', comment='#', quote='"',
          imaginary_unit='j', usecols=None, skiplines=0,
          max_rows=None, converters=None, ndmin=None, unpack=False,
          dtype=np.float64, encoding="bytes"):
    r"""
    Read a NumPy array from a text file.

    Parameters
    ----------
    fname : str or file object
        The filename or the file to be read.
    delimiter : str, optional
        Field delimiter of the fields in line of the file.
        Default is a comma, ','.  If None any sequence of whitespace is
        considered a delimiter.
    comment : str or sequence of str or None, optional
        Character that begins a comment.  All text from the comment
        character to the end of the line is ignored.
        Multiple comments or multiple-character comment strings are supported,
        but may be slower and `quote` must be empty if used.
        Use None to disable all use of comments.
    quote : str or None, optional
        Character that is used to quote string fields. Default is '"'
        (a double quote). Use None to disable quote support.
    imaginary_unit : str, optional
        Character that represents the imaginary unit `sqrt(-1)`.
        Default is 'j'.
    usecols : array_like, optional
        A one-dimensional array of integer column numbers.  These are the
        columns from the file to be included in the array.  If this value
        is not given, all the columns are used.
    skiplines : int, optional
        Number of lines to skip before interpreting the data in the file.
    max_rows : int, optional
        Maximum number of rows of data to read.  Default is to read the
        entire file.
    converters : dict or callable, optional
        A function to parse all columns strings into the desired value, or
        a dictionary mapping column number to a parser function.
        E.g. if column 0 is a date string: ``converters = {0: datestr2num}``.
        Converters can also be used to provide a default value for missing
        data, e.g. ``converters = lambda s: float(s.strip() or 0)`` will
        convert empty fields to 0.
        Default: None
    ndmin : int, optional
        Minimum dimension of the array returned.
        Allowed values are 0, 1 or 2.  Default is 0.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = read(...)``.  When used with a structured
        data-type, arrays are returned for each field.  Default is False.
    dtype : numpy data type
        A NumPy dtype instance, can be a structured dtype to map to the
        columns of the file.
    encoding : str, optional
        Encoding used to decode the input file.  The special value 'bytes'
        (the default) enables backwards-compatible behavior for `converters`,
        ensuring that inputs to the converter functions are encoded
        bytes objects.  The special value 'bytes' has no additional effect if
        ``converters=None``.  If encoding is ``'bytes'`` or ``None``, the
        default system encoding is used.

    Returns
    -------
    ndarray
        NumPy array.

    Examples
    --------
    First we create a file for the example.

    >>> s1 = '1.0,2.0,3.0\n4.0,5.0,6.0\n'
    >>> with open('example1.csv', 'w') as f:
    ...     f.write(s1)
    >>> a1 = read_from_filename('example1.csv')
    >>> a1
    array([[1., 2., 3.],
           [4., 5., 6.]])

    The second example has columns with different data types, so a
    one-dimensional array with a structured data type is returned.
    The tab character is used as the field delimiter.

    >>> s2 = '1.0\t10\talpha\n2.3\t25\tbeta\n4.5\t16\tgamma\n'
    >>> with open('example2.tsv', 'w') as f:
    ...     f.write(s2)
    >>> a2 = read_from_filename('example2.tsv', delimiter='\t')
    >>> a2
    array([(1. , 10, b'alpha'), (2.3, 25, b'beta'), (4.5, 16, b'gamma')],
          dtype=[('f0', '<f8'), ('f1', 'u1'), ('f2', 'S5')])
    """
    # Handle special 'bytes' keyword for encoding
    byte_converters = False
    if encoding == 'bytes':
        encoding = None
        byte_converters = True

    if dtype is None:
        raise TypeError("a dtype must be provided.")
    dtype = np.dtype(dtype)

    read_dtype_via_object_chunks = None
    if dtype.kind in 'SUM' and (
            dtype == "S0" or dtype == "U0" or dtype == "M8" or dtype == 'm8'):
        # This is a legacy "flexible" dtype.  We do not truly support
        # parametric dtypes currently (no dtype discovery step in the core),
        # but have to support these for backward compatibility.
        read_dtype_via_object_chunks = dtype
        dtype = np.dtype(object)

    if usecols is not None:
        # Allow usecols to be a single int or a sequence of ints, the C-code
        # handles the rest
        try:
            usecols = list(usecols)
        except TypeError:
            usecols = [usecols]

    _ensure_ndmin_ndarray_check_param(ndmin)

    if comment is None:
        comments = None
    else:
        # assume comments are a sequence of strings
        if "" in comment:
            raise ValueError(
                "comments cannot be an empty string. Use comments=None to "
                "disable comments."
            )
        comments = tuple(comment)
        comment = None
        if len(comments) == 0:
            comments = None  # No comments at all
        elif len(comments) == 1:
            # If there is only one comment, and that comment has one character,
            # the normal parsing can deal with it just fine.
            if isinstance(comments[0], str) and len(comments[0]) == 1:
                comment = comments[0]
                comments = None
        else:
            # Input validation if there are multiple comment characters
            if delimiter in comments:
                raise TypeError(
                    f"Comment characters '{comments}' cannot include the "
                    f"delimiter '{delimiter}'"
                )

    # comment is now either a 1 or 0 character string or a tuple:
    if comments is not None:
        # Note: An earlier version supported two-character comments (and could
        # have been extended to multiple characters); we assume this is rare
        # enough not to optimize for.
        if quote is not None:
            raise ValueError(
                "when multiple comments or a multi-character comment is "
                "given, quotes are not supported.  In this case quotechar "
                "must be set to None.")

    if len(imaginary_unit) != 1:
        raise ValueError('len(imaginary_unit) must be 1.')

    _check_nonneg_int(skiplines)
    if max_rows is not None:
        _check_nonneg_int(max_rows)
    else:
        # Passing -1 to the C code means "read the entire file".
        max_rows = -1

    fh_closing_ctx = contextlib.nullcontext()
    filelike = False
    try:
        if isinstance(fname, os.PathLike):
            fname = os.fspath(fname)
        if isinstance(fname, str):
            fh = np.lib._datasource.open(fname, 'rt', encoding=encoding)
            if encoding is None:
                encoding = getattr(fh, 'encoding', 'latin1')

            fh_closing_ctx = contextlib.closing(fh)
            data = fh
            filelike = True
        else:
            if encoding is None:
                encoding = getattr(fname, 'encoding', 'latin1')
            data = iter(fname)
    except TypeError as e:
        raise ValueError(
            f"fname must be a string, filehandle, list of strings,\n"
            f"or generator. Got {type(fname)} instead.") from e

    with fh_closing_ctx:
        if comments is not None:
            if filelike:
                data = iter(data)
                filelike = False
            data = _preprocess_comments(data, comments, encoding)

        if read_dtype_via_object_chunks is None:
            arr = _load_from_filelike(
                data, delimiter=delimiter, comment=comment, quote=quote,
                imaginary_unit=imaginary_unit,
                usecols=usecols, skiplines=skiplines, max_rows=max_rows,
                converters=converters, dtype=dtype,
                encoding=encoding, filelike=filelike,
                byte_converters=byte_converters)

        else:
            # This branch reads the file into chunks of object arrays and then
            # casts them to the desired actual dtype.  This ensures correct
            # string-length and datetime-unit discovery (like `arr.astype()`).
            # Due to chunking, certain error reports are less clear, currently.
            if filelike:
                data = iter(data)  # cannot chunk when reading from file

            c_byte_converters = False
            if read_dtype_via_object_chunks == "S":
                c_byte_converters = True  # Use latin1 rather than ascii

            chunks = []
            while max_rows != 0:
                if max_rows < 0:
                    chunk_size = _loadtxt_chunksize
                else:
                    chunk_size = min(_loadtxt_chunksize, max_rows)

                # Read at most one chunk per call so the cast below happens
                # incrementally rather than on one huge object array.
                next_arr = _load_from_filelike(
                    data, delimiter=delimiter, comment=comment, quote=quote,
                    imaginary_unit=imaginary_unit,
                    usecols=usecols, skiplines=skiplines,
                    max_rows=chunk_size,
                    converters=converters, dtype=dtype,
                    encoding=encoding, filelike=filelike,
                    byte_converters=byte_converters,
                    c_byte_converters=c_byte_converters)
                # Cast here already.  We hope that this is better even for
                # large files because the storage is more compact.  It could
                # be adapted (in principle the concatenate could cast).
                chunks.append(next_arr.astype(read_dtype_via_object_chunks))

                skiplines = 0  # Only have to skip for first chunk
                if max_rows >= 0:
                    max_rows -= chunk_size
                if len(next_arr) < chunk_size:
                    # There was less data than requested, so we are done.
                    break

            # Need at least one chunk, but if empty, the last one may have
            # the wrong shape.
            if len(chunks) > 1 and len(chunks[-1]) == 0:
                del chunks[-1]
            if len(chunks) == 1:
                arr = chunks[0]
            else:
                arr = np.concatenate(chunks, axis=0)

        # NOTE: ndmin works as advertised for structured dtypes, but normally
        #       these would return a 1D result plus the structured dimension,
        #       so ndmin=2 adds a third dimension even when no squeezing occurs.
        #       A `squeeze=False` could be a better solution (pandas uses squeeze).
        arr = _ensure_ndmin_ndarray(arr, ndmin=ndmin)

        if arr.shape:
            if arr.shape[0] == 0:
                warnings.warn(
                    f'loadtxt: input contained no data: "{fname}"',
                    category=UserWarning,
                    stacklevel=3
                )

        if unpack:
            # Unpack structured dtypes if requested:
            dt = arr.dtype
            if dt.names is not None:
                # For structured arrays, return an array for each field.
                return [arr[field] for field in dt.names]
            else:
                return arr.T
        else:
            return arr
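

# Illustrative sketch (editorial example, not part of NumPy's API): `_read`
# is the private engine behind the public `loadtxt` below; a minimal
# structured read through that public entry point. Hypothetical helper name.
def _example_read_via_loadtxt():
    import io
    s = io.StringIO("1.0,10\n2.5,25\n")
    dt = np.dtype([("value", float), ("count", int)])
    return np.loadtxt(s, dtype=dt, delimiter=",")
    # -> 1-D structured array with fields 'value' and 'count'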


@set_array_function_like_doc
@set_module('numpy')
def loadtxt(fname, dtype=float, comments='#', delimiter=None,
            converters=None, skiprows=0, usecols=None, unpack=False,
            ndmin=0, encoding='bytes', max_rows=None, *, quotechar=None,
            like=None):
    r"""
    Load data from a text file.

    Parameters
    ----------
    fname : file, str, pathlib.Path, list of str, generator
        File, filename, list, or generator to read.  If the filename
        extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
        that generators must return bytes or strings. The strings
        in a list or produced by a generator are treated as lines.
    dtype : data-type, optional
        Data-type of the resulting array; default: float.  If this is a
        structured data-type, the resulting array will be 1-dimensional, and
        each row will be interpreted as an element of the array.  In this
        case, the number of columns used must match the number of fields in
        the data-type.
    comments : str or sequence of str or None, optional
        The characters or list of characters used to indicate the start of a
        comment. None implies no comments. For backwards compatibility, byte
        strings will be decoded as 'latin1'. The default is '#'.
    delimiter : str, optional
        The character used to separate the values. For backwards compatibility,
        byte strings will be decoded as 'latin1'. The default is whitespace.

        .. versionchanged:: 1.23.0
           Only single character delimiters are supported. Newline characters
           cannot be used as the delimiter.

    converters : dict or callable, optional
        Converter functions to customize value parsing. If `converters` is
        callable, the function is applied to all columns, else it must be a
        dict that maps column number to a parser function.
        See examples for further details.
        Default: None.

        .. versionchanged:: 1.23.0
           The ability to pass a single callable to be applied to all columns
           was added.

    skiprows : int, optional
        Skip the first `skiprows` lines, including comments; default: 0.
    usecols : int or sequence, optional
        Which columns to read, with 0 being the first. For example,
        ``usecols = (1,4,5)`` will extract the 2nd, 5th and 6th columns.
        The default, None, results in all columns being read.

        .. versionchanged:: 1.11.0
            When a single column has to be read it is possible to use
            an integer instead of a tuple. E.g ``usecols = 3`` reads the
            fourth column the same way as ``usecols = (3,)`` would.
    unpack : bool, optional
        If True, the returned array is transposed, so that arguments may be
        unpacked using ``x, y, z = loadtxt(...)``.  When used with a
        structured data-type, arrays are returned for each field.
        Default is False.
    ndmin : int, optional
        The returned array will have at least `ndmin` dimensions.
        Otherwise mono-dimensional axes will be squeezed.
        Legal values: 0 (default), 1 or 2.

        .. versionadded:: 1.6.0
    encoding : str, optional
        Encoding used to decode the input file. Does not apply to input
        streams. The special value 'bytes' enables backward compatibility
        workarounds that ensure you receive byte arrays as results if possible
        and pass 'latin1' encoded strings to converters. Override this value
        to receive unicode arrays and pass strings as input to converters.
        If set to None the system default is used. The default value is
        'bytes'.

        .. versionadded:: 1.14.0
    max_rows : int, optional
        Read `max_rows` rows of content after `skiprows` lines. The default is
        to read all the rows. Note that empty rows containing no data such as
        empty lines and comment lines are not counted towards `max_rows`,
        while such lines are counted in `skiprows`.

        .. versionadded:: 1.16.0

        .. versionchanged:: 1.23.0
            Lines containing no data, including comment lines (e.g., lines
            starting with '#' or as specified via `comments`) are not counted
            towards `max_rows`.
    quotechar : unicode character or None, optional
        The character used to denote the start and end of a quoted item.
        Occurrences of the delimiter or comment characters are ignored within
        a quoted item. The default value is ``quotechar=None``, which means
        quoting support is disabled.

        If two consecutive instances of `quotechar` are found within a quoted
        field, the first is treated as an escape character. See examples.

        .. versionadded:: 1.23.0
    ${ARRAY_FUNCTION_LIKE}

        .. versionadded:: 1.20.0

    Returns
    -------
    out : ndarray
        Data read from the text file.

    See Also
    --------
    load, fromstring, fromregex
    genfromtxt : Load data with missing values handled as specified.
    scipy.io.loadmat : reads MATLAB data files

    Notes
    -----
    This function aims to be a fast reader for simply formatted files.  The
    `genfromtxt` function provides more sophisticated handling of, e.g.,
    lines with missing values.

    Each row in the input text file must have the same number of values to be
    able to read all values.  If not all rows have the same number of values, a
    subset of up to n columns (where n is the least number of values present
    in all rows) can be read by specifying the columns via `usecols`.

    .. versionadded:: 1.10.0

    The strings produced by the Python float.hex method can be used as
    input for floats.
1228 --------
1229 >>> from io import StringIO # StringIO behaves like a file object
1230 >>> c = StringIO("0 1\n2 3")
1231 >>> np.loadtxt(c)
1232 array([[0., 1.],
1233 [2., 3.]])
1235 >>> d = StringIO("M 21 72\nF 35 58")
1236 >>> np.loadtxt(d, dtype={'names': ('gender', 'age', 'weight'),
1237 ... 'formats': ('S1', 'i4', 'f4')})
1238 array([(b'M', 21, 72.), (b'F', 35, 58.)],
1239 dtype=[('gender', 'S1'), ('age', '<i4'), ('weight', '<f4')])
1241 >>> c = StringIO("1,0,2\n3,0,4")
1242 >>> x, y = np.loadtxt(c, delimiter=',', usecols=(0, 2), unpack=True)
1243 >>> x
1244 array([1., 3.])
1245 >>> y
1246 array([2., 4.])
1248 The `converters` argument is used to specify functions to preprocess the
1249 text prior to parsing. `converters` can be a dictionary that maps
1250 preprocessing functions to each column:
1252 >>> s = StringIO("1.618, 2.296\n3.141, 4.669\n")
1253 >>> conv = {
1254 ... 0: lambda x: np.floor(float(x)), # conversion fn for column 0
1255 ... 1: lambda x: np.ceil(float(x)), # conversion fn for column 1
1256 ... }
1257 >>> np.loadtxt(s, delimiter=",", converters=conv)
1258 array([[1., 3.],
1259 [3., 5.]])
1261 `converters` can be a callable instead of a dictionary, in which case it
1262 is applied to all columns:
1264 >>> s = StringIO("0xDE 0xAD\n0xC0 0xDE")
1265 >>> import functools
1266 >>> conv = functools.partial(int, base=16)
1267 >>> np.loadtxt(s, converters=conv)
1268 array([[222., 173.],
1269 [192., 222.]])
1271 This example shows how `converters` can be used to convert a field
1272 with a trailing minus sign into a negative number.
1274 >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
1275 >>> def conv(fld):
1276 ... return -float(fld[:-1]) if fld.endswith(b'-') else float(fld)
1277 ...
1278 >>> np.loadtxt(s, converters=conv)
1279 array([[ 10.01, -31.25],
1280 [ 19.22, 64.31],
1281 [-17.57, 63.94]])
1283 Using a callable as the converter can be particularly useful for handling
1284 values with different formatting, e.g. floats with underscores:
1286 >>> s = StringIO("1 2.7 100_000")
1287 >>> np.loadtxt(s, converters=float)
1288 array([1.e+00, 2.7e+00, 1.e+05])
1290 This idea can be extended to automatically handle values specified in
1291 many different formats:
1293 >>> def conv(val):
1294 ... try:
1295 ... return float(val)
1296 ... except ValueError:
1297 ... return float.fromhex(val)
1298 >>> s = StringIO("1, 2.5, 3_000, 0b4, 0x1.4000000000000p+2")
1299 >>> np.loadtxt(s, delimiter=",", converters=conv, encoding=None)
1300 array([1.0e+00, 2.5e+00, 3.0e+03, 1.8e+02, 5.0e+00])
1302 Note that with the default ``encoding="bytes"``, the inputs to the
1303 converter function are latin-1 encoded byte strings. To deactivate the
1304 implicit encoding prior to conversion, use ``encoding=None``
1306 >>> s = StringIO('10.01 31.25-\n19.22 64.31\n17.57- 63.94')
1307 >>> conv = lambda x: -float(x[:-1]) if x.endswith('-') else float(x)
1308 >>> np.loadtxt(s, converters=conv, encoding=None)
1309 array([[ 10.01, -31.25],
1310 [ 19.22, 64.31],
1311 [-17.57, 63.94]])
1313 Support for quoted fields is enabled with the `quotechar` parameter.
1314 Comment and delimiter characters are ignored when they appear within a
1315 quoted item delineated by `quotechar`:
1317 >>> s = StringIO('"alpha, #42", 10.0\n"beta, #64", 2.0\n')
1318 >>> dtype = np.dtype([("label", "U12"), ("value", float)])
1319 >>> np.loadtxt(s, dtype=dtype, delimiter=",", quotechar='"')
1320 array([('alpha, #42', 10.), ('beta, #64', 2.)],
1321 dtype=[('label', '<U12'), ('value', '<f8')])
1323 Quoted fields can be separated by multiple whitespace characters:
1325 >>> s = StringIO('"alpha, #42" 10.0\n"beta, #64" 2.0\n')
1326 >>> dtype = np.dtype([("label", "U12"), ("value", float)])
1327 >>> np.loadtxt(s, dtype=dtype, delimiter=None, quotechar='"')
1328 array([('alpha, #42', 10.), ('beta, #64', 2.)],
1329 dtype=[('label', '<U12'), ('value', '<f8')])
1331 Two consecutive quote characters within a quoted field are treated as a
1332 single escaped character:
1334 >>> s = StringIO('"Hello, my name is ""Monty""!"')
1335 >>> np.loadtxt(s, dtype="U", delimiter=",", quotechar='"')
1336 array('Hello, my name is "Monty"!', dtype='<U26')
1338 Read subset of columns when all rows do not contain equal number of values:
1340 >>> d = StringIO("1 2\n2 4\n3 9 12\n4 16 20")
1341 >>> np.loadtxt(d, usecols=(0, 1))
1342 array([[ 1., 2.],
1343 [ 2., 4.],
1344 [ 3., 9.],
1345 [ 4., 16.]])
1347 """

    if like is not None:
        return _loadtxt_with_like(
            like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
            converters=converters, skiprows=skiprows, usecols=usecols,
            unpack=unpack, ndmin=ndmin, encoding=encoding,
            max_rows=max_rows
        )

    if dtype is None:
        dtype = np.float64

    comment = comments
    # Control character type conversions for Py3 convenience
    if comment is not None:
        if isinstance(comment, (str, bytes)):
            comment = [comment]
        comment = [
            x.decode('latin1') if isinstance(x, bytes) else x for x in comment]
    if isinstance(delimiter, bytes):
        delimiter = delimiter.decode('latin1')

    arr = _read(fname, dtype=dtype, comment=comment, delimiter=delimiter,
                converters=converters, skiplines=skiprows, usecols=usecols,
                unpack=unpack, ndmin=ndmin, encoding=encoding,
                max_rows=max_rows, quote=quotechar)

    return arr


_loadtxt_with_like = array_function_dispatch()(loadtxt)


def _savetxt_dispatcher(fname, X, fmt=None, delimiter=None, newline=None,
                        header=None, footer=None, comments=None,
                        encoding=None):
    return (X,)


@array_function_dispatch(_savetxt_dispatcher)
def savetxt(fname, X, fmt='%.18e', delimiter=' ', newline='\n', header='',
            footer='', comments='# ', encoding=None):
    """
    Save an array to a text file.

    Parameters
    ----------
    fname : filename or file handle
        If the filename ends in ``.gz``, the file is automatically saved in
        compressed gzip format.  `loadtxt` understands gzipped files
        transparently.
    X : 1D or 2D array_like
        Data to be saved to a text file.
    fmt : str or sequence of strs, optional
        A single format (%10.5f), a sequence of formats, or a
        multi-format string, e.g. 'Iteration %d -- %10.5f', in which
        case `delimiter` is ignored. For complex `X`, the legal options
        for `fmt` are:

        * a single specifier, `fmt='%.4e'`, resulting in numbers formatted
          like `' (%s+%sj)' % (fmt, fmt)`
        * a full string specifying every real and imaginary part, e.g.
          `' %.4e %+.4ej %.4e %+.4ej %.4e %+.4ej'` for 3 columns
        * a list of specifiers, one per column - in this case, the real
          and imaginary part must have separate specifiers,
          e.g. `['%.3e + %.3ej', '(%.15e%+.15ej)']` for 2 columns
    delimiter : str, optional
        String or character separating columns.
    newline : str, optional
        String or character separating lines.

        .. versionadded:: 1.5.0
    header : str, optional
        String that will be written at the beginning of the file.

        .. versionadded:: 1.7.0
    footer : str, optional
        String that will be written at the end of the file.

        .. versionadded:: 1.7.0
    comments : str, optional
        String that will be prepended to the ``header`` and ``footer`` strings,
        to mark them as comments. Default: '# ', as expected by e.g.
        ``numpy.loadtxt``.

        .. versionadded:: 1.7.0
    encoding : {None, str}, optional
        Encoding used to encode the output file. Does not apply to output
        streams. If the encoding is something other than 'bytes' or 'latin1'
        you will not be able to load the file in NumPy versions < 1.14. Default
        is 'latin1'.

        .. versionadded:: 1.14.0

    See Also
    --------
    save : Save an array to a binary file in NumPy ``.npy`` format
    savez : Save several arrays into an uncompressed ``.npz`` archive
    savez_compressed : Save several arrays into a compressed ``.npz`` archive

    Notes
    -----
    Further explanation of the `fmt` parameter
    (``%[flag]width[.precision]specifier``):

    flags:
        ``-`` : left justify

        ``+`` : Forces to precede result with + or -.

        ``0`` : Left pad the number with zeros instead of space (see width).

    width:
        Minimum number of characters to be printed. The value is not truncated
        if it has more characters.

    precision:
        - For integer specifiers (eg. ``d,i,o,x``), the minimum number of
          digits.
        - For ``e, E`` and ``f`` specifiers, the number of digits to print
          after the decimal point.
        - For ``g`` and ``G``, the maximum number of significant digits.
        - For ``s``, the maximum number of characters.

    specifiers:
        ``c`` : character

        ``d`` or ``i`` : signed decimal integer

        ``e`` or ``E`` : scientific notation with ``e`` or ``E``.

        ``f`` : decimal floating point

        ``g,G`` : use the shorter of ``e,E`` or ``f``

        ``o`` : signed octal

        ``s`` : string of characters

        ``u`` : unsigned decimal integer

        ``x,X`` : unsigned hexadecimal integer

    This explanation of ``fmt`` is not complete, for an exhaustive
    specification see [1]_.

    References
    ----------
    .. [1] `Format Specification Mini-Language
           <https://docs.python.org/library/string.html#format-specification-mini-language>`_,
           Python Documentation.

    Examples
    --------
    >>> x = y = z = np.arange(0.0, 5.0, 1.0)
    >>> np.savetxt('test.out', x, delimiter=',')   # X is an array
    >>> np.savetxt('test.out', (x, y, z))   # x,y,z equal sized 1D arrays
    >>> np.savetxt('test.out', x, fmt='%1.4e')   # use exponential notation

    """

    # Py3 conversions first
    if isinstance(fmt, bytes):
        fmt = asstr(fmt)
    delimiter = asstr(delimiter)

    class WriteWrap:
        """Convert to bytes on bytestream inputs.

        """
        def __init__(self, fh, encoding):
            self.fh = fh
            self.encoding = encoding
            self.do_write = self.first_write

        def close(self):
            self.fh.close()

        def write(self, v):
            self.do_write(v)

        def write_bytes(self, v):
            if isinstance(v, bytes):
                self.fh.write(v)
            else:
                self.fh.write(v.encode(self.encoding))

        def write_normal(self, v):
            self.fh.write(asunicode(v))

        def first_write(self, v):
            try:
                self.write_normal(v)
                self.write = self.write_normal
            except TypeError:
                # input is probably a bytestream
                self.write_bytes(v)
                self.write = self.write_bytes

    own_fh = False
    if isinstance(fname, os_PathLike):
        fname = os_fspath(fname)
    if _is_string_like(fname):
        # datasource doesn't support creating a new file ...
        open(fname, 'wt').close()
        fh = np.lib._datasource.open(fname, 'wt', encoding=encoding)
        own_fh = True
    elif hasattr(fname, 'write'):
        # wrap to handle byte output streams
        fh = WriteWrap(fname, encoding or 'latin1')
    else:
        raise ValueError('fname must be a string or file handle')

    try:
        X = np.asarray(X)

        # Handle 1-dimensional arrays
        if X.ndim == 0 or X.ndim > 2:
            raise ValueError(
                "Expected 1D or 2D array, got %dD array instead" % X.ndim)
        elif X.ndim == 1:
            # Common case -- 1d array of numbers
            if X.dtype.names is None:
                X = np.atleast_2d(X).T
                ncol = 1

            # Complex dtype -- each field indicates a separate column
            else:
                ncol = len(X.dtype.names)
        else:
            ncol = X.shape[1]

        iscomplex_X = np.iscomplexobj(X)
        # `fmt` can be a string with multiple insertion points or a
        # list of formats.  E.g. '%10.5f\t%10d' or ('%10.5f', '%10d')
        if type(fmt) in (list, tuple):
            if len(fmt) != ncol:
                raise AttributeError('fmt has wrong shape.  %s' % str(fmt))
            format = asstr(delimiter).join(map(asstr, fmt))
        elif isinstance(fmt, str):
            n_fmt_chars = fmt.count('%')
            error = ValueError('fmt has wrong number of %% formats:  %s' % fmt)
            if n_fmt_chars == 1:
                if iscomplex_X:
                    fmt = [' (%s+%sj)' % (fmt, fmt), ] * ncol
                else:
                    fmt = [fmt, ] * ncol
                format = delimiter.join(fmt)
            elif iscomplex_X and n_fmt_chars != (2 * ncol):
                raise error
            elif ((not iscomplex_X) and n_fmt_chars != ncol):
                raise error
            else:
                format = fmt
        else:
            raise ValueError('invalid fmt: %r' % (fmt,))

        if len(header) > 0:
            header = header.replace('\n', '\n' + comments)
            fh.write(comments + header + newline)
        if iscomplex_X:
            for row in X:
                row2 = []
                for number in row:
                    row2.append(number.real)
                    row2.append(number.imag)
                s = format % tuple(row2) + newline
                fh.write(s.replace('+-', '-'))
        else:
            for row in X:
                try:
                    v = format % tuple(row) + newline
                except TypeError as e:
                    raise TypeError("Mismatch between array dtype ('%s') and "
                                    "format specifier ('%s')"
                                    % (str(X.dtype), format)) from e
                fh.write(v)

        if len(footer) > 0:
            footer = footer.replace('\n', '\n' + comments)
            fh.write(comments + footer + newline)
    finally:
        if own_fh:
            fh.close()
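

# Illustrative sketch (editorial example, not part of NumPy's API): headers
# and footers are prefixed with `comments`, so the written text round-trips
# through `loadtxt`, which skips comment lines. Hypothetical helper name.
def _example_savetxt_roundtrip():
    import io
    buf = io.StringIO()
    data = np.array([[1.5, 2.0], [3.25, 4.0]])
    np.savetxt(buf, data, fmt="%.2f", delimiter=",",
               header="col_a,col_b", footer="end of data")
    buf.seek(0)
    return np.loadtxt(buf, delimiter=",")  # comment lines are skipped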
1638@set_module('numpy')
1639def fromregex(file, regexp, dtype, encoding=None):
1640 r"""
1641 Construct an array from a text file, using regular expression parsing.
1643 The returned array is always a structured array, and is constructed from
1644 all matches of the regular expression in the file. Groups in the regular
1645 expression are converted to fields of the structured array.
1647 Parameters
1648 ----------
1649 file : path or file
1650 Filename or file object to read.
1652 .. versionchanged:: 1.22.0
1653 Now accepts `os.PathLike` implementations.
1654 regexp : str or regexp
1655 Regular expression used to parse the file.
1656 Groups in the regular expression correspond to fields in the dtype.
1657 dtype : dtype or list of dtypes
1658 Dtype for the structured array; must be a structured datatype.
1659 encoding : str, optional
1660 Encoding used to decode the input file. Does not apply to input streams.
1662 .. versionadded:: 1.14.0
1664 Returns
1665 -------
1666 output : ndarray
1667 The output array, containing the part of the content of `file` that
1668 was matched by `regexp`. `output` is always a structured array.
1670 Raises
1671 ------
1672 TypeError
1673 When `dtype` is not a valid dtype for a structured array.
1675 See Also
1676 --------
1677 fromstring, loadtxt
1679 Notes
1680 -----
1681 Dtypes for structured arrays can be specified in several forms, but all
1682 forms specify at least the data type and field name. For details see
1683 `basics.rec`.
1685 Examples
1686 --------
1687 >>> from io import StringIO
1688 >>> text = StringIO("1312 foo\n1534 bar\n444 qux")
1690 >>> regexp = r"(\d+)\s+(...)" # match [digits, whitespace, anything]
1691 >>> output = np.fromregex(text, regexp,
1692 ... [('num', np.int64), ('key', 'S3')])
1693 >>> output
1694 array([(1312, b'foo'), (1534, b'bar'), ( 444, b'qux')],
1695 dtype=[('num', '<i8'), ('key', 'S3')])
1696 >>> output['num']
1697 array([1312, 1534,  444])
1699 """
1700 own_fh = False
1701 if not hasattr(file, "read"):
1702 file = os.fspath(file)
1703 file = np.lib._datasource.open(file, 'rt', encoding=encoding)
1704 own_fh = True
1706 try:
1707 if not isinstance(dtype, np.dtype):
1708 dtype = np.dtype(dtype)
1709 if dtype.names is None:
1710 raise TypeError('dtype must be a structured datatype.')
1712 content = file.read()
1713 if isinstance(content, bytes) and isinstance(regexp, str):
1714 regexp = asbytes(regexp)
1715 elif isinstance(content, str) and isinstance(regexp, bytes):
1716 regexp = asstr(regexp)
1718 if not hasattr(regexp, 'match'):
1719 regexp = re.compile(regexp)
1720 seq = regexp.findall(content)
1721 if seq and not isinstance(seq[0], tuple):
1722 # Only one group is in the regexp.
1723 # Create the new array as a single data-type and then
1724 # re-interpret as a single-field structured array.
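1725 # e.g. regexp r"(\d+)" with dtype=[('num', np.int64)]: findall
1725 # returns flat strings like ['1312', '1534'], so a plain int64
1725 # array is built first (illustrative values).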
1725 newdtype = np.dtype(dtype[dtype.names[0]])
1726 output = np.array(seq, dtype=newdtype)
1727 output.dtype = dtype
1728 else:
1729 output = np.array(seq, dtype=dtype)
1731 return output
1732 finally:
1733 if own_fh:
1734 file.close()
1737#####--------------------------------------------------------------------------
1738#---- --- ASCII functions ---
1739#####--------------------------------------------------------------------------
1742@set_array_function_like_doc
1743@set_module('numpy')
1744def genfromtxt(fname, dtype=float, comments='#', delimiter=None,
1745 skip_header=0, skip_footer=0, converters=None,
1746 missing_values=None, filling_values=None, usecols=None,
1747 names=None, excludelist=None,
1748 deletechars=''.join(sorted(NameValidator.defaultdeletechars)),
1749 replace_space='_', autostrip=False, case_sensitive=True,
1750 defaultfmt="f%i", unpack=None, usemask=False, loose=True,
1751 invalid_raise=True, max_rows=None, encoding='bytes',
1752 *, ndmin=0, like=None):
1753 """
1754 Load data from a text file, with missing values handled as specified.
1756 Each line past the first `skip_header` lines is split at the `delimiter`
1757 character, and characters following the `comments` character are discarded.
1759 Parameters
1760 ----------
1761 fname : file, str, pathlib.Path, list of str, generator
1762 File, filename, list, or generator to read. If the filename
1763 extension is ``.gz`` or ``.bz2``, the file is first decompressed. Note
1764 that generators must return bytes or strings. The strings
1765 in a list or produced by a generator are treated as lines.
1766 dtype : dtype, optional
1767 Data type of the resulting array.
1768 If None, the dtypes will be determined by the contents of each
1769 column, individually.
1770 comments : str, optional
1771 The character used to indicate the start of a comment.
1772 All the characters occurring on a line after a comment are discarded.
1773 delimiter : str, int, or sequence, optional
1774 The string used to separate values. By default, any consecutive
1775 whitespaces act as delimiter. An integer or sequence of integers
1776 can also be provided as width(s) of each field.
1777 skiprows : int, optional
1778 `skiprows` was removed in numpy 1.10. Please use `skip_header` instead.
1779 skip_header : int, optional
1780 The number of lines to skip at the beginning of the file.
1781 skip_footer : int, optional
1782 The number of lines to skip at the end of the file.
1783 converters : variable, optional
1784 The set of functions that convert the data of a column to a value.
1785 The converters can also be used to provide a default value
1786 for missing data: ``converters = {3: lambda s: float(s or 0)}``.
1787 missing : variable, optional
1788 `missing` was removed in numpy 1.10. Please use `missing_values`
1789 instead.
1790 missing_values : variable, optional
1791 The set of strings corresponding to missing data.
1792 filling_values : variable, optional
1793 The set of values to be used as default when the data are missing.
1794 usecols : sequence, optional
1795 Which columns to read, with 0 being the first. For example,
1796 ``usecols = (1, 4, 5)`` will extract the 2nd, 5th and 6th columns.
1797 names : {None, True, str, sequence}, optional
1798 If `names` is True, the field names are read from the first line after
1799 the first `skip_header` lines. This line can optionally be preceded
1800 by a comment delimiter. If `names` is a sequence or a single string of
1801 comma-separated names, the names will be used to define the field names
1802 in a structured dtype. If `names` is None, the names of the dtype
1803 fields will be used, if any.
1804 excludelist : sequence, optional
1805 A list of names to exclude. This list is appended to the default list
1806 ['return', 'file', 'print']. Excluded names are appended with an
1807 underscore: for example, `file` would become `file_`.
1808 deletechars : str, optional
1809 A string combining invalid characters that must be deleted from the
1810 names.
1811 defaultfmt : str, optional
1812 A format used to define default field names, such as "f%i" or "f_%02i".
1813 autostrip : bool, optional
1814 Whether to automatically strip whitespace from the variables.
1815 replace_space : char, optional
1816 Character(s) used to replace whitespace in the variable
1817 names. By default, '_' is used.
1818 case_sensitive : {True, False, 'upper', 'lower'}, optional
1819 If True, field names are case sensitive.
1820 If False or 'upper', field names are converted to upper case.
1821 If 'lower', field names are converted to lower case.
1822 unpack : bool, optional
1823 If True, the returned array is transposed, so that arguments may be
1824 unpacked using ``x, y, z = genfromtxt(...)``. When used with a
1825 structured data-type, arrays are returned for each field.
1826 Default is False.
1827 usemask : bool, optional
1828 If True, return a masked array.
1829 If False, return a regular array.
1830 loose : bool, optional
1831 If True, do not raise errors for invalid values.
1832 invalid_raise : bool, optional
1833 If True, an exception is raised if an inconsistency is detected in the
1834 number of columns.
1835 If False, a warning is emitted and the offending lines are skipped.
1836 max_rows : int, optional
1837 The maximum number of rows to read. Must not be used with skip_footer
1838 at the same time. If given, the value must be at least 1. Default is
1839 to read the entire file.
1841 .. versionadded:: 1.10.0
1842 encoding : str, optional
1843 Encoding used to decode the input file. Does not apply when `fname` is
1844 a file object. The special value 'bytes' enables backward-compatibility
1845 workarounds that ensure you receive byte arrays when possible
1846 and that pass latin1-encoded strings to converters. Override this value to
1847 receive unicode arrays and pass strings as input to converters. If set
1848 to None, the system default is used. The default value is 'bytes'.
1850 .. versionadded:: 1.14.0
1851 ndmin : int, optional
1852 Same parameter as for `loadtxt`.
1854 .. versionadded:: 1.23.0
1855 ${ARRAY_FUNCTION_LIKE}
1857 .. versionadded:: 1.20.0
1859 Returns
1860 -------
1861 out : ndarray
1862 Data read from the text file. If `usemask` is True, this is a
1863 masked array.
1865 See Also
1866 --------
1867 numpy.loadtxt : equivalent function when no data is missing.
1869 Notes
1870 -----
1871 * When spaces are used as delimiters, or when no delimiter has been given
1872 as input, there should not be any missing data between two fields.
1873 * When the variables are named (either by a flexible dtype or with `names`),
1874 there must not be any header in the file (else a ValueError
1875 exception is raised).
1876 * Individual values are not stripped of spaces by default.
1877 When using a custom converter, make sure the function removes spaces.
1879 References
1880 ----------
1881 .. [1] NumPy User Guide, section `I/O with NumPy
1882 <https://docs.scipy.org/doc/numpy/user/basics.io.genfromtxt.html>`_.
1884 Examples
1885 --------
1886 >>> from io import StringIO
1887 >>> import numpy as np
1889 Comma-delimited file with mixed dtype
1891 >>> s = StringIO(u"1,1.3,abcde")
1892 >>> data = np.genfromtxt(s, dtype=[('myint','i8'),('myfloat','f8'),
1893 ... ('mystring','S5')], delimiter=",")
1894 >>> data
1895 array((1, 1.3, b'abcde'),
1896 dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
1898 Using dtype = None
1900 >>> _ = s.seek(0) # needed for StringIO example only
1901 >>> data = np.genfromtxt(s, dtype=None,
1902 ... names = ['myint','myfloat','mystring'], delimiter=",")
1903 >>> data
1904 array((1, 1.3, b'abcde'),
1905 dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
1907 Specifying dtype and names
1909 >>> _ = s.seek(0)
1910 >>> data = np.genfromtxt(s, dtype="i8,f8,S5",
1911 ... names=['myint','myfloat','mystring'], delimiter=",")
1912 >>> data
1913 array((1, 1.3, b'abcde'),
1914 dtype=[('myint', '<i8'), ('myfloat', '<f8'), ('mystring', 'S5')])
1916 An example with fixed-width columns
1918 >>> s = StringIO(u"11.3abcde")
1919 >>> data = np.genfromtxt(s, dtype=None, names=['intvar','fltvar','strvar'],
1920 ... delimiter=[1,3,5])
1921 >>> data
1922 array((1, 1.3, b'abcde'),
1923 dtype=[('intvar', '<i8'), ('fltvar', '<f8'), ('strvar', 'S5')])
1925 An example showing comments
1927 >>> f = StringIO('''
1928 ... text,# of chars
1929 ... hello world,11
1930 ... numpy,5''')
1931 >>> np.genfromtxt(f, dtype='S12,S12', delimiter=',')
1932 array([(b'text', b''), (b'hello world', b'11'), (b'numpy', b'5')],
1933 dtype=[('f0', 'S12'), ('f1', 'S12')])
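An illustrative sketch of selecting columns with `usecols` (assuming
the default float dtype):
>>> f = StringIO('''
... 1 2 3
... 4 5 6''')
>>> np.genfromtxt(f, usecols=(0, 2))
array([[1., 3.],
       [4., 6.]])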
1935 """
1937 if like is not None:
1938 return _genfromtxt_with_like(
1939 like, fname, dtype=dtype, comments=comments, delimiter=delimiter,
1940 skip_header=skip_header, skip_footer=skip_footer,
1941 converters=converters, missing_values=missing_values,
1942 filling_values=filling_values, usecols=usecols, names=names,
1943 excludelist=excludelist, deletechars=deletechars,
1944 replace_space=replace_space, autostrip=autostrip,
1945 case_sensitive=case_sensitive, defaultfmt=defaultfmt,
1946 unpack=unpack, usemask=usemask, loose=loose,
1947 invalid_raise=invalid_raise, max_rows=max_rows, encoding=encoding,
1948 ndmin=ndmin,
1949 )
1951 _ensure_ndmin_ndarray_check_param(ndmin)
1953 if max_rows is not None:
1954 if skip_footer:
1955 raise ValueError(
1956 "The keywords 'skip_footer' and 'max_rows' can not be "
1957 "specified at the same time.")
1958 if max_rows < 1:
1959 raise ValueError("'max_rows' must be at least 1.")
1961 if usemask:
1962 from numpy.ma import MaskedArray, make_mask_descr
1963 # Check the input dictionary of converters
1964 user_converters = converters or {}
1965 if not isinstance(user_converters, dict):
1966 raise TypeError(
1967 "The input argument 'converter' should be a valid dictionary "
1968 "(got '%s' instead)" % type(user_converters))
1970 if encoding == 'bytes':
1971 encoding = None
1972 byte_converters = True
1973 else:
1974 byte_converters = False
1976 # Initialize the filehandle, the LineSplitter and the NameValidator
1977 if isinstance(fname, os_PathLike):
1978 fname = os_fspath(fname)
1979 if isinstance(fname, str):
1980 fid = np.lib._datasource.open(fname, 'rt', encoding=encoding)
1981 fid_ctx = contextlib.closing(fid)
1982 else:
1983 fid = fname
1984 fid_ctx = contextlib.nullcontext(fid)
1985 try:
1986 fhd = iter(fid)
1987 except TypeError as e:
1988 raise TypeError(
1989 "fname must be a string, a filehandle, a sequence of strings,\n"
1990 f"or an iterator of strings. Got {type(fname)} instead."
1991 ) from e
1992 with fid_ctx:
1993 split_line = LineSplitter(delimiter=delimiter, comments=comments,
1994 autostrip=autostrip, encoding=encoding)
1995 validate_names = NameValidator(excludelist=excludelist,
1996 deletechars=deletechars,
1997 case_sensitive=case_sensitive,
1998 replace_space=replace_space)
2000 # Skip the first `skip_header` rows
2001 try:
2002 for i in range(skip_header):
2003 next(fhd)
2005 # Keep reading until we find the first valid values
2006 first_values = None
2008 while not first_values:
2009 first_line = _decode_line(next(fhd), encoding)
2010 if (names is True) and (comments is not None):
2011 if comments in first_line:
2012 first_line = (
2013 ''.join(first_line.split(comments)[1:]))
2014 first_values = split_line(first_line)
2015 except StopIteration:
2016 # return an empty array if the datafile is empty
2017 first_line = ''
2018 first_values = []
2019 warnings.warn('genfromtxt: Empty input file: "%s"' % fname, stacklevel=2)
2021 # Should we take the first values as names?
2022 if names is True:
2023 fval = first_values[0].strip()
2024 if comments is not None:
2025 if fval in comments:
2026 del first_values[0]
2028 # Check the columns to use: make sure `usecols` is a list
2029 if usecols is not None:
2030 try:
2031 usecols = [_.strip() for _ in usecols.split(",")]
2032 except AttributeError:
2033 try:
2034 usecols = list(usecols)
2035 except TypeError:
2036 usecols = [usecols, ]
2037 nbcols = len(usecols or first_values)
2039 # Check the names and overwrite the dtype.names if needed
2040 if names is True:
2041 names = validate_names([str(_.strip()) for _ in first_values])
2042 first_line = ''
2043 elif _is_string_like(names):
2044 names = validate_names([_.strip() for _ in names.split(',')])
2045 elif names:
2046 names = validate_names(names)
2047 # Get the dtype
2048 if dtype is not None:
2049 dtype = easy_dtype(dtype, defaultfmt=defaultfmt, names=names,
2050 excludelist=excludelist,
2051 deletechars=deletechars,
2052 case_sensitive=case_sensitive,
2053 replace_space=replace_space)
2054 # Make sure the names form a list (originally for Python 2.5)
2055 if names is not None:
2056 names = list(names)
2058 if usecols:
2059 for (i, current) in enumerate(usecols):
2060 # if usecols is a list of names, convert to a list of indices
2061 if _is_string_like(current):
2062 usecols[i] = names.index(current)
2063 elif current < 0:
2064 usecols[i] = current + len(first_values)
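# e.g. usecols=('b', -1) with names ['a', 'b', 'c'] and three
# columns becomes [1, 2] (illustrative names).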
2065 # If the dtype is not None, make sure we update it
2066 if (dtype is not None) and (len(dtype) > nbcols):
2067 descr = dtype.descr
2068 dtype = np.dtype([descr[_] for _ in usecols])
2069 names = list(dtype.names)
2070 # If `names` is not None, update the names
2071 elif (names is not None) and (len(names) > nbcols):
2072 names = [names[_] for _ in usecols]
2073 elif (names is not None) and (dtype is not None):
2074 names = list(dtype.names)
2076 # Process the missing values ...............................
2077 # Rename missing_values for convenience
2078 user_missing_values = missing_values or ()
2079 if isinstance(user_missing_values, bytes):
2080 user_missing_values = user_missing_values.decode('latin1')
2082 # Define the list of missing_values (one column: one list)
2083 missing_values = [list(['']) for _ in range(nbcols)]
2085 # We have a dictionary: process it field by field
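# (e.g. missing_values={'a': 'N/A', None: '???'} attaches 'N/A' to
# the column named 'a' and '???' to every column; illustrative values)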
2086 if isinstance(user_missing_values, dict):
2087 # Loop on the items
2088 for (key, val) in user_missing_values.items():
2089 # Is the key a string?
2090 if _is_string_like(key):
2091 try:
2092 # Transform it into an integer
2093 key = names.index(key)
2094 except ValueError:
2095 # We couldn't find it: the name must have been dropped
2096 continue
2097 # Redefine the key as needed if it's a column number
2098 if usecols:
2099 try:
2100 key = usecols.index(key)
2101 except ValueError:
2102 pass
2103 # Transform the value into a list of strings
2104 if isinstance(val, (list, tuple)):
2105 val = [str(_) for _ in val]
2106 else:
2107 val = [str(val), ]
2108 # Add the value(s) to the current list of missing
2109 if key is None:
2110 # None acts as default
2111 for miss in missing_values:
2112 miss.extend(val)
2113 else:
2114 missing_values[key].extend(val)
2115 # We have a sequence: each item matches a column
2116 elif isinstance(user_missing_values, (list, tuple)):
2117 for (value, entry) in zip(user_missing_values, missing_values):
2118 value = str(value)
2119 if value not in entry:
2120 entry.append(value)
2121 # We have a string: apply it to all entries
2122 elif isinstance(user_missing_values, str):
2123 user_value = user_missing_values.split(",")
2124 for entry in missing_values:
2125 entry.extend(user_value)
2126 # We have something else: apply it to all entries
2127 else:
2128 for entry in missing_values:
2129 entry.extend([str(user_missing_values)])
2131 # Process the filling_values ...............................
2132 # Rename the input for convenience
2133 user_filling_values = filling_values
2134 if user_filling_values is None:
2135 user_filling_values = []
2136 # Define the default
2137 filling_values = [None] * nbcols
2138 # We have a dictionary: update each entry individually
2139 if isinstance(user_filling_values, dict):
2140 for (key, val) in user_filling_values.items():
2141 if _is_string_like(key):
2142 try:
2143 # Transform it into an integer
2144 key = names.index(key)
2145 except ValueError:
2146 # We couldn't find it: the name must have been dropped
2147 continue
2148 # Redefine the key if it's a column number and usecols is defined
2149 if usecols:
2150 try:
2151 key = usecols.index(key)
2152 except ValueError:
2153 pass
2154 # Add the value to the list
2155 filling_values[key] = val
2156 # We have a sequence: update on a one-to-one basis
2157 elif isinstance(user_filling_values, (list, tuple)):
2158 n = len(user_filling_values)
2159 if (n <= nbcols):
2160 filling_values[:n] = user_filling_values
2161 else:
2162 filling_values = user_filling_values[:nbcols]
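# e.g. filling_values=(0, 0.0) on a three-column file yields
# [0, 0.0, None] (illustrative values).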
2163 # We have something else: use it for all entries
2164 else:
2165 filling_values = [user_filling_values] * nbcols
2167 # Initialize the converters ................................
2168 if dtype is None:
2169 # Note: we can't use [StringConverter(...)] * nbcols, as that would
2170 # give nbcols references to a single converter instead of distinct ones.
2171 converters = [StringConverter(None, missing_values=miss, default=fill)
2172 for (miss, fill) in zip(missing_values, filling_values)]
2173 else:
2174 dtype_flat = flatten_dtype(dtype, flatten_base=True)
2175 # Initialize the converters
2176 if len(dtype_flat) > 1:
2177 # Flexible type: get a converter from each dtype
2178 zipit = zip(dtype_flat, missing_values, filling_values)
2179 converters = [StringConverter(dt, locked=True,
2180 missing_values=miss, default=fill)
2181 for (dt, miss, fill) in zipit]
2182 else:
2183 # Set to a default converter (but with different missing values)
2184 zipit = zip(missing_values, filling_values)
2185 converters = [StringConverter(dtype, locked=True,
2186 missing_values=miss, default=fill)
2187 for (miss, fill) in zipit]
2188 # Update the converters to use the user-defined ones
2189 uc_update = []
2190 for (j, conv) in user_converters.items():
2191 # If the converter is specified by column names, use the index instead
2192 if _is_string_like(j):
2193 try:
2194 j = names.index(j)
2195 i = j
2196 except ValueError:
2197 continue
2198 elif usecols:
2199 try:
2200 i = usecols.index(j)
2201 except ValueError:
2202 # Unused converter specified
2203 continue
2204 else:
2205 i = j
2206 # Find the value to test - first_line is not filtered by usecols:
2207 if len(first_line):
2208 testing_value = first_values[j]
2209 else:
2210 testing_value = None
2211 if conv is bytes:
2212 user_conv = asbytes
2213 elif byte_converters:
2214 # converters may use decode to work around numpy's old behaviour,
2215 # so encode the string again before passing to the user converter
2216 def tobytes_first(x, conv):
2217 if type(x) is bytes:
2218 return conv(x)
2219 return conv(x.encode("latin1"))
2220 user_conv = functools.partial(tobytes_first, conv=conv)
2221 else:
2222 user_conv = conv
2223 converters[i].update(user_conv, locked=True,
2224 testing_value=testing_value,
2225 default=filling_values[i],
2226 missing_values=missing_values[i],)
2227 uc_update.append((i, user_conv))
2228 # Make sure we have the corrected keys in user_converters...
2229 user_converters.update(uc_update)
2231 # FIXME: possible error, as the following variable is never used.
2232 # miss_chars = [_.missing_values for _ in converters]
2234 # Initialize the output lists ...
2235 # ... rows
2236 rows = []
2237 append_to_rows = rows.append
2238 # ... masks
2239 if usemask:
2240 masks = []
2241 append_to_masks = masks.append
2242 # ... invalid
2243 invalid = []
2244 append_to_invalid = invalid.append
2246 # Parse each line
2247 for (i, line) in enumerate(itertools.chain([first_line, ], fhd)):
2248 values = split_line(line)
2249 nbvalues = len(values)
2250 # Skip an empty line
2251 if nbvalues == 0:
2252 continue
2253 if usecols:
2254 # Select only the columns we need
2255 try:
2256 values = [values[_] for _ in usecols]
2257 except IndexError:
2258 append_to_invalid((i + skip_header + 1, nbvalues))
2259 continue
2260 elif nbvalues != nbcols:
2261 append_to_invalid((i + skip_header + 1, nbvalues))
2262 continue
2263 # Store the values
2264 append_to_rows(tuple(values))
2265 if usemask:
2266 append_to_masks(tuple([v.strip() in m
2267 for (v, m) in zip(values,
2268 missing_values)]))
2269 if len(rows) == max_rows:
2270 break
2272 # Upgrade the converters (if needed)
2273 if dtype is None:
2274 for (i, converter) in enumerate(converters):
2275 current_column = [itemgetter(i)(_m) for _m in rows]
2276 try:
2277 converter.iterupgrade(current_column)
2278 except ConverterLockError:
2279 errmsg = "Converter #%i is locked and cannot be upgraded: " % i
2280 current_column = map(itemgetter(i), rows)
2281 for (j, value) in enumerate(current_column):
2282 try:
2283 converter.upgrade(value)
2284 except (ConverterError, ValueError):
2285 errmsg += "(occurred on line #%i for value '%s')"
2286 errmsg %= (j + 1 + skip_header, value)
2287 raise ConverterError(errmsg)
2289 # Check that we don't have invalid values
2290 nbinvalid = len(invalid)
2291 if nbinvalid > 0:
2292 nbrows = len(rows) + nbinvalid - skip_footer
2293 # Construct the error message
2294 template = " Line #%%i (got %%i columns instead of %i)" % nbcols
2295 if skip_footer > 0:
2296 nbinvalid_skipped = len([_ for _ in invalid
2297 if _[0] > nbrows + skip_header])
2298 invalid = invalid[:nbinvalid - nbinvalid_skipped]
2299 skip_footer -= nbinvalid_skipped
2300#
2301# nbrows -= skip_footer
2302# errmsg = [template % (i, nb)
2303# for (i, nb) in invalid if i < nbrows]
2304# else:
2305 errmsg = [template % (i, nb)
2306 for (i, nb) in invalid]
2307 if len(errmsg):
2308 errmsg.insert(0, "Some errors were detected!")
2309 errmsg = "\n".join(errmsg)
2310 # Raise an exception?
2311 if invalid_raise:
2312 raise ValueError(errmsg)
2313 # Issue a warning?
2314 else:
2315 warnings.warn(errmsg, ConversionWarning, stacklevel=2)
2317 # Strip the last `skip_footer` rows
2318 if skip_footer > 0:
2319 rows = rows[:-skip_footer]
2320 if usemask:
2321 masks = masks[:-skip_footer]
2323 # Convert each value according to the converter:
2324 # We want to modify the list in place to avoid creating a new one...
2325 if loose:
2326 rows = list(
2327 zip(*[[conv._loose_call(_r) for _r in map(itemgetter(i), rows)]
2328 for (i, conv) in enumerate(converters)]))
2329 else:
2330 rows = list(
2331 zip(*[[conv._strict_call(_r) for _r in map(itemgetter(i), rows)]
2332 for (i, conv) in enumerate(converters)]))
2334 # Reset the dtype
2335 data = rows
2336 if dtype is None:
2337 # Get the dtypes from the types of the converters
2338 column_types = [conv.type for conv in converters]
2339 # Find the columns with strings...
2340 strcolidx = [i for (i, v) in enumerate(column_types)
2341 if v == np.str_]
2343 if byte_converters and strcolidx:
2344 # convert strings back to bytes for backward compatibility
2345 warnings.warn(
2346 "Reading unicode strings without specifying the encoding "
2347 "argument is deprecated. Set the encoding, use None for the "
2348 "system default.",
2349 np.VisibleDeprecationWarning, stacklevel=2)
2350 def encode_unicode_cols(row_tup):
2351 row = list(row_tup)
2352 for i in strcolidx:
2353 row[i] = row[i].encode('latin1')
2354 return tuple(row)
2356 try:
2357 data = [encode_unicode_cols(r) for r in data]
2358 except UnicodeEncodeError:
2359 pass
2360 else:
2361 for i in strcolidx:
2362 column_types[i] = np.bytes_
2364 # Update string types to be the right length
2365 sized_column_types = column_types[:]
2366 for i, col_type in enumerate(column_types):
2367 if np.issubdtype(col_type, np.character):
2368 n_chars = max(len(row[i]) for row in data)
2369 sized_column_types[i] = (col_type, n_chars)
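# e.g. a string column whose longest entry has 5 characters becomes
# (np.str_, 5), i.e. dtype '<U5' (illustrative width).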
2371 if names is None:
2372 # If the dtype is uniform (before sizing strings)
2373 base = {
2374 c_type
2375 for c, c_type in zip(converters, column_types)
2376 if c._checked}
2377 if len(base) == 1:
2378 uniform_type, = base
2379 (ddtype, mdtype) = (uniform_type, bool)
2380 else:
2381 ddtype = [(defaultfmt % i, dt)
2382 for (i, dt) in enumerate(sized_column_types)]
2383 if usemask:
2384 mdtype = [(defaultfmt % i, bool)
2385 for (i, dt) in enumerate(sized_column_types)]
2386 else:
2387 ddtype = list(zip(names, sized_column_types))
2388 mdtype = list(zip(names, [bool] * len(sized_column_types)))
2389 output = np.array(data, dtype=ddtype)
2390 if usemask:
2391 outputmask = np.array(masks, dtype=mdtype)
2392 else:
2393 # Overwrite the initial dtype names if needed
2394 if names and dtype.names is not None:
2395 dtype.names = names
2396 # Case 1. We have a structured type
2397 if len(dtype_flat) > 1:
2398 # Nested dtype, e.g. [('a', int), ('b', [('b0', int), ('b1', 'f4')])]
2399 # First, create the array using a flattened dtype:
2400 # [('a', int), ('b1', int), ('b2', float)]
2401 # Then, view the array using the specified dtype.
2402 if 'O' in (_.char for _ in dtype_flat):
2403 if has_nested_fields(dtype):
2404 raise NotImplementedError(
2405 "Nested fields involving objects are not supported...")
2406 else:
2407 output = np.array(data, dtype=dtype)
2408 else:
2409 rows = np.array(data, dtype=[('', _) for _ in dtype_flat])
2410 output = rows.view(dtype)
2411 # Now, process the rowmasks the same way
2412 if usemask:
2413 rowmasks = np.array(
2414 masks, dtype=np.dtype([('', bool) for t in dtype_flat]))
2415 # Construct the new dtype
2416 mdtype = make_mask_descr(dtype)
2417 outputmask = rowmasks.view(mdtype)
2418 # Case 2. We have a basic dtype
2419 else:
2420 # We used some user-defined converters
2421 if user_converters:
2422 ishomogeneous = True
2423 descr = []
2424 for i, ttype in enumerate([conv.type for conv in converters]):
2425 # Keep the dtype of the current converter
2426 if i in user_converters:
2427 ishomogeneous &= (ttype == dtype.type)
2428 if np.issubdtype(ttype, np.character):
2429 ttype = (ttype, max(len(row[i]) for row in data))
2430 descr.append(('', ttype))
2431 else:
2432 descr.append(('', dtype))
2433 # So we changed the dtype?
2434 if not ishomogeneous:
2435 # We have more than one field
2436 if len(descr) > 1:
2437 dtype = np.dtype(descr)
2438 # We have only one field: drop the name if not needed.
2439 else:
2440 dtype = np.dtype(ttype)
2441 #
2442 output = np.array(data, dtype)
2443 if usemask:
2444 if dtype.names is not None:
2445 mdtype = [(_, bool) for _ in dtype.names]
2446 else:
2447 mdtype = bool
2448 outputmask = np.array(masks, dtype=mdtype)
2449 # Try to take care of the missing data we missed
2450 names = output.dtype.names
2451 if usemask and names:
2452 for (name, conv) in zip(names, converters):
2453 missing_values = [conv(_) for _ in conv.missing_values
2454 if _ != '']
2455 for mval in missing_values:
2456 outputmask[name] |= (output[name] == mval)
2457 # Construct the final array
2458 if usemask:
2459 output = output.view(MaskedArray)
2460 output._mask = outputmask
2462 output = _ensure_ndmin_ndarray(output, ndmin=ndmin)
2464 if unpack:
2465 if names is None:
2466 return output.T
2467 elif len(names) == 1:
2468 # squeeze single-name dtypes too
2469 return output[names[0]]
2470 else:
2471 # For structured arrays with multiple fields,
2472 # return an array for each field.
2473 return [output[field] for field in names]
2474 return output
2477_genfromtxt_with_like = array_function_dispatch()(genfromtxt)
2480def recfromtxt(fname, **kwargs):
2481 """
2482 Load ASCII data from a file and return it in a record array.
2484 If ``usemask=False`` a standard `recarray` is returned;
2485 if ``usemask=True`` a MaskedRecords array is returned.
2487 Parameters
2488 ----------
2489 fname, kwargs : For a description of input parameters, see `genfromtxt`.
2491 See Also
2492 --------
2493 numpy.genfromtxt : generic function
2495 Notes
2496 -----
2497 By default, `dtype` is None, which means that the data-type of the output
2498 array will be determined from the data.
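Examples
--------
A minimal sketch (column types are inferred, since `dtype` defaults
to None here):
>>> from io import StringIO
>>> f = StringIO('''
... 1 2.5
... 3 4.5''')
>>> rec = np.recfromtxt(f)
>>> rec.f1
array([2.5, 4.5])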
2500 """
2501 kwargs.setdefault("dtype", None)
2502 usemask = kwargs.get('usemask', False)
2503 output = genfromtxt(fname, **kwargs)
2504 if usemask:
2505 from numpy.ma.mrecords import MaskedRecords
2506 output = output.view(MaskedRecords)
2507 else:
2508 output = output.view(np.recarray)
2509 return output
2512def recfromcsv(fname, **kwargs):
2513 """
2514 Load ASCII data stored in a comma-separated file.
2516 The returned array is a record array (if ``usemask=False``, see
2517 `recarray`) or a masked record array (if ``usemask=True``,
2518 see `ma.mrecords.MaskedRecords`).
2520 Parameters
2521 ----------
2522 fname, kwargs : For a description of input parameters, see `genfromtxt`.
2524 See Also
2525 --------
2526 numpy.genfromtxt : generic function to load ASCII data.
2528 Notes
2529 -----
2530 By default, `dtype` is None, which means that the data-type of the output
2531 array will be determined from the data.
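Examples
--------
A minimal sketch (by default the first row supplies the field names,
which are lower-cased):
>>> from io import StringIO
>>> f = StringIO('''
... A,B
... 1,2.5
... 3,4.5''')
>>> rec = np.recfromcsv(f)
>>> rec.a
array([1, 3])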
2533 """
2534 # Set default kwargs for genfromtxt as relevant to csv import.
2535 kwargs.setdefault("case_sensitive", "lower")
2536 kwargs.setdefault("names", True)
2537 kwargs.setdefault("delimiter", ",")
2538 kwargs.setdefault("dtype", None)
2539 output = genfromtxt(fname, **kwargs)
2541 usemask = kwargs.get("usemask", False)
2542 if usemask:
2543 from numpy.ma.mrecords import MaskedRecords
2544 output = output.view(MaskedRecords)
2545 else:
2546 output = output.view(np.recarray)
2547 return output