Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.9/dist-packages/numpy/lib/utils.py: 9%

import os
import sys
import textwrap
import types
import re
import warnings
import functools
import platform

from .._utils import set_module
from numpy.core.numerictypes import issubclass_, issubsctype, issubdtype
from numpy.core import ndarray, ufunc, asarray
import numpy as np

__all__ = [
    'issubclass_', 'issubsctype', 'issubdtype', 'deprecate',
    'deprecate_with_doc', 'get_include', 'info', 'source', 'who',
    'lookfor', 'byte_bounds', 'safe_eval', 'show_runtime'
    ]


def show_runtime():
    """
    Print information about various resources in the system
    including available intrinsic support and BLAS/LAPACK library
    in use

    .. versionadded:: 1.24.0

    See Also
    --------
    show_config : Show libraries in the system on which NumPy was built.

    Notes
    -----
    1. Information is derived with the help of `threadpoolctl <https://pypi.org/project/threadpoolctl/>`_
       library if available.
    2. SIMD related information is derived from ``__cpu_features__``,
       ``__cpu_baseline__`` and ``__cpu_dispatch__``

    """
    from numpy.core._multiarray_umath import (
        __cpu_features__, __cpu_baseline__, __cpu_dispatch__
    )
    from pprint import pprint
    config_found = [{
        "numpy_version": np.__version__,
        "python": sys.version,
        "uname": platform.uname(),
        }]
    features_found, features_not_found = [], []
    for feature in __cpu_dispatch__:
        if __cpu_features__[feature]:
            features_found.append(feature)
        else:
            features_not_found.append(feature)
    config_found.append({
        "simd_extensions": {
            "baseline": __cpu_baseline__,
            "found": features_found,
            "not_found": features_not_found
        }
    })
    try:
        from threadpoolctl import threadpool_info
        config_found.extend(threadpool_info())
    except ImportError:
        print("WARNING: `threadpoolctl` not found in system!"
              " Install it by `pip install threadpoolctl`."
              " Once installed, try `np.show_runtime` again"
              " for more detailed build information")
    pprint(config_found)
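

# Editor's illustrative sketch (not part of the original module): how
# `show_runtime` is typically used from an interactive session. The helper
# name `_example_show_runtime` is hypothetical and is never called at
# import time.
def _example_show_runtime():
    # Prints a list of dicts: numpy/python versions, uname, SIMD extensions
    # found / not found, and (if threadpoolctl is installed) BLAS/LAPACK info.
    import numpy as np
    np.show_runtime()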


def get_include():
    """
    Return the directory that contains the NumPy \\*.h header files.

    Extension modules that need to compile against NumPy should use this
    function to locate the appropriate include directory.

    Notes
    -----
    When using ``distutils``, for example in ``setup.py``::

        import numpy as np
        ...
        Extension('extension_name', ...
                include_dirs=[np.get_include()])
        ...

    """
    import numpy
    if numpy.show_config is None:
        # running from numpy source directory
        d = os.path.join(os.path.dirname(numpy.__file__), 'core', 'include')
    else:
        # using installed numpy core headers
        import numpy.core as core
        d = os.path.join(os.path.dirname(core.__file__), 'include')
    return d
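

# Editor's illustrative sketch (not part of the original module): a minimal
# setup.py-style use of `get_include` with setuptools. The extension name and
# source file below are hypothetical; this helper is never called at import
# time.
def _example_get_include_extension():
    from setuptools import Extension
    import numpy as np
    # Point the compiler at NumPy's C headers (ndarrayobject.h, etc.).
    return Extension('example_ext',
                     sources=['example_ext.c'],
                     include_dirs=[np.get_include()])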


class _Deprecate:
    """
    Decorator class to deprecate old functions.

    Refer to `deprecate` for details.

    See Also
    --------
    deprecate

    """

    def __init__(self, old_name=None, new_name=None, message=None):
        self.old_name = old_name
        self.new_name = new_name
        self.message = message

    def __call__(self, func, *args, **kwargs):
        """
        Decorator call.  Refer to ``deprecate``.

        """
        old_name = self.old_name
        new_name = self.new_name
        message = self.message

        if old_name is None:
            old_name = func.__name__
        if new_name is None:
            depdoc = "`%s` is deprecated!" % old_name
        else:
            depdoc = "`%s` is deprecated, use `%s` instead!" % \
                     (old_name, new_name)

        if message is not None:
            depdoc += "\n" + message

        @functools.wraps(func)
        def newfunc(*args, **kwds):
            warnings.warn(depdoc, DeprecationWarning, stacklevel=2)
            return func(*args, **kwds)

        newfunc.__name__ = old_name
        doc = func.__doc__
        if doc is None:
            doc = depdoc
        else:
            lines = doc.expandtabs().split('\n')
            indent = _get_indent(lines[1:])
            if lines[0].lstrip():
                # Indent the original first line to let inspect.cleandoc()
                # dedent the docstring despite the deprecation notice.
                doc = indent * ' ' + doc
            else:
                # Remove the same leading blank lines as cleandoc() would.
                skip = len(lines[0]) + 1
                for line in lines[1:]:
                    if len(line) > indent:
                        break
                    skip += len(line) + 1
                doc = doc[skip:]
            depdoc = textwrap.indent(depdoc, ' ' * indent)
            doc = '\n\n'.join([depdoc, doc])
        newfunc.__doc__ = doc

        return newfunc


def _get_indent(lines):
    """
    Determines the leading whitespace that could be removed from all the lines.
    """
    indent = sys.maxsize
    for line in lines:
        content = len(line.lstrip())
        if content:
            indent = min(indent, len(line) - content)
    if indent == sys.maxsize:
        indent = 0
    return indent
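

# Editor's illustrative sketch (not part of the original module): what
# `_get_indent` computes for a typical docstring body. The helper name is
# hypothetical and the function is never called at import time.
def _example_get_indent():
    lines = ["", "    First line.", "        indented more", "    Last line."]
    # The smallest indentation among non-blank lines is 4 spaces here.
    assert _get_indent(lines) == 4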


def deprecate(*args, **kwargs):
    """
    Issues a DeprecationWarning, adds warning to `old_name`'s
    docstring, rebinds ``old_name.__name__`` and returns the new
    function object.

    This function may also be used as a decorator.

    Parameters
    ----------
    func : function
        The function to be deprecated.
    old_name : str, optional
        The name of the function to be deprecated. Default is None, in
        which case the name of `func` is used.
    new_name : str, optional
        The new name for the function. Default is None, in which case the
        deprecation message is that `old_name` is deprecated. If given, the
        deprecation message is that `old_name` is deprecated and `new_name`
        should be used instead.
    message : str, optional
        Additional explanation of the deprecation.  Displayed in the
        docstring after the warning.

    Returns
    -------
    old_func : function
        The deprecated function.

    Examples
    --------
    Note that ``olduint`` returns a value after printing Deprecation
    Warning:

    >>> olduint = np.deprecate(np.uint)
    DeprecationWarning: `uint64` is deprecated! # may vary
    >>> olduint(6)
    6

    """
    # Deprecate may be run as a function or as a decorator
    # If run as a function, we initialise the decorator class
    # and execute its __call__ method.

    if args:
        fn = args[0]
        args = args[1:]

        return _Deprecate(*args, **kwargs)(fn)
    else:
        return _Deprecate(*args, **kwargs)


def deprecate_with_doc(msg):
    """
    Deprecates a function and includes the deprecation in its docstring.

    This function is used as a decorator. It returns an object that can be
    used to issue a DeprecationWarning, by passing the to-be-decorated
    function as argument; this adds a warning to the to-be-decorated
    function's docstring and returns the new function object.

    See Also
    --------
    deprecate : Decorate a function such that it issues a `DeprecationWarning`

    Parameters
    ----------
    msg : str
        Additional explanation of the deprecation. Displayed in the
        docstring after the warning.

    Returns
    -------
    obj : object

    """
    return _Deprecate(message=msg)
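

# Editor's illustrative sketch (not part of the original module): using
# `deprecate_with_doc` as a decorator. The decorated name `_example_old_sum`
# and its replacement suggestion are hypothetical.
@deprecate_with_doc("Use `numpy.add.reduce` instead.")
def _example_old_sum(values):
    """Sum the given values."""
    # Calling this emits a DeprecationWarning, and the message above is
    # prepended to the docstring by `_Deprecate`.
    return sum(values)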


#--------------------------------------------
# Determine if two arrays can share memory
#--------------------------------------------


def byte_bounds(a):
    """
    Returns pointers to the end-points of an array.

    Parameters
    ----------
    a : ndarray
        Input array. It must conform to the Python-side of the array
        interface.

    Returns
    -------
    (low, high) : tuple of 2 integers
        The first integer is the first byte of the array, the second
        integer is just past the last byte of the array.  If `a` is not
        contiguous it will not use every byte between the (`low`, `high`)
        values.

    Examples
    --------
    >>> I = np.eye(2, dtype='f'); I.dtype
    dtype('float32')
    >>> low, high = np.byte_bounds(I)
    >>> high - low == I.size*I.itemsize
    True
    >>> I = np.eye(2); I.dtype
    dtype('float64')
    >>> low, high = np.byte_bounds(I)
    >>> high - low == I.size*I.itemsize
    True

    """
    ai = a.__array_interface__
    a_data = ai['data'][0]
    astrides = ai['strides']
    ashape = ai['shape']
    bytes_a = asarray(a).dtype.itemsize

    a_low = a_high = a_data
    if astrides is None:
        # contiguous case
        a_high += a.size * bytes_a
    else:
        for shape, stride in zip(ashape, astrides):
            if stride < 0:
                a_low += (shape-1)*stride
            else:
                a_high += (shape-1)*stride
        a_high += bytes_a
    return a_low, a_high
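

# Editor's illustrative sketch (not part of the original module): the comment
# block above mentions deciding whether two arrays can share memory, and
# `byte_bounds` can be used for a crude overlap test. The helper name is
# hypothetical; NumPy also provides np.may_share_memory for this purpose.
def _example_may_share_memory(x, y):
    x_low, x_high = byte_bounds(x)
    y_low, y_high = byte_bounds(y)
    # Two arrays cannot share memory if their byte ranges do not overlap.
    return x_low < y_high and y_low < x_high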


#-----------------------------------------------------------------------------
# Function for output and information on the variables used.
#-----------------------------------------------------------------------------


def who(vardict=None):
    """
    Print the NumPy arrays in the given dictionary.

    If there is no dictionary passed in or `vardict` is None then returns
    NumPy arrays in the globals() dictionary (all NumPy arrays in the
    namespace).

    Parameters
    ----------
    vardict : dict, optional
        A dictionary possibly containing ndarrays.  Default is globals().

    Returns
    -------
    out : None
        Returns 'None'.

    Notes
    -----
    Prints out the name, shape, bytes and type of all of the ndarrays
    present in `vardict`.

    Examples
    --------
    >>> a = np.arange(10)
    >>> b = np.ones(20)
    >>> np.who()
    Name            Shape            Bytes            Type
    ===========================================================
    a               10               80               int64
    b               20               160              float64
    Upper bound on total bytes  =       240

    >>> d = {'x': np.arange(2.0), 'y': np.arange(3.0), 'txt': 'Some str',
    ... 'idx':5}
    >>> np.who(d)
    Name            Shape            Bytes            Type
    ===========================================================
    x               2                16               float64
    y               3                24               float64
    Upper bound on total bytes  =       40

    """
    if vardict is None:
        frame = sys._getframe().f_back
        vardict = frame.f_globals
    sta = []
    cache = {}
    for name in vardict.keys():
        if isinstance(vardict[name], ndarray):
            var = vardict[name]
            idv = id(var)
            if idv in cache.keys():
                namestr = name + " (%s)" % cache[idv]
                original = 0
            else:
                cache[idv] = name
                namestr = name
                original = 1
            shapestr = " x ".join(map(str, var.shape))
            bytestr = str(var.nbytes)
            sta.append([namestr, shapestr, bytestr, var.dtype.name,
                        original])

    maxname = 0
    maxshape = 0
    maxbyte = 0
    totalbytes = 0
    for val in sta:
        if maxname < len(val[0]):
            maxname = len(val[0])
        if maxshape < len(val[1]):
            maxshape = len(val[1])
        if maxbyte < len(val[2]):
            maxbyte = len(val[2])
        if val[4]:
            totalbytes += int(val[2])

    if len(sta) > 0:
        sp1 = max(10, maxname)
        sp2 = max(10, maxshape)
        sp3 = max(10, maxbyte)
        prval = "Name %s Shape %s Bytes %s Type" % (sp1*' ', sp2*' ', sp3*' ')
        print(prval + "\n" + "="*(len(prval)+5) + "\n")

    for val in sta:
        print("%s %s %s %s %s %s %s" % (val[0], ' '*(sp1-len(val[0])+4),
                                        val[1], ' '*(sp2-len(val[1])+5),
                                        val[2], ' '*(sp3-len(val[2])+5),
                                        val[3]))
    print("\nUpper bound on total bytes = %d" % totalbytes)
    return


#-----------------------------------------------------------------------------


# NOTE: pydoc defines a help function which works similarly to this
# except it uses a pager to take over the screen.

# combine name and arguments and split to multiple lines of width
# characters.  End lines on a comma and begin argument list indented with
# the rest of the arguments.
def _split_line(name, arguments, width):
    firstwidth = len(name)
    k = firstwidth
    newstr = name
    sepstr = ", "
    arglist = arguments.split(sepstr)
    for argument in arglist:
        if k == firstwidth:
            addstr = ""
        else:
            addstr = sepstr
        k = k + len(argument) + len(addstr)
        if k > width:
            k = firstwidth + 1 + len(argument)
            newstr = newstr + ",\n" + " "*(firstwidth+2) + argument
        else:
            newstr = newstr + addstr + argument
    return newstr
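

# Editor's illustrative sketch (not part of the original module): how
# `_split_line` wraps a long signature onto multiple lines. The helper name
# is hypothetical and the function is never called at import time.
def _example_split_line():
    wrapped = _split_line("some_function",
                          "alpha=1, beta=2, gamma=3, delta=4, epsilon=5",
                          width=30)
    # Lines end on a comma and continuation lines are indented past the name.
    print(wrapped)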


_namedict = None
_dictlist = None

# Traverse all module directories underneath globals
# to see if something is defined
def _makenamedict(module='numpy'):
    module = __import__(module, globals(), locals(), [])
    thedict = {module.__name__:module.__dict__}
    dictlist = [module.__name__]
    totraverse = [module.__dict__]
    while True:
        if len(totraverse) == 0:
            break
        thisdict = totraverse.pop(0)
        for x in thisdict.keys():
            if isinstance(thisdict[x], types.ModuleType):
                modname = thisdict[x].__name__
                if modname not in dictlist:
                    moddict = thisdict[x].__dict__
                    dictlist.append(modname)
                    totraverse.append(moddict)
                    thedict[modname] = moddict
    return thedict, dictlist


def _info(obj, output=None):
    """Provide information about ndarray obj.

    Parameters
    ----------
    obj : ndarray
        Must be ndarray, not checked.
    output
        Where printed output goes.

    Notes
    -----
    Copied over from the numarray module prior to its removal.
    Adapted somewhat as only numpy is an option now.

    Called by info.

    """
    extra = ""
    tic = ""
    bp = lambda x: x
    cls = getattr(obj, '__class__', type(obj))
    nm = getattr(cls, '__name__', cls)
    strides = obj.strides
    endian = obj.dtype.byteorder

    if output is None:
        output = sys.stdout

    print("class: ", nm, file=output)
    print("shape: ", obj.shape, file=output)
    print("strides: ", strides, file=output)
    print("itemsize: ", obj.itemsize, file=output)
    print("aligned: ", bp(obj.flags.aligned), file=output)
    print("contiguous: ", bp(obj.flags.contiguous), file=output)
    print("fortran: ", obj.flags.fortran, file=output)
    print(
        "data pointer: %s%s" % (hex(obj.ctypes._as_parameter_.value), extra),
        file=output
    )
    print("byteorder: ", end=' ', file=output)
    if endian in ['|', '=']:
        print("%s%s%s" % (tic, sys.byteorder, tic), file=output)
        byteswap = False
    elif endian == '>':
        print("%sbig%s" % (tic, tic), file=output)
        byteswap = sys.byteorder != "big"
    else:
        print("%slittle%s" % (tic, tic), file=output)
        byteswap = sys.byteorder != "little"
    print("byteswap: ", bp(byteswap), file=output)
    print("type: %s" % obj.dtype, file=output)


@set_module('numpy')
def info(object=None, maxwidth=76, output=None, toplevel='numpy'):
    """
    Get help information for an array, function, class, or module.

    Parameters
    ----------
    object : object or str, optional
        Input object or name to get information about. If `object` is
        an `ndarray` instance, information about the array is printed.
        If `object` is a numpy object, its docstring is given. If it is
        a string, available modules are searched for matching objects.
        If None, information about `info` itself is returned.
    maxwidth : int, optional
        Printing width.
    output : file like object, optional
        File like object that the output is written to, default is
        ``None``, in which case ``sys.stdout`` will be used.
        The object has to be opened in 'w' or 'a' mode.
    toplevel : str, optional
        Start search at this level.

    See Also
    --------
    source, lookfor

    Notes
    -----
    When used interactively with an object, ``np.info(obj)`` is equivalent
    to ``help(obj)`` on the Python prompt or ``obj?`` on the IPython
    prompt.

    Examples
    --------
    >>> np.info(np.polyval) # doctest: +SKIP
       polyval(p, x)
         Evaluate the polynomial p at x.
         ...

    When using a string for `object` it is possible to get multiple results.

    >>> np.info('fft') # doctest: +SKIP
         *** Found in numpy ***
    Core FFT routines
    ...
         *** Found in numpy.fft ***
     fft(a, n=None, axis=-1)
    ...
         *** Repeat reference found in numpy.fft.fftpack ***
         *** Total of 3 references found. ***

    When the argument is an array, information about the array is printed.

    >>> a = np.array([[1 + 2j, 3, -4], [-5j, 6, 0]], dtype=np.complex64)
    >>> np.info(a)
    class:  ndarray
    shape:  (2, 3)
    strides:  (24, 8)
    itemsize:  8
    aligned:  True
    contiguous:  True
    fortran:  False
    data pointer: 0x562b6e0d2860  # may vary
    byteorder:  little
    byteswap:  False
    type: complex64

    """
    global _namedict, _dictlist
    # Local import to speed up numpy's import time.
    import pydoc
    import inspect

    if (hasattr(object, '_ppimport_importer') or
           hasattr(object, '_ppimport_module')):
        object = object._ppimport_module
    elif hasattr(object, '_ppimport_attr'):
        object = object._ppimport_attr

    if output is None:
        output = sys.stdout

    if object is None:
        info(info)
    elif isinstance(object, ndarray):
        _info(object, output=output)
    elif isinstance(object, str):
        if _namedict is None:
            _namedict, _dictlist = _makenamedict(toplevel)
        numfound = 0
        objlist = []
        for namestr in _dictlist:
            try:
                obj = _namedict[namestr][object]
                if id(obj) in objlist:
                    print("\n     "
                          "*** Repeat reference found in %s *** " % namestr,
                          file=output
                          )
                else:
                    objlist.append(id(obj))
                    print("     *** Found in %s ***" % namestr, file=output)
                    info(obj)
                    print("-"*maxwidth, file=output)
                numfound += 1
            except KeyError:
                pass
        if numfound == 0:
            print("Help for %s not found." % object, file=output)
        else:
            print("\n     "
                  "*** Total of %d references found. ***" % numfound,
                  file=output
                  )

    elif inspect.isfunction(object) or inspect.ismethod(object):
        name = object.__name__
        try:
            arguments = str(inspect.signature(object))
        except Exception:
            arguments = "()"

        if len(name+arguments) > maxwidth:
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = name + arguments

        print(" " + argstr + "\n", file=output)
        print(inspect.getdoc(object), file=output)

    elif inspect.isclass(object):
        name = object.__name__
        try:
            arguments = str(inspect.signature(object))
        except Exception:
            arguments = "()"

        if len(name+arguments) > maxwidth:
            argstr = _split_line(name, arguments, maxwidth)
        else:
            argstr = name + arguments

        print(" " + argstr + "\n", file=output)
        doc1 = inspect.getdoc(object)
        if doc1 is None:
            if hasattr(object, '__init__'):
                print(inspect.getdoc(object.__init__), file=output)
        else:
            print(inspect.getdoc(object), file=output)

        methods = pydoc.allmethods(object)

        public_methods = [meth for meth in methods if meth[0] != '_']
        if public_methods:
            print("\n\nMethods:\n", file=output)
            for meth in public_methods:
                thisobj = getattr(object, meth, None)
                if thisobj is not None:
                    methstr, other = pydoc.splitdoc(
                            inspect.getdoc(thisobj) or "None"
                            )
                print("  %s  --  %s" % (meth, methstr), file=output)

    elif hasattr(object, '__doc__'):
        print(inspect.getdoc(object), file=output)


@set_module('numpy')
def source(object, output=sys.stdout):
    """
    Print or write to a file the source code for a NumPy object.

    The source code is only returned for objects written in Python. Many
    functions and classes are defined in C and will therefore not return
    useful information.

    Parameters
    ----------
    object : numpy object
        Input object. This can be any object (function, class, module,
        ...).
    output : file object, optional
        If `output` not supplied then source code is printed to screen
        (sys.stdout).  File object must be created with either write 'w' or
        append 'a' modes.

    See Also
    --------
    lookfor, info

    Examples
    --------
    >>> np.source(np.interp)  #doctest: +SKIP
    In file: /usr/lib/python2.6/dist-packages/numpy/lib/function_base.py
    def interp(x, xp, fp, left=None, right=None):
        \"\"\".... (full docstring printed)\"\"\"
        if isinstance(x, (float, int, number)):
            return compiled_interp([x], xp, fp, left, right).item()
        else:
            return compiled_interp(x, xp, fp, left, right)

    The source code is only returned for objects written in Python.

    >>> np.source(np.array)  #doctest: +SKIP
    Not available for this object.

    """
    # Local import to speed up numpy's import time.
    import inspect
    try:
        print("In file: %s\n" % inspect.getsourcefile(object), file=output)
        print(inspect.getsource(object), file=output)
    except Exception:
        print("Not available for this object.", file=output)


# Cache for lookfor: {id(module): {name: (docstring, kind, index), ...}...}
# where kind: "func", "class", "module", "object"
# and index: index in breadth-first namespace traversal
_lookfor_caches = {}

# regexp whose match indicates that the string may contain a function
# signature
_function_signature_re = re.compile(r"[a-z0-9_]+\(.*[,=].*\)", re.I)


@set_module('numpy')
def lookfor(what, module=None, import_modules=True, regenerate=False,
            output=None):
    """
    Do a keyword search on docstrings.

    A list of objects that matched the search is displayed,
    sorted by relevance. All given keywords need to be found in the
    docstring for it to be returned as a result, but the order does
    not matter.

    Parameters
    ----------
    what : str
        String containing words to look for.
    module : str or list, optional
        Name of module(s) whose docstrings to go through.
    import_modules : bool, optional
        Whether to import sub-modules in packages. Default is True.
    regenerate : bool, optional
        Whether to re-generate the docstring cache. Default is False.
    output : file-like, optional
        File-like object to write the output to. If omitted, use a pager.

    See Also
    --------
    source, info

    Notes
    -----
    Relevance is determined only roughly, by checking if the keywords occur
    in the function name, at the start of a docstring, etc.

    Examples
    --------
    >>> np.lookfor('binary representation') # doctest: +SKIP
    Search results for 'binary representation'
    ------------------------------------------
    numpy.binary_repr
        Return the binary representation of the input number as a string.
    numpy.core.setup_common.long_double_representation
        Given a binary dump as given by GNU od -b, look for long double
    numpy.base_repr
        Return a string representation of a number in the given base system.
    ...

    """
    import pydoc

    # Cache
    cache = _lookfor_generate_cache(module, import_modules, regenerate)

    # Search
    # XXX: maybe using a real stemming search engine would be better?
    found = []
    whats = str(what).lower().split()
    if not whats:
        return

    for name, (docstring, kind, index) in cache.items():
        if kind in ('module', 'object'):
            # don't show modules or objects
            continue
        doc = docstring.lower()
        if all(w in doc for w in whats):
            found.append(name)

    # Relevance sort
    # XXX: this is full Harrison-Stetson heuristics now,
    # XXX: it probably could be improved

    kind_relevance = {'func': 1000, 'class': 1000,
                      'module': -1000, 'object': -1000}

    def relevance(name, docstr, kind, index):
        r = 0
        # do the keywords occur within the start of the docstring?
        first_doc = "\n".join(docstr.lower().strip().split("\n")[:3])
        r += sum([200 for w in whats if w in first_doc])
        # do the keywords occur in the function name?
        r += sum([30 for w in whats if w in name])
        # is the full name long?
        r += -len(name) * 5
        # is the object of bad type?
        r += kind_relevance.get(kind, -1000)
        # is the object deep in namespace hierarchy?
        r += -name.count('.') * 10
        r += max(-index / 100, -100)
        return r

    def relevance_value(a):
        return relevance(a, *cache[a])
    found.sort(key=relevance_value)

    # Pretty-print
    s = "Search results for '%s'" % (' '.join(whats))
    help_text = [s, "-"*len(s)]
    for name in found[::-1]:
        doc, kind, ix = cache[name]

        doclines = [line.strip() for line in doc.strip().split("\n")
                    if line.strip()]

        # find a suitable short description
        try:
            first_doc = doclines[0].strip()
            if _function_signature_re.search(first_doc):
                first_doc = doclines[1].strip()
        except IndexError:
            first_doc = ""
        help_text.append("%s\n    %s" % (name, first_doc))

    if not found:
        help_text.append("Nothing found.")

    # Output
    if output is not None:
        output.write("\n".join(help_text))
    elif len(help_text) > 10:
        pager = pydoc.getpager()
        pager("\n".join(help_text))
    else:
        print("\n".join(help_text))


def _lookfor_generate_cache(module, import_modules, regenerate):
    """
    Generate docstring cache for given module.

    Parameters
    ----------
    module : str, None, module
        Module for which to generate docstring cache
    import_modules : bool
        Whether to import sub-modules in packages.
    regenerate : bool
        Re-generate the docstring cache

    Returns
    -------
    cache : dict {obj_full_name: (docstring, kind, index), ...}
        Docstring cache for the module, either cached one (regenerate=False)
        or newly generated.

    """
    # Local import to speed up numpy's import time.
    import inspect

    from io import StringIO

    if module is None:
        module = "numpy"

    if isinstance(module, str):
        try:
            __import__(module)
        except ImportError:
            return {}
        module = sys.modules[module]
    elif isinstance(module, list) or isinstance(module, tuple):
        cache = {}
        for mod in module:
            cache.update(_lookfor_generate_cache(mod, import_modules,
                                                 regenerate))
        return cache

    if id(module) in _lookfor_caches and not regenerate:
        return _lookfor_caches[id(module)]

    # walk items and collect docstrings
    cache = {}
    _lookfor_caches[id(module)] = cache
    seen = {}
    index = 0
    stack = [(module.__name__, module)]
    while stack:
        name, item = stack.pop(0)
        if id(item) in seen:
            continue
        seen[id(item)] = True

        index += 1
        kind = "object"

        if inspect.ismodule(item):
            kind = "module"
            try:
                _all = item.__all__
            except AttributeError:
                _all = None

            # import sub-packages
            if import_modules and hasattr(item, '__path__'):
                for pth in item.__path__:
                    for mod_path in os.listdir(pth):
                        this_py = os.path.join(pth, mod_path)
                        init_py = os.path.join(pth, mod_path, '__init__.py')
                        if (os.path.isfile(this_py) and
                                mod_path.endswith('.py')):
                            to_import = mod_path[:-3]
                        elif os.path.isfile(init_py):
                            to_import = mod_path
                        else:
                            continue
                        if to_import == '__init__':
                            continue

                        try:
                            old_stdout = sys.stdout
                            old_stderr = sys.stderr
                            try:
                                sys.stdout = StringIO()
                                sys.stderr = StringIO()
                                __import__("%s.%s" % (name, to_import))
                            finally:
                                sys.stdout = old_stdout
                                sys.stderr = old_stderr
                        except KeyboardInterrupt:
                            # Assume keyboard interrupt came from a user
                            raise
                        except BaseException:
                            # Ignore also SystemExit and pytests.importorskip
                            # `Skipped` (these are BaseExceptions; gh-22345)
                            continue

            for n, v in _getmembers(item):
                try:
                    item_name = getattr(v, '__name__', "%s.%s" % (name, n))
                    mod_name = getattr(v, '__module__', None)
                except NameError:
                    # ref. SWIG's global cvars
                    # NameError: Unknown C global variable
                    item_name = "%s.%s" % (name, n)
                    mod_name = None
                if '.' not in item_name and mod_name:
                    item_name = "%s.%s" % (mod_name, item_name)

                if not item_name.startswith(name + '.'):
                    # don't crawl "foreign" objects
                    if isinstance(v, ufunc):
                        # ... unless they are ufuncs
                        pass
                    else:
                        continue
                elif not (inspect.ismodule(v) or _all is None or n in _all):
                    continue
                stack.append(("%s.%s" % (name, n), v))
        elif inspect.isclass(item):
            kind = "class"
            for n, v in _getmembers(item):
                stack.append(("%s.%s" % (name, n), v))
        elif hasattr(item, "__call__"):
            kind = "func"

        try:
            doc = inspect.getdoc(item)
        except NameError:
            # ref SWIG's NameError: Unknown C global variable
            doc = None
        if doc is not None:
            cache[name] = (doc, kind, index)

    return cache


def _getmembers(item):
    import inspect
    try:
        members = inspect.getmembers(item)
    except Exception:
        members = [(x, getattr(item, x)) for x in dir(item)
                   if hasattr(item, x)]
    return members


def safe_eval(source):
    """
    Protected string evaluation.

    Evaluate a string containing a Python literal expression without
    allowing the execution of arbitrary non-literal code.

    .. warning::

        This function is identical to :py:meth:`ast.literal_eval` and
        has the same security implications.  It may not always be safe
        to evaluate large input strings.

    Parameters
    ----------
    source : str
        The string to evaluate.

    Returns
    -------
    obj : object
       The result of evaluating `source`.

    Raises
    ------
    SyntaxError
        If the code has invalid Python syntax, or if it contains
        non-literal code.

    Examples
    --------
    >>> np.safe_eval('1')
    1
    >>> np.safe_eval('[1, 2, 3]')
    [1, 2, 3]
    >>> np.safe_eval('{"foo": ("bar", 10.0)}')
    {'foo': ('bar', 10.0)}

    >>> np.safe_eval('import os')
    Traceback (most recent call last):
      ...
    SyntaxError: invalid syntax

    >>> np.safe_eval('open("/home/user/.ssh/id_dsa").read()')
    Traceback (most recent call last):
      ...
    ValueError: malformed node or string: <_ast.Call object at 0x...>

    """
    # Local import to speed up numpy's import time.
    import ast
    return ast.literal_eval(source)


def _median_nancheck(data, result, axis):
    """
    Utility function to check median result from data for NaN values at the
    end and return NaN in that case. Input result can also be a MaskedArray.

    Parameters
    ----------
    data : array
        Sorted input data to median function
    result : Array or MaskedArray
        Result of median function.
    axis : int
        Axis along which the median was computed.

    Returns
    -------
    result : scalar or ndarray
        Median or NaN in axes which contained NaN in the input.  If the input
        was an array, NaN will be inserted in-place.  If a scalar, either the
        input itself or a scalar NaN.
    """
    if data.size == 0:
        return result
    potential_nans = data.take(-1, axis=axis)
    n = np.isnan(potential_nans)
    # masked NaN values are ok, although for masked the copyto may fail for
    # unmasked ones (this was always broken) when the result is a scalar.
    if np.ma.isMaskedArray(n):
        n = n.filled(False)

    if not n.any():
        return result

    # Without given output, it is possible that the current result is a
    # numpy scalar, which is not writeable.  If so, just return nan.
    if isinstance(result, np.generic):
        return potential_nans

    # Otherwise copy NaNs (if there are any)
    np.copyto(result, potential_nans, where=n)
    return result
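

# Editor's illustrative sketch (not part of the original module): what the
# NaN check does for sorted data whose last element along the axis is NaN
# (NaN sorts to the end, so only that element needs inspecting). The helper
# name is hypothetical and the function is never called at import time.
def _example_median_nancheck():
    data = np.sort(np.array([[1.0, 2.0, np.nan], [1.0, 2.0, 3.0]]), axis=1)
    result = np.array([2.0, 2.0])       # medians before the NaN check
    result = _median_nancheck(data, result, axis=1)
    # The first row contained NaN, so its median is replaced by NaN in-place.
    assert np.isnan(result[0]) and result[1] == 2.0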


def _opt_info():
    """
    Returns a string containing the CPU features supported by the current
    build.

    The string format can be explained as follows:
        - dispatched features that are supported by the running machine
          end with `*`.
        - dispatched features that are "not" supported by the running machine
          end with `?`.
        - remaining features represent the baseline.
    """
    from numpy.core._multiarray_umath import (
        __cpu_features__, __cpu_baseline__, __cpu_dispatch__
    )

    if len(__cpu_baseline__) == 0 and len(__cpu_dispatch__) == 0:
        return ''

    enabled_features = ' '.join(__cpu_baseline__)
    for feature in __cpu_dispatch__:
        if __cpu_features__[feature]:
            enabled_features += f" {feature}*"
        else:
            enabled_features += f" {feature}?"

    return enabled_features
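

# Editor's illustrative sketch (not part of the original module): the kind of
# string `_opt_info` builds. On an x86-64 machine it might look like
# "SSE SSE2 SSE3 SSSE3* SSE41* AVX2* AVX512F?", where the unstarred names are
# the baseline, `*` marks dispatched features available at runtime and `?`
# marks dispatched features the running CPU lacks. The exact contents depend
# on the build and machine; this helper is never called at import time.
def _example_opt_info():
    print(_opt_info())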


def drop_metadata(dtype, /):
    """
    Returns the dtype unchanged if it contained no metadata or a copy of the
    dtype if it (or any of its structure dtypes) contained metadata.

    This utility is used by `np.save` and `np.savez` to drop metadata before
    saving.

    .. note::

        Due to its limitation this function may move to a more appropriate
        home or change in the future and is considered semi-public API only.

    .. warning::

        This function does not preserve more strange things like record dtypes
        and user dtypes may simply return the wrong thing.  If you need to be
        sure about the latter, check the result with:
        ``np.can_cast(new_dtype, dtype, casting="no")``.

    """
    if dtype.fields is not None:
        found_metadata = dtype.metadata is not None

        names = []
        formats = []
        offsets = []
        titles = []
        for name, field in dtype.fields.items():
            field_dt = drop_metadata(field[0])
            if field_dt is not field[0]:
                found_metadata = True

            names.append(name)
            formats.append(field_dt)
            offsets.append(field[1])
            titles.append(None if len(field) < 3 else field[2])

        if not found_metadata:
            return dtype

        structure = dict(
            names=names, formats=formats, offsets=offsets, titles=titles,
            itemsize=dtype.itemsize)

        # NOTE: Could pass (dtype.type, structure) to preserve record dtypes...
        return np.dtype(structure, align=dtype.isalignedstruct)
    elif dtype.subdtype is not None:
        # subarray dtype
        subdtype, shape = dtype.subdtype
        new_subdtype = drop_metadata(subdtype)
        if dtype.metadata is None and new_subdtype is subdtype:
            return dtype

        return np.dtype((new_subdtype, shape))
    else:
        # Normal unstructured dtype
        if dtype.metadata is None:
            return dtype
        # Note that `dt.str` doesn't round-trip e.g. for user-dtypes.
        return np.dtype(dtype.str)
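

# Editor's illustrative sketch (not part of the original module): dropping
# metadata from a simple dtype, as `np.save` does before writing. The helper
# name is hypothetical and the function is never called at import time.
def _example_drop_metadata():
    dt = np.dtype(np.float64, metadata={"unit": "m"})
    clean = drop_metadata(dt)
    # The original dtype keeps its metadata; the returned copy has none.
    assert dt.metadata == {"unit": "m"} and clean.metadata is None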