Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/psutil/__init__.py: 29%
1# -*- coding: utf-8 -*-
3# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
4# Use of this source code is governed by a BSD-style license that can be
5# found in the LICENSE file.
7"""psutil is a cross-platform library for retrieving information on
8running processes and system utilization (CPU, memory, disks, network,
9sensors) in Python. Supported platforms:
11 - Linux
12 - Windows
13 - macOS
14 - FreeBSD
15 - OpenBSD
16 - NetBSD
17 - Sun Solaris
18 - AIX
20Works with Python versions 2.7 and 3.6+.
21"""
23from __future__ import division
25import collections
26import contextlib
27import datetime
28import functools
29import os
30import signal
31import subprocess
32import sys
33import threading
34import time
37try:
38 import pwd
39except ImportError:
40 pwd = None
42from . import _common
43from ._common import AIX
44from ._common import BSD
45from ._common import CONN_CLOSE
46from ._common import CONN_CLOSE_WAIT
47from ._common import CONN_CLOSING
48from ._common import CONN_ESTABLISHED
49from ._common import CONN_FIN_WAIT1
50from ._common import CONN_FIN_WAIT2
51from ._common import CONN_LAST_ACK
52from ._common import CONN_LISTEN
53from ._common import CONN_NONE
54from ._common import CONN_SYN_RECV
55from ._common import CONN_SYN_SENT
56from ._common import CONN_TIME_WAIT
57from ._common import FREEBSD # NOQA
58from ._common import LINUX
59from ._common import MACOS
60from ._common import NETBSD # NOQA
61from ._common import NIC_DUPLEX_FULL
62from ._common import NIC_DUPLEX_HALF
63from ._common import NIC_DUPLEX_UNKNOWN
64from ._common import OPENBSD # NOQA
65from ._common import OSX # deprecated alias
66from ._common import POSIX # NOQA
67from ._common import POWER_TIME_UNKNOWN
68from ._common import POWER_TIME_UNLIMITED
69from ._common import STATUS_DEAD
70from ._common import STATUS_DISK_SLEEP
71from ._common import STATUS_IDLE
72from ._common import STATUS_LOCKED
73from ._common import STATUS_PARKED
74from ._common import STATUS_RUNNING
75from ._common import STATUS_SLEEPING
76from ._common import STATUS_STOPPED
77from ._common import STATUS_TRACING_STOP
78from ._common import STATUS_WAITING
79from ._common import STATUS_WAKING
80from ._common import STATUS_ZOMBIE
81from ._common import SUNOS
82from ._common import WINDOWS
83from ._common import AccessDenied
84from ._common import Error
85from ._common import NoSuchProcess
86from ._common import TimeoutExpired
87from ._common import ZombieProcess
88from ._common import memoize_when_activated
89from ._common import wrap_numbers as _wrap_numbers
90from ._compat import PY3 as _PY3
91from ._compat import PermissionError
92from ._compat import ProcessLookupError
93from ._compat import SubprocessTimeoutExpired as _SubprocessTimeoutExpired
94from ._compat import long
97if LINUX:
98 # This is public API and it will be retrieved from _pslinux.py
99 # via sys.modules.
100 PROCFS_PATH = "/proc"
102 from . import _pslinux as _psplatform
103 from ._pslinux import IOPRIO_CLASS_BE # NOQA
104 from ._pslinux import IOPRIO_CLASS_IDLE # NOQA
105 from ._pslinux import IOPRIO_CLASS_NONE # NOQA
106 from ._pslinux import IOPRIO_CLASS_RT # NOQA
108elif WINDOWS:
109 from . import _pswindows as _psplatform
110 from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS # NOQA
111 from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS # NOQA
112 from ._psutil_windows import HIGH_PRIORITY_CLASS # NOQA
113 from ._psutil_windows import IDLE_PRIORITY_CLASS # NOQA
114 from ._psutil_windows import NORMAL_PRIORITY_CLASS # NOQA
115 from ._psutil_windows import REALTIME_PRIORITY_CLASS # NOQA
116 from ._pswindows import CONN_DELETE_TCB # NOQA
117 from ._pswindows import IOPRIO_HIGH # NOQA
118 from ._pswindows import IOPRIO_LOW # NOQA
119 from ._pswindows import IOPRIO_NORMAL # NOQA
120 from ._pswindows import IOPRIO_VERYLOW # NOQA
122elif MACOS:
123 from . import _psosx as _psplatform
125elif BSD:
126 from . import _psbsd as _psplatform
128elif SUNOS:
129 from . import _pssunos as _psplatform
130 from ._pssunos import CONN_BOUND # NOQA
131 from ._pssunos import CONN_IDLE # NOQA
133 # This is public writable API which is read from _pslinux.py and
134 # _pssunos.py via sys.modules.
135 PROCFS_PATH = "/proc"
137elif AIX:
138 from . import _psaix as _psplatform
140 # This is public API and it will be retrieved from _psaix.py
141 # via sys.modules.
142 PROCFS_PATH = "/proc"
144else: # pragma: no cover
145 raise NotImplementedError('platform %s is not supported' % sys.platform)
148# fmt: off
149__all__ = [
150 # exceptions
151 "Error", "NoSuchProcess", "ZombieProcess", "AccessDenied",
152 "TimeoutExpired",
154 # constants
155 "version_info", "__version__",
157 "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
158 "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
159 "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED",
160 "STATUS_PARKED",
162 "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
163 "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
164 "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
165 # "CONN_IDLE", "CONN_BOUND",
167 "AF_LINK",
169 "NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN",
171 "POWER_TIME_UNKNOWN", "POWER_TIME_UNLIMITED",
173 "BSD", "FREEBSD", "LINUX", "NETBSD", "OPENBSD", "MACOS", "OSX", "POSIX",
174 "SUNOS", "WINDOWS", "AIX",
176 # "RLIM_INFINITY", "RLIMIT_AS", "RLIMIT_CORE", "RLIMIT_CPU", "RLIMIT_DATA",
177 # "RLIMIT_FSIZE", "RLIMIT_LOCKS", "RLIMIT_MEMLOCK", "RLIMIT_NOFILE",
178 # "RLIMIT_NPROC", "RLIMIT_RSS", "RLIMIT_STACK", "RLIMIT_MSGQUEUE",
179 # "RLIMIT_NICE", "RLIMIT_RTPRIO", "RLIMIT_RTTIME", "RLIMIT_SIGPENDING",
181 # classes
182 "Process", "Popen",
184 # functions
185 "pid_exists", "pids", "process_iter", "wait_procs", # proc
186 "virtual_memory", "swap_memory", # memory
187 "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu
188 "cpu_stats", # "cpu_freq", "getloadavg"
189 "net_io_counters", "net_connections", "net_if_addrs", # network
190 "net_if_stats",
191 "disk_io_counters", "disk_partitions", "disk_usage", # disk
192 # "sensors_temperatures", "sensors_battery", "sensors_fans" # sensors
193 "users", "boot_time", # others
194]
195# fmt: on
198__all__.extend(_psplatform.__extra__all__)
200# Linux, FreeBSD
201if hasattr(_psplatform.Process, "rlimit"):
202 # Populate global namespace with RLIM* constants.
203 from . import _psutil_posix
205 _globals = globals()
206 _name = None
207 for _name in dir(_psutil_posix):
208 if _name.startswith('RLIM') and _name.isupper():
209 _globals[_name] = getattr(_psutil_posix, _name)
210 __all__.append(_name)
211 del _globals, _name
213AF_LINK = _psplatform.AF_LINK
215__author__ = "Giampaolo Rodola'"
216__version__ = "5.9.8"
217version_info = tuple([int(num) for num in __version__.split('.')])
219_timer = getattr(time, 'monotonic', time.time)
220_TOTAL_PHYMEM = None
221_LOWEST_PID = None
222_SENTINEL = object()
224# Sanity check in case the user messed up with psutil installation
225# or did something weird with sys.path. In this case we might end
226# up importing a python module using a C extension module which
227# was compiled for a different version of psutil.
228# We want to prevent that by failing sooner rather than later.
229# See: https://github.com/giampaolo/psutil/issues/564
230if int(__version__.replace('.', '')) != getattr(
231 _psplatform.cext, 'version', None
232):
233 msg = "version conflict: %r C extension " % _psplatform.cext.__file__
234 msg += "module was built for another version of psutil"
235 if hasattr(_psplatform.cext, 'version'):
236 msg += " (%s instead of %s)" % (
237 '.'.join([x for x in str(_psplatform.cext.version)]),
238 __version__,
239 )
240 else:
241 msg += " (different than %s)" % __version__
242 msg += "; you may try to 'pip uninstall psutil', manually remove %s" % (
243 getattr(
244 _psplatform.cext,
245 "__file__",
246 "the existing psutil install directory",
247 )
248 )
249 msg += " or clean the virtual env somehow, then reinstall"
250 raise ImportError(msg)
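# Illustrative sketch (not part of the original module): the sanity check
# above compares version digits only. Assuming __version__ == "5.9.8":
#
#   int("5.9.8".replace('.', ''))                 # -> 598
#   getattr(_psplatform.cext, 'version', None)    # -> 598 if the C ext matches
#
# If the C extension was built for, say, 5.9.7, cext.version would be 597 and
# the error message rebuilds it as '.'.join([x for x in str(597)]) -> '5.9.7'.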
253# =====================================================================
254# --- Utils
255# =====================================================================
258if hasattr(_psplatform, 'ppid_map'):
259 # Faster version (Windows and Linux).
260 _ppid_map = _psplatform.ppid_map
261else: # pragma: no cover
263 def _ppid_map():
264 """Return a {pid: ppid, ...} dict for all running processes in
265 one shot. Used to speed up Process.children().
266 """
267 ret = {}
268 for pid in pids():
269 try:
270 ret[pid] = _psplatform.Process(pid).ppid()
271 except (NoSuchProcess, ZombieProcess):
272 pass
273 return ret
276def _pprint_secs(secs):
277 """Format seconds in a human readable form."""
278 now = time.time()
279 secs_ago = int(now - secs)
280 fmt = "%H:%M:%S" if secs_ago < 60 * 60 * 24 else "%Y-%m-%d %H:%M:%S"
281 return datetime.datetime.fromtimestamp(secs).strftime(fmt)
284# =====================================================================
285# --- Process class
286# =====================================================================
289class Process(object): # noqa: UP004
290 """Represents an OS process with the given PID.
291 If PID is omitted current process PID (os.getpid()) is used.
292 Raise NoSuchProcess if PID does not exist.
294 Note that most of the methods of this class do not make sure
295 that the PID of the process being queried has not been reused.
296 That means you might end up retrieving information referring
297 to another process if the original process this instance
298 refers to is gone in the meantime.
300 The only exceptions for which process identity is pre-emptively
301 checked and guaranteed are:
303 - parent()
304 - children()
305 - nice() (set)
306 - ionice() (set)
307 - rlimit() (set)
308 - cpu_affinity (set)
309 - suspend()
310 - resume()
311 - send_signal()
312 - terminate()
313 - kill()
315 To prevent this problem for all other methods you can:
316 - use is_running() before querying the process
317 - if you're continuously iterating over a set of Process
318 instances use process_iter() which pre-emptively checks
319 process identity for every yielded instance
320 """
322 def __init__(self, pid=None):
323 self._init(pid)
325 def _init(self, pid, _ignore_nsp=False):
326 if pid is None:
327 pid = os.getpid()
328 else:
329 if not _PY3 and not isinstance(pid, (int, long)):
330 msg = "pid must be an integer (got %r)" % pid
331 raise TypeError(msg)
332 if pid < 0:
333 msg = "pid must be a positive integer (got %s)" % pid
334 raise ValueError(msg)
335 try:
336 _psplatform.cext.check_pid_range(pid)
337 except OverflowError:
338 msg = "process PID out of range (got %s)" % pid
339 raise NoSuchProcess(pid, msg=msg)
341 self._pid = pid
342 self._name = None
343 self._exe = None
344 self._create_time = None
345 self._gone = False
346 self._pid_reused = False
347 self._hash = None
348 self._lock = threading.RLock()
349 # used for caching on Windows only (on POSIX ppid may change)
350 self._ppid = None
351 # platform-specific modules define an _psplatform.Process
352 # implementation class
353 self._proc = _psplatform.Process(pid)
354 self._last_sys_cpu_times = None
355 self._last_proc_cpu_times = None
356 self._exitcode = _SENTINEL
357 # cache creation time for later use in is_running() method
358 try:
359 self.create_time()
360 except AccessDenied:
361 # We should never get here as AFAIK we're able to get
362 # process creation time on all platforms even as a
363 # limited user.
364 pass
365 except ZombieProcess:
366 # Zombies can still be queried by this class (although
367 # not always) and pids() return them so just go on.
368 pass
369 except NoSuchProcess:
370 if not _ignore_nsp:
371 msg = "process PID not found"
372 raise NoSuchProcess(pid, msg=msg)
373 else:
374 self._gone = True
375 # This pair is supposed to identify a Process instance
376 # univocally over time (the PID alone is not enough as
377 # it might refer to a process whose PID has been reused).
378 # This will be used later in __eq__() and is_running().
379 self._ident = (self.pid, self._create_time)
381 def __str__(self):
382 info = collections.OrderedDict()
383 info["pid"] = self.pid
384 if self._name:
385 info['name'] = self._name
386 with self.oneshot():
387 try:
388 info["name"] = self.name()
389 info["status"] = self.status()
390 except ZombieProcess:
391 info["status"] = "zombie"
392 except NoSuchProcess:
393 info["status"] = "terminated"
394 except AccessDenied:
395 pass
396 if self._exitcode not in (_SENTINEL, None):
397 info["exitcode"] = self._exitcode
398 if self._create_time is not None:
399 info['started'] = _pprint_secs(self._create_time)
400 return "%s.%s(%s)" % (
401 self.__class__.__module__,
402 self.__class__.__name__,
403 ", ".join(["%s=%r" % (k, v) for k, v in info.items()]),
404 )
406 __repr__ = __str__
408 def __eq__(self, other):
409 # Test for equality with another Process object based
410 # on PID and creation time.
411 if not isinstance(other, Process):
412 return NotImplemented
413 if OPENBSD or NETBSD: # pragma: no cover
414 # Zombie processes on Open/NetBSD have a creation time of
415 # 0.0. This covers the case when a process started normally
416 # (so it has a ctime), then it turned into a zombie. It's
417 # important to do this because is_running() depends on
418 # __eq__.
419 pid1, ctime1 = self._ident
420 pid2, ctime2 = other._ident
421 if pid1 == pid2:
422 if ctime1 and not ctime2:
423 try:
424 return self.status() == STATUS_ZOMBIE
425 except Error:
426 pass
427 return self._ident == other._ident
429 def __ne__(self, other):
430 return not self == other
432 def __hash__(self):
433 if self._hash is None:
434 self._hash = hash(self._ident)
435 return self._hash
437 def _raise_if_pid_reused(self):
438 """Raises NoSuchProcess in case process PID has been reused."""
439 if not self.is_running() and self._pid_reused:
440 # We may directly raise NSP in here already if PID is just
441 # not running, but I prefer NSP to be raised naturally by
442 # the actual Process API call. This way unit tests will tell
443 # us if the API is broken (aka don't raise NSP when it
444 # should). We also remain consistent with all other "get"
445 # APIs which don't use _raise_if_pid_reused().
446 msg = "process no longer exists and its PID has been reused"
447 raise NoSuchProcess(self.pid, self._name, msg=msg)
449 @property
450 def pid(self):
451 """The process PID."""
452 return self._pid
454 # --- utility methods
456 @contextlib.contextmanager
457 def oneshot(self):
458 """Utility context manager which considerably speeds up the
459 retrieval of multiple process information at the same time.
461 Internally, different pieces of process info (e.g. name, ppid,
462 uids, gids, ...) may be fetched by using the same routine, but
463 only one piece of information is returned and the others are
464 discarded. When using this context manager the internal routine
465 is executed once (in the example below, on name()) and the
466 other values are cached.
468 The cache is cleared when exiting the context manager block.
469 The advice is to use this every time you retrieve more than
470 one piece of information about the process. If you're lucky,
471 you'll get a hell of a speedup.
473 >>> import psutil
474 >>> p = psutil.Process()
475 >>> with p.oneshot():
476 ... p.name() # collect multiple info
477 ... p.cpu_times() # return cached value
478 ... p.cpu_percent() # return cached value
479 ... p.create_time() # return cached value
480 ...
481 >>>
482 """
483 with self._lock:
484 if hasattr(self, "_cache"):
485 # NOOP: this covers the use case where the user enters the
486 # context twice:
487 #
488 # >>> with p.oneshot():
489 # ... with p.oneshot():
490 # ...
491 #
492 # Also, since as_dict() internally uses oneshot()
493 # I expect that the code below will be a pretty common
494 # "mistake" that the user will make, so let's guard
495 # against that:
496 #
497 # >>> with p.oneshot():
498 # ... p.as_dict()
499 # ...
500 yield
501 else:
502 try:
503 # cached in case cpu_percent() is used
504 self.cpu_times.cache_activate(self)
505 # cached in case memory_percent() is used
506 self.memory_info.cache_activate(self)
507 # cached in case parent() is used
508 self.ppid.cache_activate(self)
509 # cached in case username() is used
510 if POSIX:
511 self.uids.cache_activate(self)
512 # specific implementation cache
513 self._proc.oneshot_enter()
514 yield
515 finally:
516 self.cpu_times.cache_deactivate(self)
517 self.memory_info.cache_deactivate(self)
518 self.ppid.cache_deactivate(self)
519 if POSIX:
520 self.uids.cache_deactivate(self)
521 self._proc.oneshot_exit()
523 def as_dict(self, attrs=None, ad_value=None):
524 """Utility method returning process information as a
525 hashable dictionary.
526 If *attrs* is specified it must be a list of strings
527 reflecting available Process class' attribute names
528 (e.g. ['cpu_times', 'name']) else all public (read
529 only) attributes are assumed.
530 *ad_value* is the value which gets assigned in case
531 AccessDenied or ZombieProcess exception is raised when
532 retrieving that particular process information.
533 """
534 valid_names = _as_dict_attrnames
535 if attrs is not None:
536 if not isinstance(attrs, (list, tuple, set, frozenset)):
537 msg = "invalid attrs type %s" % type(attrs)
538 raise TypeError(msg)
539 attrs = set(attrs)
540 invalid_names = attrs - valid_names
541 if invalid_names:
542 msg = "invalid attr name%s %s" % (
543 "s" if len(invalid_names) > 1 else "",
544 ", ".join(map(repr, invalid_names)),
545 )
546 raise ValueError(msg)
548 retdict = {}
549 ls = attrs or valid_names
550 with self.oneshot():
551 for name in ls:
552 try:
553 if name == 'pid':
554 ret = self.pid
555 else:
556 meth = getattr(self, name)
557 ret = meth()
558 except (AccessDenied, ZombieProcess):
559 ret = ad_value
560 except NotImplementedError:
561 # in case of not implemented functionality (may happen
562 # on old or exotic systems) we want to crash only if
563 # the user explicitly asked for that particular attr
564 if attrs:
565 raise
566 continue
567 retdict[name] = ret
568 return retdict
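# Hedged usage sketch for as_dict(); the returned values are illustrative:
#
#   >>> import psutil
#   >>> psutil.Process().as_dict(attrs=['pid', 'name', 'username'])
#   {'pid': 1234, 'name': 'python', 'username': 'alice'}
#
# Passing a name outside _as_dict_attrnames raises ValueError.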
570 def parent(self):
571 """Return the parent process as a Process object pre-emptively
572 checking whether PID has been reused.
573 If no parent is known return None.
574 """
575 lowest_pid = _LOWEST_PID if _LOWEST_PID is not None else pids()[0]
576 if self.pid == lowest_pid:
577 return None
578 ppid = self.ppid()
579 if ppid is not None:
580 ctime = self.create_time()
581 try:
582 parent = Process(ppid)
583 if parent.create_time() <= ctime:
584 return parent
585 # ...else ppid has been reused by another process
586 except NoSuchProcess:
587 pass
589 def parents(self):
590 """Return the parents of this process as a list of Process
591 instances. If no parents are known return an empty list.
592 """
593 parents = []
594 proc = self.parent()
595 while proc is not None:
596 parents.append(proc)
597 proc = proc.parent()
598 return parents
600 def is_running(self):
601 """Return whether this process is running.
602 It also checks if the PID has been reused by another process,
603 in which case it returns False.
604 """
605 if self._gone or self._pid_reused:
606 return False
607 try:
608 # Checking if PID is alive is not enough as the PID might
609 # have been reused by another process: we also want to
610 # verify process identity.
611 # Process identity / uniqueness over time is guaranteed by
612 # (PID + creation time) and that is verified in __eq__.
613 self._pid_reused = self != Process(self.pid)
614 return not self._pid_reused
615 except ZombieProcess:
616 # We should never get here as it's already handled in
617 # Process.__init__; here just for extra safety.
618 return True
619 except NoSuchProcess:
620 self._gone = True
621 return False
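# Minimal polling sketch built on is_running(); PID 1234 is hypothetical:
#
#   >>> p = psutil.Process(1234)
#   >>> while p.is_running():
#   ...     time.sleep(0.5)
#
# Identity (PID + creation time) is re-verified on each call, so the loop
# also exits if the PID gets reused by a different process.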
623 # --- actual API
625 @memoize_when_activated
626 def ppid(self):
627 """The process parent PID.
628 On Windows the return value is cached after first call.
629 """
630 # On POSIX we don't want to cache the ppid as it may unexpectedly
631 # change to 1 (init) in case this process turns into a zombie:
632 # https://github.com/giampaolo/psutil/issues/321
633 # http://stackoverflow.com/questions/356722/
635 # XXX should we check creation time here rather than in
636 # Process.parent()?
637 self._raise_if_pid_reused()
638 if POSIX:
639 return self._proc.ppid()
640 else: # pragma: no cover
641 self._ppid = self._ppid or self._proc.ppid()
642 return self._ppid
644 def name(self):
645 """The process name. The return value is cached after first call."""
646 # Process name is only cached on Windows as on POSIX it may
647 # change, see:
648 # https://github.com/giampaolo/psutil/issues/692
649 if WINDOWS and self._name is not None:
650 return self._name
651 name = self._proc.name()
652 if POSIX and len(name) >= 15:
653 # On UNIX the name gets truncated to the first 15 characters.
654 # If it matches the first part of the cmdline we return that
655 # one instead because it's usually more explicative.
656 # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
657 try:
658 cmdline = self.cmdline()
659 except (AccessDenied, ZombieProcess):
660 # Just pass and return the truncated name: it's better
661 # than nothing. Note: there are actual cases where a
662 # zombie process can return a name() but not a
663 # cmdline(), see:
664 # https://github.com/giampaolo/psutil/issues/2239
665 pass
666 else:
667 if cmdline:
668 extended_name = os.path.basename(cmdline[0])
669 if extended_name.startswith(name):
670 name = extended_name
671 self._name = name
672 self._proc._name = name
673 return name
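# Worked example of the truncation heuristic above (names assumed): on Linux
# name() may yield 'gnome-keyring-d' (15-char kernel limit) while cmdline()[0]
# is '/usr/bin/gnome-keyring-daemon'; since the basename
# 'gnome-keyring-daemon' starts with the truncated name, the longer form is
# returned instead.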
675 def exe(self):
676 """The process executable as an absolute path.
677 May also be an empty string.
678 The return value is cached after first call.
679 """
681 def guess_it(fallback):
682 # try to guess exe from cmdline[0] in absence of a native
683 # exe representation
684 cmdline = self.cmdline()
685 if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
686 exe = cmdline[0] # the possible exe
687 # Attempt to guess only in case of an absolute path.
688 # It is not safe otherwise as the process might have
689 # changed cwd.
690 if (
691 os.path.isabs(exe)
692 and os.path.isfile(exe)
693 and os.access(exe, os.X_OK)
694 ):
695 return exe
696 if isinstance(fallback, AccessDenied):
697 raise fallback
698 return fallback
700 if self._exe is None:
701 try:
702 exe = self._proc.exe()
703 except AccessDenied as err:
704 return guess_it(fallback=err)
705 else:
706 if not exe:
707 # underlying implementation can legitimately return an
708 # empty string; if that's the case we don't want to
709 # raise AD while guessing from the cmdline
710 try:
711 exe = guess_it(fallback=exe)
712 except AccessDenied:
713 pass
714 self._exe = exe
715 return self._exe
717 def cmdline(self):
718 """The command line this process has been called with."""
719 return self._proc.cmdline()
721 def status(self):
722 """The process current status as a STATUS_* constant."""
723 try:
724 return self._proc.status()
725 except ZombieProcess:
726 return STATUS_ZOMBIE
728 def username(self):
729 """The name of the user that owns the process.
730 On UNIX this is calculated by using *real* process uid.
731 """
732 if POSIX:
733 if pwd is None:
734 # might happen if python was installed from sources
735 msg = "requires pwd module shipped with standard python"
736 raise ImportError(msg)
737 real_uid = self.uids().real
738 try:
739 return pwd.getpwuid(real_uid).pw_name
740 except KeyError:
741 # the uid can't be resolved by the system
742 return str(real_uid)
743 else:
744 return self._proc.username()
746 def create_time(self):
747 """The process creation time as a floating point number
748 expressed in seconds since the epoch.
749 The return value is cached after first call.
750 """
751 if self._create_time is None:
752 self._create_time = self._proc.create_time()
753 return self._create_time
755 def cwd(self):
756 """Process current working directory as an absolute path."""
757 return self._proc.cwd()
759 def nice(self, value=None):
760 """Get or set process niceness (priority)."""
761 if value is None:
762 return self._proc.nice_get()
763 else:
764 self._raise_if_pid_reused()
765 self._proc.nice_set(value)
767 if POSIX:
769 @memoize_when_activated
770 def uids(self):
771 """Return process UIDs as a (real, effective, saved)
772 namedtuple.
773 """
774 return self._proc.uids()
776 def gids(self):
777 """Return process GIDs as a (real, effective, saved)
778 namedtuple.
779 """
780 return self._proc.gids()
782 def terminal(self):
783 """The terminal associated with this process, if any,
784 else None.
785 """
786 return self._proc.terminal()
788 def num_fds(self):
789 """Return the number of file descriptors opened by this
790 process (POSIX only).
791 """
792 return self._proc.num_fds()
794 # Linux, BSD, AIX and Windows only
795 if hasattr(_psplatform.Process, "io_counters"):
797 def io_counters(self):
798 """Return process I/O statistics as a
799 (read_count, write_count, read_bytes, write_bytes)
800 namedtuple.
801 Those are the number of read/write calls performed and the
802 amount of bytes read and written by the process.
803 """
804 return self._proc.io_counters()
806 # Linux and Windows
807 if hasattr(_psplatform.Process, "ionice_get"):
809 def ionice(self, ioclass=None, value=None):
810 """Get or set process I/O niceness (priority).
812 On Linux *ioclass* is one of the IOPRIO_CLASS_* constants.
813 *value* is a number which goes from 0 to 7. The higher the
814 value, the lower the I/O priority of the process.
816 On Windows only *ioclass* is used and it can be set to 2
817 (normal), 1 (low) or 0 (very low).
819 Available on Linux and Windows > Vista only.
820 """
821 if ioclass is None:
822 if value is not None:
823 msg = "'ioclass' argument must be specified"
824 raise ValueError(msg)
825 return self._proc.ionice_get()
826 else:
827 self._raise_if_pid_reused()
828 return self._proc.ionice_set(ioclass, value)
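# Hedged example (Linux-only constants; a sketch, not prescriptive):
#
#   >>> p = psutil.Process()
#   >>> p.ionice(IOPRIO_CLASS_IDLE)         # serve I/O only when disk is idle
#   >>> p.ionice(IOPRIO_CLASS_BE, value=7)  # best-effort class, lowest priority
#   >>> p.ionice()                          # get the current setting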
830 # Linux / FreeBSD only
831 if hasattr(_psplatform.Process, "rlimit"):
833 def rlimit(self, resource, limits=None):
834 """Get or set process resource limits as a (soft, hard)
835 tuple.
837 *resource* is one of the RLIMIT_* constants.
838 *limits* is supposed to be a (soft, hard) tuple.
840 See "man prlimit" for further info.
841 Available on Linux and FreeBSD only.
842 """
843 if limits is not None:
844 self._raise_if_pid_reused()
845 return self._proc.rlimit(resource, limits)
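# Hedged example (Linux/FreeBSD; RLIMIT_NOFILE comes from the RLIM* constants
# populated above; numbers are illustrative):
#
#   >>> p = psutil.Process()
#   >>> p.rlimit(RLIMIT_NOFILE)                # get -> (soft, hard)
#   (1024, 4096)
#   >>> p.rlimit(RLIMIT_NOFILE, (2048, 4096))  # set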
847 # Windows, Linux and FreeBSD only
848 if hasattr(_psplatform.Process, "cpu_affinity_get"):
850 def cpu_affinity(self, cpus=None):
851 """Get or set process CPU affinity.
852 If specified, *cpus* must be a list of CPUs for which you
853 want to set the affinity (e.g. [0, 1]).
854 If an empty list is passed, all eligible CPUs are assumed
855 (and set).
856 (Windows, Linux and BSD only).
857 """
858 if cpus is None:
859 return sorted(set(self._proc.cpu_affinity_get()))
860 else:
861 self._raise_if_pid_reused()
862 if not cpus:
863 if hasattr(self._proc, "_get_eligible_cpus"):
864 cpus = self._proc._get_eligible_cpus()
865 else:
866 cpus = tuple(range(len(cpu_times(percpu=True))))
867 self._proc.cpu_affinity_set(list(set(cpus)))
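# Hedged example (platforms providing cpu_affinity; values assumed):
#
#   >>> p = psutil.Process()
#   >>> p.cpu_affinity()       # e.g. [0, 1, 2, 3]
#   >>> p.cpu_affinity([0])    # pin the process to CPU #0
#   >>> p.cpu_affinity([])     # reset to all eligible CPUs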
869 # Linux, FreeBSD, SunOS
870 if hasattr(_psplatform.Process, "cpu_num"):
872 def cpu_num(self):
873 """Return what CPU this process is currently running on.
874 The returned number should be <= psutil.cpu_count()
875 and <= len(psutil.cpu_percent(percpu=True)).
876 It may be used in conjunction with
877 psutil.cpu_percent(percpu=True) to observe the system
878 workload distributed across CPUs.
879 """
880 return self._proc.cpu_num()
882 # All platforms have it, but maybe not in the future.
883 if hasattr(_psplatform.Process, "environ"):
885 def environ(self):
886 """The environment variables of the process as a dict. Note: this
887 might not reflect changes made after the process started.
888 """
889 return self._proc.environ()
891 if WINDOWS:
893 def num_handles(self):
894 """Return the number of handles opened by this process
895 (Windows only).
896 """
897 return self._proc.num_handles()
899 def num_ctx_switches(self):
900 """Return the number of voluntary and involuntary context
901 switches performed by this process.
902 """
903 return self._proc.num_ctx_switches()
905 def num_threads(self):
906 """Return the number of threads used by this process."""
907 return self._proc.num_threads()
909 if hasattr(_psplatform.Process, "threads"):
911 def threads(self):
912 """Return threads opened by process as a list of
913 (id, user_time, system_time) namedtuples representing
914 thread id and thread CPU times (user/system).
915 On OpenBSD this method requires root access.
916 """
917 return self._proc.threads()
919 def children(self, recursive=False):
920 """Return the children of this process as a list of Process
921 instances, pre-emptively checking whether PID has been reused.
922 If *recursive* is True also return grandchildren and all further descendants.
924 Example (A == this process):
926 A ─┐
927 │
928 ├─ B (child) ─┐
929 │ └─ X (grandchild) ─┐
930 │ └─ Y (great grandchild)
931 ├─ C (child)
932 └─ D (child)
934 >>> import psutil
935 >>> p = psutil.Process()
936 >>> p.children()
937 B, C, D
938 >>> p.children(recursive=True)
939 B, X, Y, C, D
941 Note that in the example above if process X disappears
942 process Y won't be listed as the reference to process A
943 is lost.
944 """
945 self._raise_if_pid_reused()
946 ppid_map = _ppid_map()
947 ret = []
948 if not recursive:
949 for pid, ppid in ppid_map.items():
950 if ppid == self.pid:
951 try:
952 child = Process(pid)
953 # if child happens to be older than its parent
954 # (self) it means child's PID has been reused
955 if self.create_time() <= child.create_time():
956 ret.append(child)
957 except (NoSuchProcess, ZombieProcess):
958 pass
959 else:
960 # Construct a {pid: [child pids]} dict
961 reverse_ppid_map = collections.defaultdict(list)
962 for pid, ppid in ppid_map.items():
963 reverse_ppid_map[ppid].append(pid)
964 # Recursively traverse that dict, starting from self.pid,
965 # such that we only call Process() on actual children
966 seen = set()
967 stack = [self.pid]
968 while stack:
969 pid = stack.pop()
970 if pid in seen:
971 # Since pids can be reused while the ppid_map is
972 # constructed, there may be rare instances where
973 # there's a cycle in the recorded process "tree".
974 continue
975 seen.add(pid)
976 for child_pid in reverse_ppid_map[pid]:
977 try:
978 child = Process(child_pid)
979 # if child happens to be older than its parent
980 # (self) it means child's PID has been reused
981 intime = self.create_time() <= child.create_time()
982 if intime:
983 ret.append(child)
984 stack.append(child_pid)
985 except (NoSuchProcess, ZombieProcess):
986 pass
987 return ret
989 def cpu_percent(self, interval=None):
990 """Return a float representing the current process CPU
991 utilization as a percentage.
993 When *interval* is 0.0 or None (default) compares process times
994 to system CPU times elapsed since last call, returning
995 immediately (non-blocking). That means that the first time
996 this is called it will return a meaningless 0.0 value.
998 When *interval* is > 0.0 compares process times to system CPU
999 times elapsed before and after the interval (blocking).
1001 In this case it is recommended for accuracy that this function
1002 be called with at least 0.1 seconds between calls.
1004 A value > 100.0 can be returned in case of processes running
1005 multiple threads on different CPU cores.
1007 The returned value is explicitly NOT split evenly between
1008 all available logical CPUs. This means that a busy loop process
1009 running on a system with 2 logical CPUs will be reported as
1010 having 100% CPU utilization instead of 50%.
1012 Examples:
1014 >>> import psutil
1015 >>> p = psutil.Process(os.getpid())
1016 >>> # blocking
1017 >>> p.cpu_percent(interval=1)
1018 2.0
1019 >>> # non-blocking (percentage since last call)
1020 >>> p.cpu_percent(interval=None)
1021 2.9
1022 >>>
1023 """
1024 blocking = interval is not None and interval > 0.0
1025 if interval is not None and interval < 0:
1026 msg = "interval is not positive (got %r)" % interval
1027 raise ValueError(msg)
1028 num_cpus = cpu_count() or 1
1030 def timer():
1031 return _timer() * num_cpus
1033 if blocking:
1034 st1 = timer()
1035 pt1 = self._proc.cpu_times()
1036 time.sleep(interval)
1037 st2 = timer()
1038 pt2 = self._proc.cpu_times()
1039 else:
1040 st1 = self._last_sys_cpu_times
1041 pt1 = self._last_proc_cpu_times
1042 st2 = timer()
1043 pt2 = self._proc.cpu_times()
1044 if st1 is None or pt1 is None:
1045 self._last_sys_cpu_times = st2
1046 self._last_proc_cpu_times = pt2
1047 return 0.0
1049 delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
1050 delta_time = st2 - st1
1051 # reset values for next call in case of interval == None
1052 self._last_sys_cpu_times = st2
1053 self._last_proc_cpu_times = pt2
1055 try:
1056 # This is the utilization split evenly between all CPUs.
1057 # E.g. a busy loop process on a 2-CPU-cores system at this
1058 # point is reported as 50% instead of 100%.
1059 overall_cpus_percent = (delta_proc / delta_time) * 100
1060 except ZeroDivisionError:
1061 # interval was too low
1062 return 0.0
1063 else:
1064 # Note 1:
1065 # in order to emulate "top" we multiply the value for the num
1066 # of CPU cores. This way the busy process will be reported as
1067 # having 100% (or more) usage.
1068 #
1069 # Note 2:
1070 # taskmgr.exe on Windows differs in that it will show 50%
1071 # instead.
1072 #
1073 # Note 3:
1074 # a percentage > 100 is legitimate as it can result from a
1075 # process with multiple threads running on different CPU
1076 # cores (top does the same), see:
1077 # http://stackoverflow.com/questions/1032357
1078 # https://github.com/giampaolo/psutil/issues/474
1079 single_cpu_percent = overall_cpus_percent * num_cpus
1080 return round(single_cpu_percent, 1)
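# Worked example for the math above (assumed numbers): with interval=1.0 on a
# 2-CPU machine, timer() advances by 1.0 * 2 = 2.0, so delta_time == 2.0.
# If the process burned 0.5s of CPU (user + system), delta_proc == 0.5,
# overall_cpus_percent == 0.5 / 2.0 * 100 == 25.0, and the reported value is
# 25.0 * 2 == 50.0, i.e. half of one core.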
1082 @memoize_when_activated
1083 def cpu_times(self):
1084 """Return a (user, system, children_user, children_system)
1085 namedtuple representing the accumulated process time, in
1086 seconds.
1087 This is similar to os.times() but per-process.
1088 On macOS and Windows children_user and children_system are
1089 always set to 0.
1090 """
1091 return self._proc.cpu_times()
1093 @memoize_when_activated
1094 def memory_info(self):
1095 """Return a namedtuple with variable fields depending on the
1096 platform, representing memory information about the process.
1098 The "portable" fields available on all platforms are `rss` and `vms`.
1100 All numbers are expressed in bytes.
1101 """
1102 return self._proc.memory_info()
1104 @_common.deprecated_method(replacement="memory_info")
1105 def memory_info_ex(self):
1106 return self.memory_info()
1108 def memory_full_info(self):
1109 """This method returns the same information as memory_info(),
1110 plus, on some platform (Linux, macOS, Windows), also provides
1111 additional metrics (USS, PSS and swap).
1112 The additional metrics provide a better representation of actual
1113 process memory usage.
1115 Namely USS is the memory which is unique to a process and which
1116 would be freed if the process was terminated right now.
1118 It does so by passing through the whole process address space.
1119 As such it usually requires higher user privileges than
1120 memory_info() and is considerably slower.
1121 """
1122 return self._proc.memory_full_info()
1124 def memory_percent(self, memtype="rss"):
1125 """Compare process memory to total physical system memory and
1126 calculate process memory utilization as a percentage.
1127 *memtype* argument is a string that dictates what type of
1128 process memory you want to compare against (defaults to "rss").
1129 The list of available strings can be obtained like this:
1131 >>> psutil.Process().memory_info()._fields
1132 ('rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty', 'uss', 'pss')
1133 """
1134 valid_types = list(_psplatform.pfullmem._fields)
1135 if memtype not in valid_types:
1136 msg = "invalid memtype %r; valid types are %r" % (
1137 memtype,
1138 tuple(valid_types),
1139 )
1140 raise ValueError(msg)
1141 fun = (
1142 self.memory_info
1143 if memtype in _psplatform.pmem._fields
1144 else self.memory_full_info
1145 )
1146 metrics = fun()
1147 value = getattr(metrics, memtype)
1149 # use cached value if available
1150 total_phymem = _TOTAL_PHYMEM or virtual_memory().total
1151 if not total_phymem > 0:
1152 # we should never get here
1153 msg = (
1154 "can't calculate process memory percent because total physical"
1155 " system memory is not positive (%r)" % (total_phymem)
1156 )
1157 raise ValueError(msg)
1158 return (value / float(total_phymem)) * 100
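# Worked example (assumed numbers): with rss == 100 MiB and 8 GiB of physical
# memory, memory_percent() returns
# (100 * 1024**2) / float(8 * 1024**3) * 100 ~= 1.22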
1160 if hasattr(_psplatform.Process, "memory_maps"):
1162 def memory_maps(self, grouped=True):
1163 """Return process' mapped memory regions as a list of namedtuples
1164 whose fields are variable depending on the platform.
1166 If *grouped* is True the mapped regions with the same 'path'
1167 are grouped together and the different memory fields are summed.
1169 If *grouped* is False every mapped region is shown as a single
1170 entity and the namedtuple will also include the mapped region's
1171 address space ('addr') and permission set ('perms').
1172 """
1173 it = self._proc.memory_maps()
1174 if grouped:
1175 d = {}
1176 for tupl in it:
1177 path = tupl[2]
1178 nums = tupl[3:]
1179 try:
1180 d[path] = map(lambda x, y: x + y, d[path], nums)
1181 except KeyError:
1182 d[path] = nums
1183 nt = _psplatform.pmmap_grouped
1184 return [nt(path, *d[path]) for path in d] # NOQA
1185 else:
1186 nt = _psplatform.pmmap_ext
1187 return [nt(*x) for x in it]
1189 def open_files(self):
1190 """Return files opened by process as a list of
1191 (path, fd) namedtuples including the absolute file name
1192 and file descriptor number.
1193 """
1194 return self._proc.open_files()
1196 def connections(self, kind='inet'):
1197 """Return socket connections opened by process as a list of
1198 (fd, family, type, laddr, raddr, status) namedtuples.
1199 The *kind* parameter filters for connections that match the
1200 following criteria:
1202 +------------+----------------------------------------------------+
1203 | Kind Value | Connections using |
1204 +------------+----------------------------------------------------+
1205 | inet | IPv4 and IPv6 |
1206 | inet4 | IPv4 |
1207 | inet6 | IPv6 |
1208 | tcp | TCP |
1209 | tcp4 | TCP over IPv4 |
1210 | tcp6 | TCP over IPv6 |
1211 | udp | UDP |
1212 | udp4 | UDP over IPv4 |
1213 | udp6 | UDP over IPv6 |
1214 | unix | UNIX socket (both UDP and TCP protocols) |
1215 | all | the sum of all the possible families and protocols |
1216 +------------+----------------------------------------------------+
1217 """
1218 return self._proc.connections(kind)
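# Hedged usage sketch; field values below are illustrative, not real output:
#
#   >>> p = psutil.Process()
#   >>> p.connections(kind='tcp')
#   [pconn(fd=3, family=2, type=1, laddr=addr(ip='127.0.0.1', port=8080),
#          raddr=(), status='LISTEN')]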
1220 # --- signals
1222 if POSIX:
1224 def _send_signal(self, sig):
1225 assert not self.pid < 0, self.pid
1226 self._raise_if_pid_reused()
1227 if self.pid == 0:
1228 # see "man 2 kill"
1229 msg = (
1230 "preventing sending signal to process with PID 0 as it "
1231 "would affect every process in the process group of the "
1232 "calling process (os.getpid()) instead of PID 0"
1233 )
1234 raise ValueError(msg)
1235 try:
1236 os.kill(self.pid, sig)
1237 except ProcessLookupError:
1238 if OPENBSD and pid_exists(self.pid):
1239 # We do this because os.kill() lies in case of
1240 # zombie processes.
1241 raise ZombieProcess(self.pid, self._name, self._ppid)
1242 else:
1243 self._gone = True
1244 raise NoSuchProcess(self.pid, self._name)
1245 except PermissionError:
1246 raise AccessDenied(self.pid, self._name)
1248 def send_signal(self, sig):
1249 """Send a signal *sig* to process pre-emptively checking
1250 whether PID has been reused (see signal module constants).
1251 On Windows only SIGTERM is valid and is treated as an alias
1252 for kill().
1253 """
1254 if POSIX:
1255 self._send_signal(sig)
1256 else: # pragma: no cover
1257 self._raise_if_pid_reused()
1258 if sig != signal.SIGTERM and not self.is_running():
1259 msg = "process no longer exists"
1260 raise NoSuchProcess(self.pid, self._name, msg=msg)
1261 self._proc.send_signal(sig)
1263 def suspend(self):
1264 """Suspend process execution with SIGSTOP pre-emptively checking
1265 whether PID has been reused.
1266 On Windows this has the effect of suspending all process threads.
1267 """
1268 if POSIX:
1269 self._send_signal(signal.SIGSTOP)
1270 else: # pragma: no cover
1271 self._raise_if_pid_reused()
1272 self._proc.suspend()
1274 def resume(self):
1275 """Resume process execution with SIGCONT pre-emptively checking
1276 whether PID has been reused.
1277 On Windows this has the effect of resuming all process threads.
1278 """
1279 if POSIX:
1280 self._send_signal(signal.SIGCONT)
1281 else: # pragma: no cover
1282 self._raise_if_pid_reused()
1283 self._proc.resume()
1285 def terminate(self):
1286 """Terminate the process with SIGTERM pre-emptively checking
1287 whether PID has been reused.
1288 On Windows this is an alias for kill().
1289 """
1290 if POSIX:
1291 self._send_signal(signal.SIGTERM)
1292 else: # pragma: no cover
1293 self._raise_if_pid_reused()
1294 self._proc.kill()
1296 def kill(self):
1297 """Kill the current process with SIGKILL pre-emptively checking
1298 whether PID has been reused.
1299 """
1300 if POSIX:
1301 self._send_signal(signal.SIGKILL)
1302 else: # pragma: no cover
1303 self._raise_if_pid_reused()
1304 self._proc.kill()
1306 def wait(self, timeout=None):
1307 """Wait for process to terminate and, if process is a children
1308 of os.getpid(), also return its exit code, else None.
1309 On Windows there's no such limitation (exit code is always
1310 returned).
1312 If the process is already terminated immediately return None
1313 instead of raising NoSuchProcess.
1315 If *timeout* (in seconds) is specified and process is still
1316 alive raise TimeoutExpired.
1318 To wait for multiple Process(es) use psutil.wait_procs().
1319 """
1320 if timeout is not None and not timeout >= 0:
1321 msg = "timeout must be a positive integer"
1322 raise ValueError(msg)
1323 if self._exitcode is not _SENTINEL:
1324 return self._exitcode
1325 self._exitcode = self._proc.wait(timeout)
1326 return self._exitcode
1329# The valid attr names which can be processed by Process.as_dict().
1330# fmt: off
1331_as_dict_attrnames = set(
1332 [x for x in dir(Process) if not x.startswith('_') and x not in
1333 {'send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
1334 'is_running', 'as_dict', 'parent', 'parents', 'children', 'rlimit',
1335 'memory_info_ex', 'oneshot'}])
1336# fmt: on
1339# =====================================================================
1340# --- Popen class
1341# =====================================================================
1344class Popen(Process):
1345 """Same as subprocess.Popen, but in addition it provides all
1346 psutil.Process methods in a single class.
1347 For the following methods which are common to both classes, the
1348 psutil implementation takes precedence:
1350 * send_signal()
1351 * terminate()
1352 * kill()
1354 This is done in order to avoid killing another process in case its
1355 PID has been reused, fixing BPO-6973.
1357 >>> import psutil
1358 >>> from subprocess import PIPE
1359 >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
1360 >>> p.name()
1361 'python'
1362 >>> p.uids()
1363 user(real=1000, effective=1000, saved=1000)
1364 >>> p.username()
1365 'giampaolo'
1366 >>> p.communicate()
1367 ('hi', None)
1368 >>> p.terminate()
1369 >>> p.wait(timeout=2)
1370 0
1371 >>>
1372 """
1374 def __init__(self, *args, **kwargs):
1375 # Explicitly avoid to raise NoSuchProcess in case the process
1376 # spawned by subprocess.Popen terminates too quickly, see:
1377 # https://github.com/giampaolo/psutil/issues/193
1378 self.__subproc = subprocess.Popen(*args, **kwargs)
1379 self._init(self.__subproc.pid, _ignore_nsp=True)
1381 def __dir__(self):
1382 return sorted(set(dir(Popen) + dir(subprocess.Popen)))
1384 def __enter__(self):
1385 if hasattr(self.__subproc, '__enter__'):
1386 self.__subproc.__enter__()
1387 return self
1389 def __exit__(self, *args, **kwargs):
1390 if hasattr(self.__subproc, '__exit__'):
1391 return self.__subproc.__exit__(*args, **kwargs)
1392 else:
1393 if self.stdout:
1394 self.stdout.close()
1395 if self.stderr:
1396 self.stderr.close()
1397 try:
1398 # Flushing a BufferedWriter may raise an error.
1399 if self.stdin:
1400 self.stdin.close()
1401 finally:
1402 # Wait for the process to terminate, to avoid zombies.
1403 self.wait()
1405 def __getattribute__(self, name):
1406 try:
1407 return object.__getattribute__(self, name)
1408 except AttributeError:
1409 try:
1410 return object.__getattribute__(self.__subproc, name)
1411 except AttributeError:
1412 msg = "%s instance has no attribute '%s'" % (
1413 self.__class__.__name__,
1414 name,
1415 )
1416 raise AttributeError(msg)
1418 def wait(self, timeout=None):
1419 if self.__subproc.returncode is not None:
1420 return self.__subproc.returncode
1421 ret = super(Popen, self).wait(timeout) # noqa
1422 self.__subproc.returncode = ret
1423 return ret
1426# =====================================================================
1427# --- system processes related functions
1428# =====================================================================
1431def pids():
1432 """Return a list of current running PIDs."""
1433 global _LOWEST_PID
1434 ret = sorted(_psplatform.pids())
1435 _LOWEST_PID = ret[0]
1436 return ret
1439def pid_exists(pid):
1440 """Return True if given PID exists in the current process list.
1441 This is faster than doing "pid in psutil.pids()" and
1442 should be preferred.
1443 """
1444 if pid < 0:
1445 return False
1446 elif pid == 0 and POSIX:
1447 # On POSIX we use os.kill() to determine PID existence.
1448 # According to "man 2 kill" PID 0 has a special meaning
1449 # though: it refers to <<every process in the process
1450 group of the calling process>> and that is not what we want
1451 # to do here.
1452 return pid in pids()
1453 else:
1454 return _psplatform.pid_exists(pid)
1457_pmap = {}
1460def process_iter(attrs=None, ad_value=None):
1461 """Return a generator yielding a Process instance for all
1462 running processes.
1464 Every new Process instance is only created once and then cached
1465 into an internal table which is updated every time this is used.
1467 Cached Process instances are checked for identity so that you're
1468 safe in case a PID has been reused by another process, in which
1469 case the cached instance is updated.
1471 The sorting order in which processes are yielded is based on
1472 their PIDs.
1474 *attrs* and *ad_value* have the same meaning as in
1475 Process.as_dict(). If *attrs* is specified as_dict() is called
1476 and the resulting dict is stored as an 'info' attribute attached
1477 to the returned Process instance.
1478 If *attrs* is an empty list it will retrieve all process info
1479 (slow).
1480 """
1481 global _pmap
1483 def add(pid):
1484 proc = Process(pid)
1485 if attrs is not None:
1486 proc.info = proc.as_dict(attrs=attrs, ad_value=ad_value)
1487 pmap[proc.pid] = proc
1488 return proc
1490 def remove(pid):
1491 pmap.pop(pid, None)
1493 pmap = _pmap.copy()
1494 a = set(pids())
1495 b = set(pmap.keys())
1496 new_pids = a - b
1497 gone_pids = b - a
1498 for pid in gone_pids:
1499 remove(pid)
1500 try:
1501 ls = sorted(list(pmap.items()) + list(dict.fromkeys(new_pids).items()))
1502 for pid, proc in ls:
1503 try:
1504 if proc is None: # new process
1505 yield add(pid)
1506 else:
1507 # use is_running() to check whether PID has been
1508 # reused by another process in which case yield a
1509 # new Process instance
1510 if proc.is_running():
1511 if attrs is not None:
1512 proc.info = proc.as_dict(
1513 attrs=attrs, ad_value=ad_value
1514 )
1515 yield proc
1516 else:
1517 yield add(pid)
1518 except NoSuchProcess:
1519 remove(pid)
1520 except AccessDenied:
1521 # Process creation time can't be determined hence there's
1522 # no way to tell whether the pid of the cached process
1523 # has been reused. Just return the cached version.
1524 if proc is None and pid in pmap:
1525 try:
1526 yield pmap[pid]
1527 except KeyError:
1528 # If we get here it is likely that 2 threads were
1529 # using process_iter().
1530 pass
1531 else:
1532 raise
1533 finally:
1534 _pmap = pmap
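# Hedged usage sketch for process_iter() with *attrs*; output is illustrative:
#
#   >>> import psutil
#   >>> for proc in psutil.process_iter(attrs=['pid', 'name']):
#   ...     print(proc.info)
#   {'pid': 1, 'name': 'systemd'}
#   {'pid': 2, 'name': 'kthreadd'}
#
# proc.info is only set when *attrs* is passed (as_dict() is called for you).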
1537def wait_procs(procs, timeout=None, callback=None):
1538 """Convenience function which waits for a list of processes to
1539 terminate.
1541 Return a (gone, alive) tuple indicating which processes
1542 are gone and which ones are still alive.
1544 The gone ones will have a new *returncode* attribute indicating
1545 process exit status (may be None).
1547 *callback* is a function which gets called every time a process
1548 terminates (a Process instance is passed as callback argument).
1550 Function will return as soon as all processes terminate or when
1551 *timeout* occurs.
1552 Unlike Process.wait() it will not raise TimeoutExpired if
1553 *timeout* occurs.
1555 Typical use case is:
1557 - send SIGTERM to a list of processes
1558 - give them some time to terminate
1559 - send SIGKILL to those ones which are still alive
1561 Example:
1563 >>> def on_terminate(proc):
1564 ... print("process {} terminated".format(proc))
1565 ...
1566 >>> for p in procs:
1567 ... p.terminate()
1568 ...
1569 >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
1570 >>> for p in alive:
1571 ... p.kill()
1572 """
1574 def check_gone(proc, timeout):
1575 try:
1576 returncode = proc.wait(timeout=timeout)
1577 except TimeoutExpired:
1578 pass
1579 except _SubprocessTimeoutExpired:
1580 pass
1581 else:
1582 if returncode is not None or not proc.is_running():
1583 # Set new Process instance attribute.
1584 proc.returncode = returncode
1585 gone.add(proc)
1586 if callback is not None:
1587 callback(proc)
1589 if timeout is not None and not timeout >= 0:
1590 msg = "timeout must be a positive integer, got %s" % timeout
1591 raise ValueError(msg)
1592 gone = set()
1593 alive = set(procs)
1594 if callback is not None and not callable(callback):
1595 msg = "callback %r is not a callable" % callback
1596 raise TypeError(msg)
1597 if timeout is not None:
1598 deadline = _timer() + timeout
1600 while alive:
1601 if timeout is not None and timeout <= 0:
1602 break
1603 for proc in alive:
1604 # Make sure that every complete iteration (all processes)
1605 # will last max 1 sec.
1606 # We do this because we don't want to wait too long on a
1607 # single process: in case it terminates too late other
1608 # processes may disappear in the meantime and their PID
1609 # reused.
1610 max_timeout = 1.0 / len(alive)
1611 if timeout is not None:
1612 timeout = min((deadline - _timer()), max_timeout)
1613 if timeout <= 0:
1614 break
1615 check_gone(proc, timeout)
1616 else:
1617 check_gone(proc, max_timeout)
1618 alive = alive - gone
1620 if alive:
1621 # Last attempt over processes survived so far.
1622 # timeout == 0 won't make this function wait any further.
1623 for proc in alive:
1624 check_gone(proc, 0)
1625 alive = alive - gone
1627 return (list(gone), list(alive))
1630# =====================================================================
1631# --- CPU related functions
1632# =====================================================================
1635def cpu_count(logical=True):
1636 """Return the number of logical CPUs in the system (same as
1637 os.cpu_count() in Python 3.4).
1639 If *logical* is False return the number of physical cores only
1640 (e.g. hyper-threaded CPUs are excluded).
1642 Return None if undetermined.
1644 The return value is cached after first call.
1645 If desired, the cache can be cleared like this:
1647 >>> psutil.cpu_count.cache_clear()
1648 """
1649 if logical:
1650 ret = _psplatform.cpu_count_logical()
1651 else:
1652 ret = _psplatform.cpu_count_cores()
1653 if ret is not None and ret < 1:
1654 ret = None
1655 return ret
1658def cpu_times(percpu=False):
1659 """Return system-wide CPU times as a namedtuple.
1660 Every CPU time represents the seconds the CPU has spent in the
1661 given mode. The availability of the namedtuple's fields varies
1662 depending on the platform:
1664 - user
1665 - system
1666 - idle
1667 - nice (UNIX)
1668 - iowait (Linux)
1669 - irq (Linux, FreeBSD)
1670 - softirq (Linux)
1671 - steal (Linux >= 2.6.11)
1672 - guest (Linux >= 2.6.24)
1673 - guest_nice (Linux >= 3.2.0)
1675 When *percpu* is True return a list of namedtuples for each CPU.
1676 First element of the list refers to first CPU, second element
1677 to second CPU and so on.
1678 The order of the list is consistent across calls.
1679 """
1680 if not percpu:
1681 return _psplatform.cpu_times()
1682 else:
1683 return _psplatform.per_cpu_times()
1686try:
1687 _last_cpu_times = {threading.current_thread().ident: cpu_times()}
1688except Exception: # noqa: BLE001
1689 # Don't want to crash at import time.
1690 _last_cpu_times = {}
1692try:
1693 _last_per_cpu_times = {
1694 threading.current_thread().ident: cpu_times(percpu=True)
1695 }
1696except Exception: # noqa: BLE001
1697 # Don't want to crash at import time.
1698 _last_per_cpu_times = {}
1701def _cpu_tot_time(times):
1702 """Given a cpu_time() ntuple calculates the total CPU time
1703 (including idle time).
1704 """
1705 tot = sum(times)
1706 if LINUX:
1707 # On Linux guest times are already accounted in "user" or
1708 # "nice" times, so we subtract them from total.
1709 # Htop does the same. References:
1710 # https://github.com/giampaolo/psutil/pull/940
1711 # http://unix.stackexchange.com/questions/178045
1712 # https://github.com/torvalds/linux/blob/
1713 # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/
1714 # cputime.c#L158
1715 tot -= getattr(times, "guest", 0) # Linux 2.6.24+
1716 tot -= getattr(times, "guest_nice", 0) # Linux 3.2.0+
1717 return tot
1720def _cpu_busy_time(times):
1721 """Given a cpu_time() ntuple calculates the busy CPU time.
1722 We do so by subtracting all idle CPU times.
1723 """
1724 busy = _cpu_tot_time(times)
1725 busy -= times.idle
1726 # Linux: "iowait" is time during which the CPU does not do anything
1727 # (waits for IO to complete). On Linux IO wait is *not* accounted
1728 # in "idle" time so we subtract it. Htop does the same.
1729 # References:
1730 # https://github.com/torvalds/linux/blob/
1731 # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/cputime.c#L244
1732 busy -= getattr(times, "iowait", 0)
1733 return busy
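# Worked example (assumed Linux scputimes values): with user=100, system=50,
# idle=800, iowait=10 and everything else 0, _cpu_tot_time() == 960 and
# _cpu_busy_time() == 960 - 800 - 10 == 150, i.e. the CPU was busy for 150 of
# the 960 accumulated seconds.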
1736def _cpu_times_deltas(t1, t2):
1737 assert t1._fields == t2._fields, (t1, t2)
1738 field_deltas = []
1739 for field in _psplatform.scputimes._fields:
1740 field_delta = getattr(t2, field) - getattr(t1, field)
1741 # CPU times are always supposed to increase over time
1742 # or at least remain the same and that's because time
1743 # cannot go backwards.
1744 # Surprisingly sometimes this might not be the case (at
1745 # least on Windows and Linux), see:
1746 # https://github.com/giampaolo/psutil/issues/392
1747 # https://github.com/giampaolo/psutil/issues/645
1748 # https://github.com/giampaolo/psutil/issues/1210
1749 # Trim negative deltas to zero to ignore decreasing fields.
1750 # top does the same. Reference:
1751 # https://gitlab.com/procps-ng/procps/blob/v3.3.12/top/top.c#L5063
1752 field_delta = max(0, field_delta)
1753 field_deltas.append(field_delta)
1754 return _psplatform.scputimes(*field_deltas)
1757def cpu_percent(interval=None, percpu=False):
1758 """Return a float representing the current system-wide CPU
1759 utilization as a percentage.
1761 When *interval* is > 0.0 compares system CPU times elapsed before
1762 and after the interval (blocking).
1764 When *interval* is 0.0 or None compares system CPU times elapsed
1765 since last call or module import, returning immediately (non
1766 blocking). That means the first time this is called it will
1767 return a meaningless 0.0 value which you should ignore.
1768 In this case it is recommended for accuracy that this function be
1769 called with at least 0.1 seconds between calls.
1771 When *percpu* is True returns a list of floats representing the
1772 utilization as a percentage for each CPU.
1773 First element of the list refers to first CPU, second element
1774 to second CPU and so on.
1775 The order of the list is consistent across calls.
1777 Examples:
1779 >>> # blocking, system-wide
1780 >>> psutil.cpu_percent(interval=1)
1781 2.0
1782 >>>
1783 >>> # blocking, per-cpu
1784 >>> psutil.cpu_percent(interval=1, percpu=True)
1785 [2.0, 1.0]
1786 >>>
1787 >>> # non-blocking (percentage since last call)
1788 >>> psutil.cpu_percent(interval=None)
1789 2.9
1790 >>>
1791 """
1792 tid = threading.current_thread().ident
1793 blocking = interval is not None and interval > 0.0
1794 if interval is not None and interval < 0:
1795 msg = "interval is not positive (got %r)" % interval
1796 raise ValueError(msg)
1798 def calculate(t1, t2):
1799 times_delta = _cpu_times_deltas(t1, t2)
1800 all_delta = _cpu_tot_time(times_delta)
1801 busy_delta = _cpu_busy_time(times_delta)
1803 try:
1804 busy_perc = (busy_delta / all_delta) * 100
1805 except ZeroDivisionError:
1806 return 0.0
1807 else:
1808 return round(busy_perc, 1)
1810 # system-wide usage
1811 if not percpu:
1812 if blocking:
1813 t1 = cpu_times()
1814 time.sleep(interval)
1815 else:
1816 t1 = _last_cpu_times.get(tid) or cpu_times()
1817 _last_cpu_times[tid] = cpu_times()
1818 return calculate(t1, _last_cpu_times[tid])
1819 # per-cpu usage
1820 else:
1821 ret = []
1822 if blocking:
1823 tot1 = cpu_times(percpu=True)
1824 time.sleep(interval)
1825 else:
1826 tot1 = _last_per_cpu_times.get(tid) or cpu_times(percpu=True)
1827 _last_per_cpu_times[tid] = cpu_times(percpu=True)
1828 for t1, t2 in zip(tot1, _last_per_cpu_times[tid]):
1829 ret.append(calculate(t1, t2))
1830 return ret
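# Editorial sketch, not part of the original psutil source: typical blocking
# and non-blocking uses of cpu_percent(); the helper name and the interval
# values are illustrative only.
def _example_cpu_percent_usage():
    import time
    import psutil
    system_wide = psutil.cpu_percent(interval=1)            # blocking, float
    per_cpu = psutil.cpu_percent(interval=1, percpu=True)   # blocking, list of floats
    psutil.cpu_percent(interval=None)   # refresh the internal snapshot
    time.sleep(0.1)                     # docstring above suggests >= 0.1 s between calls
    instant = psutil.cpu_percent(interval=None)  # non-blocking: % since previous call
    return system_wide, per_cpu, instant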
1833# Use a separate dict for cpu_times_percent(), so it's independent from
1834# cpu_percent() and they can both be used within the same program.
1835_last_cpu_times_2 = _last_cpu_times.copy()
1836_last_per_cpu_times_2 = _last_per_cpu_times.copy()
1839def cpu_times_percent(interval=None, percpu=False):
1840 """Same as cpu_percent() but provides utilization percentages
1841 for each specific CPU time as is returned by cpu_times().
1842 For instance, on Linux we'll get:
1844 >>> cpu_times_percent()
1845 cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
1846 irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
1847 >>>
1849 *interval* and *percpu* arguments have the same meaning as in
1850 cpu_percent().
1851 """
1852 tid = threading.current_thread().ident
1853 blocking = interval is not None and interval > 0.0
1854 if interval is not None and interval < 0:
1855 msg = "interval is not positive (got %r)" % interval
1856 raise ValueError(msg)
1858 def calculate(t1, t2):
1859 nums = []
1860 times_delta = _cpu_times_deltas(t1, t2)
1861 all_delta = _cpu_tot_time(times_delta)
1862 # "scale" is the value to multiply each delta with to get percentages.
1863 # We use "max" to avoid division by zero (if all_delta is 0, then all
1864 # fields are 0 so percentages will be 0 too. all_delta cannot be a
1865 # fraction because cpu times are integers)
1866 scale = 100.0 / max(1, all_delta)
1867 for field_delta in times_delta:
1868 field_perc = field_delta * scale
1869 field_perc = round(field_perc, 1)
1870 # make sure we don't return negative values or values over 100%
1871 field_perc = min(max(0.0, field_perc), 100.0)
1872 nums.append(field_perc)
1873 return _psplatform.scputimes(*nums)
1875 # system-wide usage
1876 if not percpu:
1877 if blocking:
1878 t1 = cpu_times()
1879 time.sleep(interval)
1880 else:
1881 t1 = _last_cpu_times_2.get(tid) or cpu_times()
1882 _last_cpu_times_2[tid] = cpu_times()
1883 return calculate(t1, _last_cpu_times_2[tid])
1884 # per-cpu usage
1885 else:
1886 ret = []
1887 if blocking:
1888 tot1 = cpu_times(percpu=True)
1889 time.sleep(interval)
1890 else:
1891 tot1 = _last_per_cpu_times_2.get(tid) or cpu_times(percpu=True)
1892 _last_per_cpu_times_2[tid] = cpu_times(percpu=True)
1893 for t1, t2 in zip(tot1, _last_per_cpu_times_2[tid]):
1894 ret.append(calculate(t1, t2))
1895 return ret
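# Editorial sketch, not part of the original psutil source: reading per-field
# utilization from cpu_times_percent(). Only 'user', 'system' and 'idle' are
# assumed here; the remaining fields vary by platform as shown above.
def _example_cpu_times_percent_usage():
    import psutil
    pct = psutil.cpu_times_percent(interval=1)   # blocking, system-wide
    return {"user": pct.user, "system": pct.system, "idle": pct.idle}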
1898def cpu_stats():
1899 """Return CPU statistics."""
1900 return _psplatform.cpu_stats()
1903if hasattr(_psplatform, "cpu_freq"):
1905 def cpu_freq(percpu=False):
1906 """Return CPU frequency as a namedtuple including current,
1907 min and max frequency expressed in MHz.
1909 If *percpu* is True and the system supports per-cpu frequency
1910 retrieval (Linux only) a list of frequencies is returned for
1911 each CPU. If not, a list with one element is returned.
1912 """
1913 ret = _psplatform.cpu_freq()
1914 if percpu:
1915 return ret
1916 else:
1917 num_cpus = float(len(ret))
1918 if num_cpus == 0:
1919 return None
1920 elif num_cpus == 1:
1921 return ret[0]
1922 else:
1923 currs, mins, maxs = 0.0, 0.0, 0.0
1924 set_none = False
1925 for cpu in ret:
1926 currs += cpu.current
1927 # On Linux if /proc/cpuinfo is used min/max are set
1928 # to None.
1929 if LINUX and cpu.min is None:
1930 set_none = True
1931 continue
1932 mins += cpu.min
1933 maxs += cpu.max
1935 current = currs / num_cpus
1937 if set_none:
1938 min_ = max_ = None
1939 else:
1940 min_ = mins / num_cpus
1941 max_ = maxs / num_cpus
1943 return _common.scpufreq(current, min_, max_)
1945 __all__.append("cpu_freq")
1948if hasattr(os, "getloadavg") or hasattr(_psplatform, "getloadavg"):
1949 # Perform this hasattr check once at import time to either use the
1950 # platform-based code or proxy straight from the os module.
1951 if hasattr(os, "getloadavg"):
1952 getloadavg = os.getloadavg
1953 else:
1954 getloadavg = _psplatform.getloadavg
1956 __all__.append("getloadavg")
1959# =====================================================================
1960# --- system memory related functions
1961# =====================================================================
1964def virtual_memory():
1965 """Return statistics about system memory usage as a namedtuple
1966 including the following fields, expressed in bytes:
1968 - total:
1969 total physical memory available.
1971 - available:
1972 the memory that can be given instantly to processes without the
1973 system going into swap.
1974 This is calculated by summing different memory values depending
1975 on the platform and it is supposed to be used to monitor actual
1976 memory usage in a cross platform fashion.
1978 - percent:
1979 the percentage usage calculated as (total - available) / total * 100
1981 - used:
1982 memory used, calculated differently depending on the platform and
1983 designed for informational purposes only:
1984 macOS: active + wired
1985 BSD: active + wired + cached
1986 Linux: total - free
1988 - free:
1989 memory not being used at all (zeroed) that is readily available;
1990 note that this doesn't reflect the actual memory available
1991 (use 'available' instead)
1993 Platform-specific fields:
1995 - active (UNIX):
1996 memory currently in use or very recently used, and so it is in RAM.
1998 - inactive (UNIX):
1999 memory that is marked as not used.
2001 - buffers (BSD, Linux):
2002 cache for things like file system metadata.
2004 - cached (BSD, macOS):
2005 cache for various things.
2007 - wired (macOS, BSD):
2008 memory that is marked to always stay in RAM. It is never moved to disk.
2010 - shared (BSD):
2011 memory that may be simultaneously accessed by multiple processes.
2013 The sum of 'used' and 'available' does not necessarily equal total.
2014 On Windows 'available' and 'free' are the same.
2015 """
2016 global _TOTAL_PHYMEM
2017 ret = _psplatform.virtual_memory()
2018 # cached for later use in Process.memory_percent()
2019 _TOTAL_PHYMEM = ret.total
2020 return ret
2023def swap_memory():
2024 """Return system swap memory statistics as a namedtuple including
2025 the following fields:
2027 - total: total swap memory in bytes
2028 - used: used swap memory in bytes
2029 - free: free swap memory in bytes
2030 - percent: the percentage usage
2031 - sin: no. of bytes the system has swapped in from disk (cumulative)
2032 - sout: no. of bytes the system has swapped out from disk (cumulative)
2034 'sin' and 'sout' on Windows are meaningless and always set to 0.
2035 """
2036 return _psplatform.swap_memory()
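# Editorial sketch, not part of the original psutil source: recomputing
# virtual_memory().percent from the formula documented above and reading the
# swap counters; the helper name and the MiB conversion are illustrative.
def _example_memory_snapshot():
    import psutil
    vm = psutil.virtual_memory()
    recomputed = round((vm.total - vm.available) / vm.total * 100, 1)
    sw = psutil.swap_memory()
    return {
        "ram_percent": vm.percent,             # should match `recomputed` (rounding aside)
        "ram_percent_recomputed": recomputed,
        "ram_available_mib": vm.available // (1024 * 1024),
        "swap_percent": sw.percent,
    }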
2039# =====================================================================
2040# --- disks/partitions related functions
2041# =====================================================================
2044def disk_usage(path):
2045 """Return disk usage statistics about the given *path* as a
2046 namedtuple including total, used and free space expressed in bytes
2047 plus the percentage usage.
2048 """
2049 return _psplatform.disk_usage(path)
2052def disk_partitions(all=False):
2053 """Return mounted partitions as a list of
2054 (device, mountpoint, fstype, opts) namedtuple.
2055 The 'opts' field is a raw, comma-separated string of mount
2056 options, which may vary depending on the platform.
2058 If the *all* parameter is False, return physical devices only and
2059 ignore all others.
2060 """
2062 def pathconf(path, name):
2063 try:
2064 return os.pathconf(path, name)
2065 except (OSError, AttributeError):
2066 pass
2068 ret = _psplatform.disk_partitions(all)
2069 if POSIX:
2070 new = []
2071 for item in ret:
2072 nt = item._replace(
2073 maxfile=pathconf(item.mountpoint, 'PC_NAME_MAX'),
2074 maxpath=pathconf(item.mountpoint, 'PC_PATH_MAX'),
2075 )
2076 new.append(nt)
2077 return new
2078 else:
2079 return ret
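# Editorial sketch, not part of the original psutil source: combining
# disk_partitions() and disk_usage() to report the fill level of each mounted
# physical partition; skipping unreadable mountpoints (e.g. empty CD-ROM
# drives on Windows) is a common precaution, not a psutil requirement.
def _example_partition_usage():
    import psutil
    report = {}
    for part in psutil.disk_partitions(all=False):
        try:
            report[part.mountpoint] = psutil.disk_usage(part.mountpoint).percent
        except OSError:
            continue
    return report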
2082def disk_io_counters(perdisk=False, nowrap=True):
2083 """Return system disk I/O statistics as a namedtuple including
2084 the following fields:
2086 - read_count: number of reads
2087 - write_count: number of writes
2088 - read_bytes: number of bytes read
2089 - write_bytes: number of bytes written
2090 - read_time: time spent reading from disk (in ms)
2091 - write_time: time spent writing to disk (in ms)
2093 Platform specific:
2095 - busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms)
2096 - read_merged_count (Linux): number of merged reads
2097 - write_merged_count (Linux): number of merged writes
2099 If *perdisk* is True return the same information for every
2100 physical disk installed on the system as a dictionary
2101 with partition names as the keys and the namedtuple
2102 described above as the values.
2104 If *nowrap* is True it detects and adjusts the numbers which overflow
2105 and wrap (restart from 0), adding the "old value" to the "new value" so
2106 that the returned numbers will always be increasing or remain the same,
2107 but never decrease.
2108 "disk_io_counters.cache_clear()" can be used to invalidate the
2109 cache.
2111 On recent Windows versions the 'diskperf -y' command may need to be
2112 executed first, otherwise this function won't find any disks.
2113 """
2114 kwargs = dict(perdisk=perdisk) if LINUX else {}
2115 rawdict = _psplatform.disk_io_counters(**kwargs)
2116 if not rawdict:
2117 return {} if perdisk else None
2118 if nowrap:
2119 rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters')
2120 nt = getattr(_psplatform, "sdiskio", _common.sdiskio)
2121 if perdisk:
2122 for disk, fields in rawdict.items():
2123 rawdict[disk] = nt(*fields)
2124 return rawdict
2125 else:
2126 return nt(*(sum(x) for x in zip(*rawdict.values())))
2129disk_io_counters.cache_clear = functools.partial(
2130 _wrap_numbers.cache_clear, 'psutil.disk_io_counters'
2131)
2132disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
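# Editorial sketch, not part of the original psutil source: sampling
# disk_io_counters() twice to estimate system-wide read/write throughput in
# bytes per second; the 1 s window is arbitrary and the function may return
# None when no disks are found.
def _example_disk_throughput():
    import time
    import psutil
    before = psutil.disk_io_counters()
    if before is None:
        return None
    time.sleep(1)
    after = psutil.disk_io_counters()
    return (after.read_bytes - before.read_bytes,
            after.write_bytes - before.write_bytes)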
2135# =====================================================================
2136# --- network related functions
2137# =====================================================================
2140def net_io_counters(pernic=False, nowrap=True):
2141 """Return network I/O statistics as a namedtuple including
2142 the following fields:
2144 - bytes_sent: number of bytes sent
2145 - bytes_recv: number of bytes received
2146 - packets_sent: number of packets sent
2147 - packets_recv: number of packets received
2148 - errin: total number of errors while receiving
2149 - errout: total number of errors while sending
2150 - dropin: total number of incoming packets which were dropped
2151 - dropout: total number of outgoing packets which were dropped
2152 (always 0 on macOS and BSD)
2154 If *pernic* is True return the same information for every
2155 network interface installed on the system as a dictionary
2156 with network interface names as the keys and the namedtuple
2157 described above as the values.
2159 If *nowrap* is True it detects and adjusts the numbers which overflow
2160 and wrap (restart from 0), adding the "old value" to the "new value" so
2161 that the returned numbers will always be increasing or remain the same,
2162 but never decrease.
2163 "net_io_counters.cache_clear()" can be used to invalidate the
2164 cache.
2165 """
2166 rawdict = _psplatform.net_io_counters()
2167 if not rawdict:
2168 return {} if pernic else None
2169 if nowrap:
2170 rawdict = _wrap_numbers(rawdict, 'psutil.net_io_counters')
2171 if pernic:
2172 for nic, fields in rawdict.items():
2173 rawdict[nic] = _common.snetio(*fields)
2174 return rawdict
2175 else:
2176 return _common.snetio(*[sum(x) for x in zip(*rawdict.values())])
2179net_io_counters.cache_clear = functools.partial(
2180 _wrap_numbers.cache_clear, 'psutil.net_io_counters'
2181)
2182net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
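# Editorial sketch, not part of the original psutil source: per-NIC bandwidth
# estimate over a 1 s window; with the default nowrap=True the counters never
# decrease, so the deltas below stay non-negative.
def _example_net_bandwidth():
    import time
    import psutil
    before = psutil.net_io_counters(pernic=True)
    time.sleep(1)
    after = psutil.net_io_counters(pernic=True)
    return {
        nic: (after[nic].bytes_recv - before[nic].bytes_recv,
              after[nic].bytes_sent - before[nic].bytes_sent)
        for nic in after
        if nic in before
    }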
2185def net_connections(kind='inet'):
2186 """Return system-wide socket connections as a list of
2187 (fd, family, type, laddr, raddr, status, pid) namedtuples.
2188 In case of limited privileges 'fd' and 'pid' may be set to -1
2189 and None respectively.
2190 The *kind* parameter filters for connections that fit the
2191 following criteria:
2193 +------------+----------------------------------------------------+
2194 | Kind Value | Connections using |
2195 +------------+----------------------------------------------------+
2196 | inet | IPv4 and IPv6 |
2197 | inet4 | IPv4 |
2198 | inet6 | IPv6 |
2199 | tcp | TCP |
2200 | tcp4 | TCP over IPv4 |
2201 | tcp6 | TCP over IPv6 |
2202 | udp | UDP |
2203 | udp4 | UDP over IPv4 |
2204 | udp6 | UDP over IPv6 |
2205 | unix | UNIX socket (both UDP and TCP protocols) |
2206 | all | the sum of all the possible families and protocols |
2207 +------------+----------------------------------------------------+
2209 On macOS this function requires root privileges.
2210 """
2211 return _psplatform.net_connections(kind)
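# Editorial sketch, not part of the original psutil source: listing local TCP
# ports in LISTEN state; as noted above this may require elevated privileges
# on some platforms (root on macOS).
def _example_listening_ports():
    import psutil
    return sorted(
        conn.laddr.port
        for conn in psutil.net_connections(kind="tcp")
        if conn.status == psutil.CONN_LISTEN
    )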
2214def net_if_addrs():
2215 """Return the addresses associated to each NIC (network interface
2216 card) installed on the system as a dictionary whose keys are the
2217 NIC names and value is a list of namedtuples for each address
2218 assigned to the NIC. Each namedtuple includes 5 fields:
2220 - family: can be either socket.AF_INET, socket.AF_INET6 or
2221 psutil.AF_LINK, which refers to a MAC address.
2222 - address: is the primary address and it is always set.
2223 - netmask: the netmask of 'address'; 'netmask', 'broadcast' and 'ptp' may be None.
2224 - ptp: stands for "point to point" and references the
2225 destination address on a point to point interface
2226 (typically a VPN).
2227 - broadcast: the broadcast address; 'broadcast' and *ptp* are mutually exclusive.
2229 Note: you can have more than one address of the same family
2230 associated with each interface.
2231 """
2232 has_enums = _PY3
2233 if has_enums:
2234 import socket
2235 rawlist = _psplatform.net_if_addrs()
2236 rawlist.sort(key=lambda x: x[1]) # sort by family
2237 ret = collections.defaultdict(list)
2238 for name, fam, addr, mask, broadcast, ptp in rawlist:
2239 if has_enums:
2240 try:
2241 fam = socket.AddressFamily(fam)
2242 except ValueError:
2243 if WINDOWS and fam == -1:
2244 fam = _psplatform.AF_LINK
2245 elif (
2246 hasattr(_psplatform, "AF_LINK")
2247 and fam == _psplatform.AF_LINK
2248 ):
2249 # Linux defines AF_LINK as an alias for AF_PACKET.
2250 # We re-set the family here so that repr(family)
2251 # will show AF_LINK rather than AF_PACKET
2252 fam = _psplatform.AF_LINK
2253 if fam == _psplatform.AF_LINK:
2254 # The underlying C function may return an incomplete MAC
2255 # address in which case we fill it with null bytes, see:
2256 # https://github.com/giampaolo/psutil/issues/786
2257 separator = ":" if POSIX else "-"
2258 while addr.count(separator) < 5:
2259 addr += "%s00" % separator
2260 ret[name].append(_common.snicaddr(fam, addr, mask, broadcast, ptp))
2261 return dict(ret)
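# Editorial sketch, not part of the original psutil source: extracting each
# NIC's MAC address by filtering for the AF_LINK family described above.
def _example_mac_addresses():
    import psutil
    macs = {}
    for nic, addrs in psutil.net_if_addrs().items():
        for addr in addrs:
            if addr.family == psutil.AF_LINK:
                macs[nic] = addr.address
    return macs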
2264def net_if_stats():
2265 """Return information about each NIC (network interface card)
2266 installed on the system as a dictionary whose keys are the
2267 NIC names and whose values are namedtuples with the following fields:
2269 - isup: whether the interface is up (bool)
2270 - duplex: can be either NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or
2271 NIC_DUPLEX_UNKNOWN
2272 - speed: the NIC speed expressed in megabits per second (Mbps); if it
2273 can't be determined (e.g. 'localhost') it will be set to 0.
2274 - mtu: the maximum transmission unit expressed in bytes.
2275 """
2276 return _psplatform.net_if_stats()
2279# =====================================================================
2280# --- sensors
2281# =====================================================================
2284# Linux, macOS
2285if hasattr(_psplatform, "sensors_temperatures"):
2287 def sensors_temperatures(fahrenheit=False):
2288 """Return hardware temperatures. Each entry is a namedtuple
2289 representing a certain hardware sensor (it may be a CPU, a
2290 hard disk or something else, depending on the OS and its
2291 configuration).
2292 All temperatures are expressed in Celsius unless *fahrenheit*
2293 is set to True.
2294 """
2296 def convert(n):
2297 if n is not None:
2298 return (float(n) * 9 / 5) + 32 if fahrenheit else n
2300 ret = collections.defaultdict(list)
2301 rawdict = _psplatform.sensors_temperatures()
2303 for name, values in rawdict.items():
2304 while values:
2305 label, current, high, critical = values.pop(0)
2306 current = convert(current)
2307 high = convert(high)
2308 critical = convert(critical)
2310 if high and not critical:
2311 critical = high
2312 elif critical and not high:
2313 high = critical
2315 ret[name].append(
2316 _common.shwtemp(label, current, high, critical)
2317 )
2319 return dict(ret)
2321 __all__.append("sensors_temperatures")
2324# Linux
2325if hasattr(_psplatform, "sensors_fans"):
2327 def sensors_fans():
2328 """Return fans speed. Each entry is a namedtuple
2329 representing a certain hardware sensor.
2330 All speed are expressed in RPM (rounds per minute).
2331 """
2332 return _psplatform.sensors_fans()
2334 __all__.append("sensors_fans")
2337# Linux, Windows, FreeBSD, macOS
2338if hasattr(_psplatform, "sensors_battery"):
2340 def sensors_battery():
2341 """Return battery information. If no battery is installed
2342 returns None.
2344 - percent: battery power left as a percentage.
2345 - secsleft: a rough approximation of how many seconds are left
2346 before the battery runs out of power. May be
2347 POWER_TIME_UNLIMITED or POWER_TIME_UNKNOWN.
2348 - power_plugged: True if the AC power cable is connected.
2349 """
2350 return _psplatform.sensors_battery()
2352 __all__.append("sensors_battery")
2355# =====================================================================
2356# --- other system related functions
2357# =====================================================================
2360def boot_time():
2361 """Return the system boot time expressed in seconds since the epoch."""
2362 # Note: we are not caching this because it is subject to
2363 # system clock updates.
2364 return _psplatform.boot_time()
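# Editorial sketch, not part of the original psutil source: turning
# boot_time()'s epoch timestamp into a datetime and an uptime figure.
def _example_uptime():
    import datetime
    import time
    import psutil
    booted_at = datetime.datetime.fromtimestamp(psutil.boot_time())
    uptime_seconds = time.time() - psutil.boot_time()
    return booted_at, uptime_seconds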
2367def users():
2368 """Return users currently connected on the system as a list of
2369 namedtuples including the following fields.
2371 - user: the name of the user
2372 - terminal: the tty or pseudo-tty associated with the user, if any.
2373 - host: the host name associated with the entry, if any.
2374 - started: the creation time as a floating point number expressed in
2375 seconds since the epoch.
2376 """
2377 return _psplatform.users()
2380# =====================================================================
2381# --- Windows services
2382# =====================================================================
2385if WINDOWS:
2387 def win_service_iter():
2388 """Return a generator yielding a WindowsService instance for all
2389 Windows services installed.
2390 """
2391 return _psplatform.win_service_iter()
2393 def win_service_get(name):
2394 """Get a Windows service by *name*.
2395 Raise NoSuchProcess if no service with such name exists.
2396 """
2397 return _psplatform.win_service_get(name)
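# Editorial sketch, not part of the original psutil source (Windows only):
# collecting the names of currently running services; name() and status() are
# WindowsService methods, and "running" is assumed here to be one of the
# status strings psutil reports.
def _example_running_services():
    import psutil
    if not psutil.WINDOWS:
        return []
    return [
        svc.name()
        for svc in psutil.win_service_iter()
        if svc.status() == "running"
    ]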
2400# =====================================================================
2403def _set_debug(value):
2404 """Enable or disable PSUTIL_DEBUG option, which prints debugging
2405 messages to stderr.
2406 """
2407 import psutil._common
2409 psutil._common.PSUTIL_DEBUG = bool(value)
2410 _psplatform.cext.set_debug(bool(value))
2413def test(): # pragma: no cover
2414 from ._common import bytes2human
2415 from ._compat import get_terminal_size
2417 today_day = datetime.date.today()
2418 # fmt: off
2419 templ = "%-10s %5s %5s %7s %7s %5s %6s %6s %6s %s"
2420 attrs = ['pid', 'memory_percent', 'name', 'cmdline', 'cpu_times',
2421 'create_time', 'memory_info', 'status', 'nice', 'username']
2422 print(templ % ("USER", "PID", "%MEM", "VSZ", "RSS", "NICE", # NOQA
2423 "STATUS", "START", "TIME", "CMDLINE"))
2424 # fmt: on
2425 for p in process_iter(attrs, ad_value=None):
2426 if p.info['create_time']:
2427 ctime = datetime.datetime.fromtimestamp(p.info['create_time'])
2428 if ctime.date() == today_day:
2429 ctime = ctime.strftime("%H:%M")
2430 else:
2431 ctime = ctime.strftime("%b%d")
2432 else:
2433 ctime = ''
2434 if p.info['cpu_times']:
2435 cputime = time.strftime(
2436 "%M:%S", time.localtime(sum(p.info['cpu_times']))
2437 )
2438 else:
2439 cputime = ''
2441 user = p.info['username'] or ''
2442 if not user and POSIX:
2443 try:
2444 user = p.uids()[0]
2445 except Error:
2446 pass
2447 if user and WINDOWS and '\\' in user:
2448 user = user.split('\\')[1]
2449 user = user[:9]
2450 vms = (
2451 bytes2human(p.info['memory_info'].vms)
2452 if p.info['memory_info'] is not None
2453 else ''
2454 )
2455 rss = (
2456 bytes2human(p.info['memory_info'].rss)
2457 if p.info['memory_info'] is not None
2458 else ''
2459 )
2460 memp = (
2461 round(p.info['memory_percent'], 1)
2462 if p.info['memory_percent'] is not None
2463 else ''
2464 )
2465 nice = int(p.info['nice']) if p.info['nice'] else ''
2466 if p.info['cmdline']:
2467 cmdline = ' '.join(p.info['cmdline'])
2468 else:
2469 cmdline = p.info['name']
2470 status = p.info['status'][:5] if p.info['status'] else ''
2472 line = templ % (
2473 user[:10],
2474 p.info['pid'],
2475 memp,
2476 vms,
2477 rss,
2478 nice,
2479 status,
2480 ctime,
2481 cputime,
2482 cmdline,
2483 )
2484 print(line[: get_terminal_size()[0]]) # NOQA
2487del memoize_when_activated, division
2488if sys.version_info[0] < 3:
2489 del num, x # noqa
2491if __name__ == "__main__":
2492 test()