Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/psutil-5.9.6-py3.8-linux-x86_64.egg/psutil/__init__.py: 31%
935 statements
coverage.py v7.3.1, created at 2023-09-25 07:00 +0000
1# -*- coding: utf-8 -*-
3# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
4# Use of this source code is governed by a BSD-style license that can be
5# found in the LICENSE file.
7"""psutil is a cross-platform library for retrieving information on
8running processes and system utilization (CPU, memory, disks, network,
9sensors) in Python. Supported platforms:
11 - Linux
12 - Windows
13 - macOS
14 - FreeBSD
15 - OpenBSD
16 - NetBSD
17 - Sun Solaris
18 - AIX
20Works with Python versions 2.7 and 3.6+.
21"""
23from __future__ import division
25import collections
26import contextlib
27import datetime
28import functools
29import os
30import signal
31import subprocess
32import sys
33import threading
34import time
37try:
38 import pwd
39except ImportError:
40 pwd = None
42from . import _common
43from ._common import AIX
44from ._common import BSD
45from ._common import CONN_CLOSE
46from ._common import CONN_CLOSE_WAIT
47from ._common import CONN_CLOSING
48from ._common import CONN_ESTABLISHED
49from ._common import CONN_FIN_WAIT1
50from ._common import CONN_FIN_WAIT2
51from ._common import CONN_LAST_ACK
52from ._common import CONN_LISTEN
53from ._common import CONN_NONE
54from ._common import CONN_SYN_RECV
55from ._common import CONN_SYN_SENT
56from ._common import CONN_TIME_WAIT
57from ._common import FREEBSD # NOQA
58from ._common import LINUX
59from ._common import MACOS
60from ._common import NETBSD # NOQA
61from ._common import NIC_DUPLEX_FULL
62from ._common import NIC_DUPLEX_HALF
63from ._common import NIC_DUPLEX_UNKNOWN
64from ._common import OPENBSD # NOQA
65from ._common import OSX # deprecated alias
66from ._common import POSIX # NOQA
67from ._common import POWER_TIME_UNKNOWN
68from ._common import POWER_TIME_UNLIMITED
69from ._common import STATUS_DEAD
70from ._common import STATUS_DISK_SLEEP
71from ._common import STATUS_IDLE
72from ._common import STATUS_LOCKED
73from ._common import STATUS_PARKED
74from ._common import STATUS_RUNNING
75from ._common import STATUS_SLEEPING
76from ._common import STATUS_STOPPED
77from ._common import STATUS_TRACING_STOP
78from ._common import STATUS_WAITING
79from ._common import STATUS_WAKING
80from ._common import STATUS_ZOMBIE
81from ._common import SUNOS
82from ._common import WINDOWS
83from ._common import AccessDenied
84from ._common import Error
85from ._common import NoSuchProcess
86from ._common import TimeoutExpired
87from ._common import ZombieProcess
88from ._common import memoize_when_activated
89from ._common import wrap_numbers as _wrap_numbers
90from ._compat import PY3 as _PY3
91from ._compat import PermissionError
92from ._compat import ProcessLookupError
93from ._compat import SubprocessTimeoutExpired as _SubprocessTimeoutExpired
94from ._compat import long
97if LINUX:
98 # This is public API and it will be retrieved from _pslinux.py
99 # via sys.modules.
100 PROCFS_PATH = "/proc"
102 from . import _pslinux as _psplatform
103 from ._pslinux import IOPRIO_CLASS_BE # NOQA
104 from ._pslinux import IOPRIO_CLASS_IDLE # NOQA
105 from ._pslinux import IOPRIO_CLASS_NONE # NOQA
106 from ._pslinux import IOPRIO_CLASS_RT # NOQA
108elif WINDOWS:
109 from . import _pswindows as _psplatform
110 from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS # NOQA
111 from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS # NOQA
112 from ._psutil_windows import HIGH_PRIORITY_CLASS # NOQA
113 from ._psutil_windows import IDLE_PRIORITY_CLASS # NOQA
114 from ._psutil_windows import NORMAL_PRIORITY_CLASS # NOQA
115 from ._psutil_windows import REALTIME_PRIORITY_CLASS # NOQA
116 from ._pswindows import CONN_DELETE_TCB # NOQA
117 from ._pswindows import IOPRIO_HIGH # NOQA
118 from ._pswindows import IOPRIO_LOW # NOQA
119 from ._pswindows import IOPRIO_NORMAL # NOQA
120 from ._pswindows import IOPRIO_VERYLOW # NOQA
122elif MACOS:
123 from . import _psosx as _psplatform
125elif BSD:
126 from . import _psbsd as _psplatform
128elif SUNOS:
129 from . import _pssunos as _psplatform
130 from ._pssunos import CONN_BOUND # NOQA
131 from ._pssunos import CONN_IDLE # NOQA
133 # This is public writable API which is read from _pslinux.py and
134 # _pssunos.py via sys.modules.
135 PROCFS_PATH = "/proc"
137elif AIX:
138 from . import _psaix as _psplatform
140 # This is public API and it will be retrieved from _psaix.py
141 # via sys.modules.
142 PROCFS_PATH = "/proc"
144else: # pragma: no cover
145 raise NotImplementedError('platform %s is not supported' % sys.platform)
148__all__ = [
149 # exceptions
150 "Error", "NoSuchProcess", "ZombieProcess", "AccessDenied",
151 "TimeoutExpired",
153 # constants
154 "version_info", "__version__",
156 "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
157 "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
158 "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED",
159 "STATUS_PARKED",
161 "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
162 "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
163 "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
164 # "CONN_IDLE", "CONN_BOUND",
166 "AF_LINK",
168 "NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN",
170 "POWER_TIME_UNKNOWN", "POWER_TIME_UNLIMITED",
172 "BSD", "FREEBSD", "LINUX", "NETBSD", "OPENBSD", "MACOS", "OSX", "POSIX",
173 "SUNOS", "WINDOWS", "AIX",
175 # "RLIM_INFINITY", "RLIMIT_AS", "RLIMIT_CORE", "RLIMIT_CPU", "RLIMIT_DATA",
176 # "RLIMIT_FSIZE", "RLIMIT_LOCKS", "RLIMIT_MEMLOCK", "RLIMIT_NOFILE",
177 # "RLIMIT_NPROC", "RLIMIT_RSS", "RLIMIT_STACK", "RLIMIT_MSGQUEUE",
178 # "RLIMIT_NICE", "RLIMIT_RTPRIO", "RLIMIT_RTTIME", "RLIMIT_SIGPENDING",
180 # classes
181 "Process", "Popen",
183 # functions
184 "pid_exists", "pids", "process_iter", "wait_procs", # proc
185 "virtual_memory", "swap_memory", # memory
186 "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu
187 "cpu_stats", # "cpu_freq", "getloadavg"
188 "net_io_counters", "net_connections", "net_if_addrs", # network
189 "net_if_stats",
190 "disk_io_counters", "disk_partitions", "disk_usage", # disk
191 # "sensors_temperatures", "sensors_battery", "sensors_fans" # sensors
192 "users", "boot_time", # others
193]
196__all__.extend(_psplatform.__extra__all__)
198# Linux, FreeBSD
199if hasattr(_psplatform.Process, "rlimit"):
200 # Populate global namespace with RLIM* constants.
201 from . import _psutil_posix
203 _globals = globals()
204 _name = None
205 for _name in dir(_psutil_posix):
206 if _name.startswith('RLIM') and _name.isupper():
207 _globals[_name] = getattr(_psutil_posix, _name)
208 __all__.append(_name)
209 del _globals, _name
211AF_LINK = _psplatform.AF_LINK
213__author__ = "Giampaolo Rodola'"
214__version__ = "5.9.6"
215version_info = tuple([int(num) for num in __version__.split('.')])
217_timer = getattr(time, 'monotonic', time.time)
218_TOTAL_PHYMEM = None
219_LOWEST_PID = None
220_SENTINEL = object()
222# Sanity check in case the user messed up with psutil installation
223# or did something weird with sys.path. In this case we might end
224# up importing a python module using a C extension module which
225# was compiled for a different version of psutil.
226# We want to prevent that by failing sooner rather than later.
227# See: https://github.com/giampaolo/psutil/issues/564
228if (int(__version__.replace('.', '')) !=
229 getattr(_psplatform.cext, 'version', None)):
230 msg = "version conflict: %r C extension module was built for another " \
231 "version of psutil" % _psplatform.cext.__file__
232 if hasattr(_psplatform.cext, 'version'):
233 msg += " (%s instead of %s)" % (
234 '.'.join([x for x in str(_psplatform.cext.version)]), __version__)
235 else:
236 msg += " (different than %s)" % __version__
237 msg += "; you may try to 'pip uninstall psutil', manually remove %s" % (
238 getattr(_psplatform.cext, "__file__",
239 "the existing psutil install directory"))
240 msg += " or clean the virtual env somehow, then reinstall"
241 raise ImportError(msg)
244# =====================================================================
245# --- Utils
246# =====================================================================
249if hasattr(_psplatform, 'ppid_map'):
250 # Faster version (Windows and Linux).
251 _ppid_map = _psplatform.ppid_map
252else: # pragma: no cover
253 def _ppid_map():
254 """Return a {pid: ppid, ...} dict for all running processes in
255 one shot. Used to speed up Process.children().
256 """
257 ret = {}
258 for pid in pids():
259 try:
260 ret[pid] = _psplatform.Process(pid).ppid()
261 except (NoSuchProcess, ZombieProcess):
262 pass
263 return ret
266def _pprint_secs(secs):
267 """Format seconds in a human readable form."""
268 now = time.time()
269 secs_ago = int(now - secs)
270 if secs_ago < 60 * 60 * 24:
271 fmt = "%H:%M:%S"
272 else:
273 fmt = "%Y-%m-%d %H:%M:%S"
274 return datetime.datetime.fromtimestamp(secs).strftime(fmt)
277# =====================================================================
278# --- Process class
279# =====================================================================
282class Process(object):
283 """Represents an OS process with the given PID.
284 If PID is omitted current process PID (os.getpid()) is used.
285 Raise NoSuchProcess if PID does not exist.
287 Note that most of the methods of this class do not make sure
288 that the PID of the process being queried has not been reused over
289 time. That means you might end up retrieving information referring
290 to another process if the original one this instance refers to
291 is gone in the meantime.
293 The only exceptions for which process identity is pre-emptively
294 checked and guaranteed are:
296 - parent()
297 - children()
298 - nice() (set)
299 - ionice() (set)
300 - rlimit() (set)
301 - cpu_affinity (set)
302 - suspend()
303 - resume()
304 - send_signal()
305 - terminate()
306 - kill()
308 To prevent this problem for all other methods you can:
309 - use is_running() before querying the process
310 - if you're continuously iterating over a set of Process
311 instances use process_iter() which pre-emptively checks
312 process identity for every yielded instance
313 """
315 def __init__(self, pid=None):
316 self._init(pid)
318 def _init(self, pid, _ignore_nsp=False):
319 if pid is None:
320 pid = os.getpid()
321 else:
322 if not _PY3 and not isinstance(pid, (int, long)):
323 raise TypeError('pid must be an integer (got %r)' % pid)
324 if pid < 0:
325 raise ValueError('pid must be a positive integer (got %s)'
326 % pid)
327 try:
328 _psplatform.cext.check_pid_range(pid)
329 except OverflowError:
330 raise NoSuchProcess(
331 pid,
332 msg='process PID out of range (got %s)' % pid,
333 )
335 self._pid = pid
336 self._name = None
337 self._exe = None
338 self._create_time = None
339 self._gone = False
340 self._pid_reused = False
341 self._hash = None
342 self._lock = threading.RLock()
343 # used for caching on Windows only (on POSIX ppid may change)
344 self._ppid = None
345 # platform-specific modules define an _psplatform.Process
346 # implementation class
347 self._proc = _psplatform.Process(pid)
348 self._last_sys_cpu_times = None
349 self._last_proc_cpu_times = None
350 self._exitcode = _SENTINEL
351 # cache creation time for later use in is_running() method
352 try:
353 self.create_time()
354 except AccessDenied:
355 # We should never get here as AFAIK we're able to get
356 # process creation time on all platforms even as a
357 # limited user.
358 pass
359 except ZombieProcess:
360 # Zombies can still be queried by this class (although
361 # not always), and pids() returns them, so just go on.
362 pass
363 except NoSuchProcess:
364 if not _ignore_nsp:
365 raise NoSuchProcess(pid, msg='process PID not found')
366 else:
367 self._gone = True
368 # This pair is supposed to identify a Process instance
369 # uniquely over time (the PID alone is not enough as
370 # it might refer to a process whose PID has been reused).
371 # This will be used later in __eq__() and is_running().
372 self._ident = (self.pid, self._create_time)
374 def __str__(self):
375 info = collections.OrderedDict()
376 info["pid"] = self.pid
377 if self._name:
378 info['name'] = self._name
379 with self.oneshot():
380 try:
381 info["name"] = self.name()
382 info["status"] = self.status()
383 except ZombieProcess:
384 info["status"] = "zombie"
385 except NoSuchProcess:
386 info["status"] = "terminated"
387 except AccessDenied:
388 pass
389 if self._exitcode not in (_SENTINEL, None):
390 info["exitcode"] = self._exitcode
391 if self._create_time is not None:
392 info['started'] = _pprint_secs(self._create_time)
393 return "%s.%s(%s)" % (
394 self.__class__.__module__,
395 self.__class__.__name__,
396 ", ".join(["%s=%r" % (k, v) for k, v in info.items()]))
398 __repr__ = __str__
400 def __eq__(self, other):
401 # Test for equality with another Process object based
402 # on PID and creation time.
403 if not isinstance(other, Process):
404 return NotImplemented
405 if OPENBSD or NETBSD: # pragma: no cover
406 # Zombie processes on Open/NetBSD have a creation time of
407 # 0.0. This covers the case when a process started normally
408 # (so it has a ctime), then it turned into a zombie. It's
409 # important to do this because is_running() depends on
410 # __eq__.
411 pid1, ctime1 = self._ident
412 pid2, ctime2 = other._ident
413 if pid1 == pid2:
414 if ctime1 and not ctime2:
415 try:
416 return self.status() == STATUS_ZOMBIE
417 except Error:
418 pass
419 return self._ident == other._ident
421 def __ne__(self, other):
422 return not self == other
424 def __hash__(self):
425 if self._hash is None:
426 self._hash = hash(self._ident)
427 return self._hash
429 def _raise_if_pid_reused(self):
430 """Raises NoSuchProcess in case process PID has been reused."""
431 if not self.is_running() and self._pid_reused:
432 # We may directly raise NSP in here already if PID is just
433 # not running, but I prefer NSP to be raised naturally by
434 # the actual Process API call. This way unit tests will tell
435 # us if the API is broken (aka don't raise NSP when it
436 # should). We also remain consistent with all other "get"
437 # APIs which don't use _raise_if_pid_reused().
438 msg = "process no longer exists and its PID has been reused"
439 raise NoSuchProcess(self.pid, self._name, msg=msg)
441 @property
442 def pid(self):
443 """The process PID."""
444 return self._pid
446 # --- utility methods
448 @contextlib.contextmanager
449 def oneshot(self):
450 """Utility context manager which considerably speeds up the
451 retrieval of multiple process information at the same time.
453 Internally, different pieces of process info (e.g. name, ppid,
454 uids, gids, ...) may be fetched by using the same routine, but
455 only one piece of information is returned and the others are discarded.
456 When using this context manager the internal routine is
457 executed once (in the example below on name()) and the
458 other info is cached.
460 The cache is cleared when exiting the context manager block.
461 The advice is to use this every time you retrieve more than
462 one piece of information about the process. If you're lucky,
463 you'll get a hell of a speedup.
465 >>> import psutil
466 >>> p = psutil.Process()
467 >>> with p.oneshot():
468 ... p.name() # collect multiple info
469 ... p.cpu_times() # return cached value
470 ... p.cpu_percent() # return cached value
471 ... p.create_time() # return cached value
472 ...
473 >>>
474 """
475 with self._lock:
476 if hasattr(self, "_cache"):
477 # NOOP: this covers the use case where the user enters the
478 # context twice:
479 #
480 # >>> with p.oneshot():
481 # ... with p.oneshot():
482 # ...
483 #
484 # Also, since as_dict() internally uses oneshot()
485 # I expect that the code below will be a pretty common
486 # "mistake" that the user will make, so let's guard
487 # against that:
488 #
489 # >>> with p.oneshot():
490 # ... p.as_dict()
491 # ...
492 yield
493 else:
494 try:
495 # cached in case cpu_percent() is used
496 self.cpu_times.cache_activate(self)
497 # cached in case memory_percent() is used
498 self.memory_info.cache_activate(self)
499 # cached in case parent() is used
500 self.ppid.cache_activate(self)
501 # cached in case username() is used
502 if POSIX:
503 self.uids.cache_activate(self)
504 # specific implementation cache
505 self._proc.oneshot_enter()
506 yield
507 finally:
508 self.cpu_times.cache_deactivate(self)
509 self.memory_info.cache_deactivate(self)
510 self.ppid.cache_deactivate(self)
511 if POSIX:
512 self.uids.cache_deactivate(self)
513 self._proc.oneshot_exit()
515 def as_dict(self, attrs=None, ad_value=None):
516 """Utility method returning process information as a
517 hashable dictionary.
518 If *attrs* is specified it must be a list of strings
519 reflecting available Process class' attribute names
520 (e.g. ['cpu_times', 'name']) else all public (read
521 only) attributes are assumed.
522 *ad_value* is the value which gets assigned in case
523 AccessDenied or ZombieProcess exception is raised when
524 retrieving that particular process information.
525 """
526 valid_names = _as_dict_attrnames
527 if attrs is not None:
528 if not isinstance(attrs, (list, tuple, set, frozenset)):
529 raise TypeError("invalid attrs type %s" % type(attrs))
530 attrs = set(attrs)
531 invalid_names = attrs - valid_names
532 if invalid_names:
533 raise ValueError("invalid attr name%s %s" % (
534 "s" if len(invalid_names) > 1 else "",
535 ", ".join(map(repr, invalid_names))))
537 retdict = {}
538 ls = attrs or valid_names
539 with self.oneshot():
540 for name in ls:
541 try:
542 if name == 'pid':
543 ret = self.pid
544 else:
545 meth = getattr(self, name)
546 ret = meth()
547 except (AccessDenied, ZombieProcess):
548 ret = ad_value
549 except NotImplementedError:
550 # in case of not implemented functionality (may happen
551 # on old or exotic systems) we want to crash only if
552 # the user explicitly asked for that particular attr
553 if attrs:
554 raise
555 continue
556 retdict[name] = ret
557 return retdict
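# Illustrative usage sketch (not from upstream psutil): typical as_dict()
# call; the attribute names below are examples of valid Process method
# names and the output values are indicative only.
#
#   >>> import psutil
#   >>> psutil.Process().as_dict(attrs=['pid', 'name', 'username'])
#   {'pid': 12345, 'name': 'python3', 'username': 'giampaolo'}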
559 def parent(self):
560 """Return the parent process as a Process object pre-emptively
561 checking whether PID has been reused.
562 If no parent is known return None.
563 """
564 lowest_pid = _LOWEST_PID if _LOWEST_PID is not None else pids()[0]
565 if self.pid == lowest_pid:
566 return None
567 ppid = self.ppid()
568 if ppid is not None:
569 ctime = self.create_time()
570 try:
571 parent = Process(ppid)
572 if parent.create_time() <= ctime:
573 return parent
574 # ...else ppid has been reused by another process
575 except NoSuchProcess:
576 pass
578 def parents(self):
579 """Return the parents of this process as a list of Process
580 instances. If no parents are known return an empty list.
581 """
582 parents = []
583 proc = self.parent()
584 while proc is not None:
585 parents.append(proc)
586 proc = proc.parent()
587 return parents
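# Illustrative usage sketch (not from upstream psutil): walking the
# ancestry of the current process with parents(); output is indicative.
#
#   >>> import psutil
#   >>> [p.name() for p in psutil.Process().parents()]
#   ['bash', 'gnome-terminal-', 'systemd']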
589 def is_running(self):
590 """Return whether this process is running.
591 It also checks if the PID has been reused by another process,
592 in which case it returns False.
593 """
594 if self._gone or self._pid_reused:
595 return False
596 try:
597 # Checking if PID is alive is not enough as the PID might
598 # have been reused by another process: we also want to
599 # verify process identity.
600 # Process identity / uniqueness over time is guaranteed by
601 # (PID + creation time) and that is verified in __eq__.
602 self._pid_reused = self != Process(self.pid)
603 return not self._pid_reused
604 except ZombieProcess:
605 # We should never get here as it's already handled in
606 # Process.__init__; here just for extra safety.
607 return True
608 except NoSuchProcess:
609 self._gone = True
610 return False
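# Illustrative usage sketch (not from upstream psutil): is_running() also
# guards against PID reuse, unlike a bare pid_exists() check.
#
#   >>> import psutil
#   >>> p = psutil.Process()
#   >>> p.is_running()
#   True
#   >>> # If the process dies and its PID is later reused by another
#   >>> # process, p.is_running() returns False even though
#   >>> # psutil.pid_exists(p.pid) is still True.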
612 # --- actual API
614 @memoize_when_activated
615 def ppid(self):
616 """The process parent PID.
617 On Windows the return value is cached after first call.
618 """
619 # On POSIX we don't want to cache the ppid as it may unexpectedly
620 # change to 1 (init) in case this process turns into a zombie:
621 # https://github.com/giampaolo/psutil/issues/321
622 # http://stackoverflow.com/questions/356722/
624 # XXX should we check creation time here rather than in
625 # Process.parent()?
626 self._raise_if_pid_reused()
627 if POSIX:
628 return self._proc.ppid()
629 else: # pragma: no cover
630 self._ppid = self._ppid or self._proc.ppid()
631 return self._ppid
633 def name(self):
634 """The process name. The return value is cached after first call."""
635 # Process name is only cached on Windows as on POSIX it may
636 # change, see:
637 # https://github.com/giampaolo/psutil/issues/692
638 if WINDOWS and self._name is not None:
639 return self._name
640 name = self._proc.name()
641 if POSIX and len(name) >= 15:
642 # On UNIX the name gets truncated to the first 15 characters.
643 # If it matches the first part of the cmdline we return that
644 # one instead because it's usually more descriptive.
645 # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
646 try:
647 cmdline = self.cmdline()
648 except (AccessDenied, ZombieProcess):
649 # Just pass and return the truncated name: it's better
650 # than nothing. Note: there are actual cases where a
651 # zombie process can return a name() but not a
652 # cmdline(), see:
653 # https://github.com/giampaolo/psutil/issues/2239
654 pass
655 else:
656 if cmdline:
657 extended_name = os.path.basename(cmdline[0])
658 if extended_name.startswith(name):
659 name = extended_name
660 self._name = name
661 self._proc._name = name
662 return name
664 def exe(self):
665 """The process executable as an absolute path.
666 May also be an empty string.
667 The return value is cached after first call.
668 """
669 def guess_it(fallback):
670 # try to guess exe from cmdline[0] in absence of a native
671 # exe representation
672 cmdline = self.cmdline()
673 if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
674 exe = cmdline[0] # the possible exe
675 # Attempt to guess only in case of an absolute path.
676 # It is not safe otherwise as the process might have
677 # changed cwd.
678 if (os.path.isabs(exe) and
679 os.path.isfile(exe) and
680 os.access(exe, os.X_OK)):
681 return exe
682 if isinstance(fallback, AccessDenied):
683 raise fallback
684 return fallback
686 if self._exe is None:
687 try:
688 exe = self._proc.exe()
689 except AccessDenied as err:
690 return guess_it(fallback=err)
691 else:
692 if not exe:
693 # underlying implementation can legitimately return an
694 # empty string; if that's the case we don't want to
695 # raise AD while guessing from the cmdline
696 try:
697 exe = guess_it(fallback=exe)
698 except AccessDenied:
699 pass
700 self._exe = exe
701 return self._exe
703 def cmdline(self):
704 """The command line this process has been called with."""
705 return self._proc.cmdline()
707 def status(self):
708 """The process current status as a STATUS_* constant."""
709 try:
710 return self._proc.status()
711 except ZombieProcess:
712 return STATUS_ZOMBIE
714 def username(self):
715 """The name of the user that owns the process.
716 On UNIX this is calculated by using the *real* process uid.
717 """
718 if POSIX:
719 if pwd is None:
720 # might happen if python was installed from sources
721 raise ImportError(
722 "requires pwd module shipped with standard python")
723 real_uid = self.uids().real
724 try:
725 return pwd.getpwuid(real_uid).pw_name
726 except KeyError:
727 # the uid can't be resolved by the system
728 return str(real_uid)
729 else:
730 return self._proc.username()
732 def create_time(self):
733 """The process creation time as a floating point number
734 expressed in seconds since the epoch.
735 The return value is cached after first call.
736 """
737 if self._create_time is None:
738 self._create_time = self._proc.create_time()
739 return self._create_time
741 def cwd(self):
742 """Process current working directory as an absolute path."""
743 return self._proc.cwd()
745 def nice(self, value=None):
746 """Get or set process niceness (priority)."""
747 if value is None:
748 return self._proc.nice_get()
749 else:
750 self._raise_if_pid_reused()
751 self._proc.nice_set(value)
753 if POSIX:
755 @memoize_when_activated
756 def uids(self):
757 """Return process UIDs as a (real, effective, saved)
758 namedtuple.
759 """
760 return self._proc.uids()
762 def gids(self):
763 """Return process GIDs as a (real, effective, saved)
764 namedtuple.
765 """
766 return self._proc.gids()
768 def terminal(self):
769 """The terminal associated with this process, if any,
770 else None.
771 """
772 return self._proc.terminal()
774 def num_fds(self):
775 """Return the number of file descriptors opened by this
776 process (POSIX only).
777 """
778 return self._proc.num_fds()
780 # Linux, BSD, AIX and Windows only
781 if hasattr(_psplatform.Process, "io_counters"):
783 def io_counters(self):
784 """Return process I/O statistics as a
785 (read_count, write_count, read_bytes, write_bytes)
786 namedtuple.
787 Those are the number of read/write calls performed and the
788 amount of bytes read and written by the process.
789 """
790 return self._proc.io_counters()
792 # Linux and Windows
793 if hasattr(_psplatform.Process, "ionice_get"):
795 def ionice(self, ioclass=None, value=None):
796 """Get or set process I/O niceness (priority).
798 On Linux *ioclass* is one of the IOPRIO_CLASS_* constants.
799 *value* is a number which goes from 0 to 7. The higher the
800 value, the lower the I/O priority of the process.
802 On Windows only *ioclass* is used and it can be set to 2
803 (normal), 1 (low) or 0 (very low).
805 Available on Linux and Windows > Vista only.
806 """
807 if ioclass is None:
808 if value is not None:
809 raise ValueError("'ioclass' argument must be specified")
810 return self._proc.ionice_get()
811 else:
812 self._raise_if_pid_reused()
813 return self._proc.ionice_set(ioclass, value)
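# Illustrative usage sketch (not from upstream psutil), assuming Linux:
# lowering a process' I/O priority to the idle class.
#
#   >>> import psutil
#   >>> p = psutil.Process()
#   >>> p.ionice(psutil.IOPRIO_CLASS_IDLE)   # only do I/O when disk is idle
#   >>> p.ionice().ioclass == psutil.IOPRIO_CLASS_IDLE
#   True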
815 # Linux / FreeBSD only
816 if hasattr(_psplatform.Process, "rlimit"):
818 def rlimit(self, resource, limits=None):
819 """Get or set process resource limits as a (soft, hard)
820 tuple.
822 *resource* is one of the RLIMIT_* constants.
823 *limits* is supposed to be a (soft, hard) tuple.
825 See "man prlimit" for further info.
826 Available on Linux and FreeBSD only.
827 """
828 if limits is not None:
829 self._raise_if_pid_reused()
830 return self._proc.rlimit(resource, limits)
832 # Windows, Linux and FreeBSD only
833 if hasattr(_psplatform.Process, "cpu_affinity_get"):
835 def cpu_affinity(self, cpus=None):
836 """Get or set process CPU affinity.
837 If specified, *cpus* must be a list of CPUs for which you
838 want to set the affinity (e.g. [0, 1]).
839 If an empty list is passed, all eligible CPUs are assumed
840 (and set).
841 (Windows, Linux and BSD only).
842 """
843 if cpus is None:
844 return sorted(set(self._proc.cpu_affinity_get()))
845 else:
846 self._raise_if_pid_reused()
847 if not cpus:
848 if hasattr(self._proc, "_get_eligible_cpus"):
849 cpus = self._proc._get_eligible_cpus()
850 else:
851 cpus = tuple(range(len(cpu_times(percpu=True))))
852 self._proc.cpu_affinity_set(list(set(cpus)))
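# Illustrative usage sketch (not from upstream psutil), assuming a
# platform where cpu_affinity() is available (Linux, Windows, FreeBSD);
# the CPU numbers shown are examples only.
#
#   >>> import psutil
#   >>> p = psutil.Process()
#   >>> p.cpu_affinity()          # get, e.g. [0, 1, 2, 3]
#   >>> p.cpu_affinity([0])       # pin to CPU #0
#   >>> p.cpu_affinity([])        # reset to all eligible CPUs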
854 # Linux, FreeBSD, SunOS
855 if hasattr(_psplatform.Process, "cpu_num"):
857 def cpu_num(self):
858 """Return what CPU this process is currently running on.
859 The returned number should be <= psutil.cpu_count()
860 and <= len(psutil.cpu_percent(percpu=True)).
861 It may be used in conjunction with
862 psutil.cpu_percent(percpu=True) to observe the system
863 workload distributed across CPUs.
864 """
865 return self._proc.cpu_num()
867 # All platforms have it, but maybe not in the future.
868 if hasattr(_psplatform.Process, "environ"):
870 def environ(self):
871 """The environment variables of the process as a dict. Note: this
872 might not reflect changes made after the process started. """
873 return self._proc.environ()
875 if WINDOWS:
877 def num_handles(self):
878 """Return the number of handles opened by this process
879 (Windows only).
880 """
881 return self._proc.num_handles()
883 def num_ctx_switches(self):
884 """Return the number of voluntary and involuntary context
885 switches performed by this process.
886 """
887 return self._proc.num_ctx_switches()
889 def num_threads(self):
890 """Return the number of threads used by this process."""
891 return self._proc.num_threads()
893 if hasattr(_psplatform.Process, "threads"):
895 def threads(self):
896 """Return threads opened by process as a list of
897 (id, user_time, system_time) namedtuples representing
898 thread id and thread CPU times (user/system).
899 On OpenBSD this method requires root access.
900 """
901 return self._proc.threads()
903 def children(self, recursive=False):
904 """Return the children of this process as a list of Process
905 instances, pre-emptively checking whether PID has been reused.
906 If *recursive* is True return all descendants (grandchildren and so on).
908 Example (A == this process):
910 A ─┐
911 │
912 ├─ B (child) ─┐
913 │ └─ X (grandchild) ─┐
914 │ └─ Y (great grandchild)
915 ├─ C (child)
916 └─ D (child)
918 >>> import psutil
919 >>> p = psutil.Process()
920 >>> p.children()
921 B, C, D
922 >>> p.children(recursive=True)
923 B, X, Y, C, D
925 Note that in the example above if process X disappears
926 process Y won't be listed as the reference to process A
927 is lost.
928 """
929 self._raise_if_pid_reused()
930 ppid_map = _ppid_map()
931 ret = []
932 if not recursive:
933 for pid, ppid in ppid_map.items():
934 if ppid == self.pid:
935 try:
936 child = Process(pid)
937 # if child happens to be older than its parent
938 # (self) it means child's PID has been reused
939 if self.create_time() <= child.create_time():
940 ret.append(child)
941 except (NoSuchProcess, ZombieProcess):
942 pass
943 else:
944 # Construct a {pid: [child pids]} dict
945 reverse_ppid_map = collections.defaultdict(list)
946 for pid, ppid in ppid_map.items():
947 reverse_ppid_map[ppid].append(pid)
948 # Recursively traverse that dict, starting from self.pid,
949 # such that we only call Process() on actual children
950 seen = set()
951 stack = [self.pid]
952 while stack:
953 pid = stack.pop()
954 if pid in seen:
955 # Since pids can be reused while the ppid_map is
956 # constructed, there may be rare instances where
957 # there's a cycle in the recorded process "tree".
958 continue
959 seen.add(pid)
960 for child_pid in reverse_ppid_map[pid]:
961 try:
962 child = Process(child_pid)
963 # if child happens to be older than its parent
964 # (self) it means child's PID has been reused
965 intime = self.create_time() <= child.create_time()
966 if intime:
967 ret.append(child)
968 stack.append(child_pid)
969 except (NoSuchProcess, ZombieProcess):
970 pass
971 return ret
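# Illustrative usage sketch (not from upstream psutil): printing a flat
# list of all descendants of the current process.
#
#   >>> import psutil
#   >>> parent = psutil.Process()
#   >>> for child in parent.children(recursive=True):
#   ...     print(child.pid, child.name())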
973 def cpu_percent(self, interval=None):
974 """Return a float representing the current process CPU
975 utilization as a percentage.
977 When *interval* is 0.0 or None (default) compares process times
978 to system CPU times elapsed since last call, returning
979 immediately (non-blocking). That means that the first time
980 this is called it will return a meaningless 0.0 value to be ignored.
982 When *interval* is > 0.0 compares process times to system CPU
983 times elapsed before and after the interval (blocking).
985 In this case it is recommended, for accuracy, that this function
986 be called with at least 0.1 seconds between calls.
988 A value > 100.0 can be returned in case of processes running
989 multiple threads on different CPU cores.
991 The returned value is explicitly NOT split evenly between
992 all available logical CPUs. This means that a busy loop process
993 running on a system with 2 logical CPUs will be reported as
994 having 100% CPU utilization instead of 50%.
996 Examples:
998 >>> import psutil
999 >>> p = psutil.Process(os.getpid())
1000 >>> # blocking
1001 >>> p.cpu_percent(interval=1)
1002 2.0
1003 >>> # non-blocking (percentage since last call)
1004 >>> p.cpu_percent(interval=None)
1005 2.9
1006 >>>
1007 """
1008 blocking = interval is not None and interval > 0.0
1009 if interval is not None and interval < 0:
1010 raise ValueError("interval is not positive (got %r)" % interval)
1011 num_cpus = cpu_count() or 1
1013 def timer():
1014 return _timer() * num_cpus
1016 if blocking:
1017 st1 = timer()
1018 pt1 = self._proc.cpu_times()
1019 time.sleep(interval)
1020 st2 = timer()
1021 pt2 = self._proc.cpu_times()
1022 else:
1023 st1 = self._last_sys_cpu_times
1024 pt1 = self._last_proc_cpu_times
1025 st2 = timer()
1026 pt2 = self._proc.cpu_times()
1027 if st1 is None or pt1 is None:
1028 self._last_sys_cpu_times = st2
1029 self._last_proc_cpu_times = pt2
1030 return 0.0
1032 delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
1033 delta_time = st2 - st1
1034 # reset values for next call in case of interval == None
1035 self._last_sys_cpu_times = st2
1036 self._last_proc_cpu_times = pt2
1038 try:
1039 # This is the utilization split evenly between all CPUs.
1040 # E.g. a busy loop process on a 2-CPU-cores system at this
1041 # point is reported as 50% instead of 100%.
1042 overall_cpus_percent = ((delta_proc / delta_time) * 100)
1043 except ZeroDivisionError:
1044 # interval was too low
1045 return 0.0
1046 else:
1047 # Note 1:
1048 # in order to emulate "top" we multiply the value for the num
1049 # of CPU cores. This way the busy process will be reported as
1050 # having 100% (or more) usage.
1051 #
1052 # Note 2:
1053 # taskmgr.exe on Windows differs in that it will show 50%
1054 # instead.
1055 #
1056 # Note 3:
1057 # a percentage > 100 is legitimate as it can result from a
1058 # process with multiple threads running on different CPU
1059 # cores (top does the same), see:
1060 # http://stackoverflow.com/questions/1032357
1061 # https://github.com/giampaolo/psutil/issues/474
1062 single_cpu_percent = overall_cpus_percent * num_cpus
1063 return round(single_cpu_percent, 1)
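# Worked example (not from upstream psutil) of the math above, assuming a
# 4-core machine and interval=1.0: timer() deltas are multiplied by
# num_cpus, so delta_time == 1.0 * 4 == 4.0. If the process consumed 2.0
# seconds of CPU time during that second (e.g. two busy threads),
# delta_proc == 2.0, overall_cpus_percent == 2.0 / 4.0 * 100 == 50.0, and
# the reported value is 50.0 * 4 == 200.0 (top-style, can exceed 100%).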
1065 @memoize_when_activated
1066 def cpu_times(self):
1067 """Return a (user, system, children_user, children_system)
1068 namedtuple representing the accumulated process time, in
1069 seconds.
1070 This is similar to os.times() but per-process.
1071 On macOS and Windows children_user and children_system are
1072 always set to 0.
1073 """
1074 return self._proc.cpu_times()
1076 @memoize_when_activated
1077 def memory_info(self):
1078 """Return a namedtuple with variable fields depending on the
1079 platform, representing memory information about the process.
1081 The "portable" fields available on all platforms are `rss` and `vms`.
1083 All numbers are expressed in bytes.
1084 """
1085 return self._proc.memory_info()
1087 @_common.deprecated_method(replacement="memory_info")
1088 def memory_info_ex(self):
1089 return self.memory_info()
1091 def memory_full_info(self):
1092 """This method returns the same information as memory_info(),
1093 plus, on some platforms (Linux, macOS, Windows), also provides
1094 additional metrics (USS, PSS and swap).
1095 The additional metrics provide a better representation of actual
1096 process memory usage.
1098 Namely USS is the memory which is unique to a process and which
1099 would be freed if the process was terminated right now.
1101 It does so by passing through the whole process address space.
1102 As such it usually requires higher user privileges than
1103 memory_info() and is considerably slower.
1104 """
1105 return self._proc.memory_full_info()
1107 def memory_percent(self, memtype="rss"):
1108 """Compare process memory to total physical system memory and
1109 calculate process memory utilization as a percentage.
1110 *memtype* argument is a string that dictates what type of
1111 process memory you want to compare against (defaults to "rss").
1112 The list of available strings can be obtained like this:
1114 >>> psutil.Process().memory_info()._fields
1115 ('rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty', 'uss', 'pss')
1116 """
1117 valid_types = list(_psplatform.pfullmem._fields)
1118 if memtype not in valid_types:
1119 raise ValueError("invalid memtype %r; valid types are %r" % (
1120 memtype, tuple(valid_types)))
1121 fun = self.memory_info if memtype in _psplatform.pmem._fields else \
1122 self.memory_full_info
1123 metrics = fun()
1124 value = getattr(metrics, memtype)
1126 # use cached value if available
1127 total_phymem = _TOTAL_PHYMEM or virtual_memory().total
1128 if not total_phymem > 0:
1129 # we should never get here
1130 raise ValueError(
1131 "can't calculate process memory percent because "
1132 "total physical system memory is not positive (%r)"
1133 % total_phymem)
1134 return (value / float(total_phymem)) * 100
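# Illustrative usage sketch (not from upstream psutil): comparing the RSS
# and (where supported) USS based percentages; values are indicative.
#
#   >>> import psutil
#   >>> p = psutil.Process()
#   >>> p.memory_percent()                 # defaults to memtype="rss"
#   0.6
#   >>> p.memory_percent(memtype="uss")    # Linux / macOS / Windows only
#   0.4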
1136 if hasattr(_psplatform.Process, "memory_maps"):
1137 def memory_maps(self, grouped=True):
1138 """Return process' mapped memory regions as a list of namedtuples
1139 whose fields are variable depending on the platform.
1141 If *grouped* is True the mapped regions with the same 'path'
1142 are grouped together and the different memory fields are summed.
1144 If *grouped* is False every mapped region is shown as a single
1145 entity and the namedtuple will also include the mapped region's
1146 address space ('addr') and permission set ('perms').
1147 """
1148 it = self._proc.memory_maps()
1149 if grouped:
1150 d = {}
1151 for tupl in it:
1152 path = tupl[2]
1153 nums = tupl[3:]
1154 try:
1155 d[path] = map(lambda x, y: x + y, d[path], nums)
1156 except KeyError:
1157 d[path] = nums
1158 nt = _psplatform.pmmap_grouped
1159 return [nt(path, *d[path]) for path in d] # NOQA
1160 else:
1161 nt = _psplatform.pmmap_ext
1162 return [nt(*x) for x in it]
1164 def open_files(self):
1165 """Return files opened by process as a list of
1166 (path, fd) namedtuples including the absolute file name
1167 and file descriptor number.
1168 """
1169 return self._proc.open_files()
1171 def connections(self, kind='inet'):
1172 """Return socket connections opened by process as a list of
1173 (fd, family, type, laddr, raddr, status) namedtuples.
1174 The *kind* parameter filters for connections that match the
1175 following criteria:
1177 +------------+----------------------------------------------------+
1178 | Kind Value | Connections using |
1179 +------------+----------------------------------------------------+
1180 | inet | IPv4 and IPv6 |
1181 | inet4 | IPv4 |
1182 | inet6 | IPv6 |
1183 | tcp | TCP |
1184 | tcp4 | TCP over IPv4 |
1185 | tcp6 | TCP over IPv6 |
1186 | udp | UDP |
1187 | udp4 | UDP over IPv4 |
1188 | udp6 | UDP over IPv6 |
1189 | unix | UNIX socket (both UDP and TCP protocols) |
1190 | all | the sum of all the possible families and protocols |
1191 +------------+----------------------------------------------------+
1192 """
1193 return self._proc.connections(kind)
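# Illustrative usage sketch (not from upstream psutil): listing the TCP
# sockets a process has in LISTEN state; 'pid' below is a placeholder for
# the PID of interest and the output is indicative.
#
#   >>> import psutil
#   >>> p = psutil.Process(pid)
#   >>> [c.laddr for c in p.connections(kind='tcp')
#   ...  if c.status == psutil.CONN_LISTEN]
#   [addr(ip='127.0.0.1', port=8080)]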
1195 # --- signals
1197 if POSIX:
1198 def _send_signal(self, sig):
1199 assert not self.pid < 0, self.pid
1200 self._raise_if_pid_reused()
1201 if self.pid == 0:
1202 # see "man 2 kill"
1203 raise ValueError(
1204 "preventing sending signal to process with PID 0 as it "
1205 "would affect every process in the process group of the "
1206 "calling process (os.getpid()) instead of PID 0")
1207 try:
1208 os.kill(self.pid, sig)
1209 except ProcessLookupError:
1210 if OPENBSD and pid_exists(self.pid):
1211 # We do this because os.kill() lies in case of
1212 # zombie processes.
1213 raise ZombieProcess(self.pid, self._name, self._ppid)
1214 else:
1215 self._gone = True
1216 raise NoSuchProcess(self.pid, self._name)
1217 except PermissionError:
1218 raise AccessDenied(self.pid, self._name)
1220 def send_signal(self, sig):
1221 """Send a signal *sig* to process pre-emptively checking
1222 whether PID has been reused (see signal module constants) .
1223 On Windows only SIGTERM is valid and is treated as an alias
1224 for kill().
1225 """
1226 if POSIX:
1227 self._send_signal(sig)
1228 else: # pragma: no cover
1229 self._raise_if_pid_reused()
1230 self._proc.send_signal(sig)
1232 def suspend(self):
1233 """Suspend process execution with SIGSTOP pre-emptively checking
1234 whether PID has been reused.
1235 On Windows this has the effect of suspending all process threads.
1236 """
1237 if POSIX:
1238 self._send_signal(signal.SIGSTOP)
1239 else: # pragma: no cover
1240 self._raise_if_pid_reused()
1241 self._proc.suspend()
1243 def resume(self):
1244 """Resume process execution with SIGCONT pre-emptively checking
1245 whether PID has been reused.
1246 On Windows this has the effect of resuming all process threads.
1247 """
1248 if POSIX:
1249 self._send_signal(signal.SIGCONT)
1250 else: # pragma: no cover
1251 self._raise_if_pid_reused()
1252 self._proc.resume()
1254 def terminate(self):
1255 """Terminate the process with SIGTERM pre-emptively checking
1256 whether PID has been reused.
1257 On Windows this is an alias for kill().
1258 """
1259 if POSIX:
1260 self._send_signal(signal.SIGTERM)
1261 else: # pragma: no cover
1262 self._raise_if_pid_reused()
1263 self._proc.kill()
1265 def kill(self):
1266 """Kill the current process with SIGKILL pre-emptively checking
1267 whether PID has been reused.
1268 """
1269 if POSIX:
1270 self._send_signal(signal.SIGKILL)
1271 else: # pragma: no cover
1272 self._raise_if_pid_reused()
1273 self._proc.kill()
1275 def wait(self, timeout=None):
1276 """Wait for process to terminate and, if process is a children
1277 of os.getpid(), also return its exit code, else None.
1278 On Windows there's no such limitation (exit code is always
1279 returned).
1281 If the process is already terminated immediately return None
1282 instead of raising NoSuchProcess.
1284 If *timeout* (in seconds) is specified and process is still
1285 alive raise TimeoutExpired.
1287 To wait for multiple Process(es) use psutil.wait_procs().
1288 """
1289 if timeout is not None and not timeout >= 0:
1290 raise ValueError("timeout must be a positive integer")
1291 if self._exitcode is not _SENTINEL:
1292 return self._exitcode
1293 self._exitcode = self._proc.wait(timeout)
1294 return self._exitcode
1297# The valid attr names which can be processed by Process.as_dict().
1298_as_dict_attrnames = set(
1299 [x for x in dir(Process) if not x.startswith('_') and x not in
1300 ['send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
1301 'is_running', 'as_dict', 'parent', 'parents', 'children', 'rlimit',
1302 'memory_info_ex', 'oneshot']])
1305# =====================================================================
1306# --- Popen class
1307# =====================================================================
1310class Popen(Process):
1311 """Same as subprocess.Popen, but in addition it provides all
1312 psutil.Process methods in a single class.
1313 For the following methods which are common to both classes, psutil
1314 implementation takes precedence:
1316 * send_signal()
1317 * terminate()
1318 * kill()
1320 This is done in order to avoid killing another process in case its
1321 PID has been reused, fixing BPO-6973.
1323 >>> import psutil
1324 >>> from subprocess import PIPE
1325 >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
1326 >>> p.name()
1327 'python'
1328 >>> p.uids()
1329 user(real=1000, effective=1000, saved=1000)
1330 >>> p.username()
1331 'giampaolo'
1332 >>> p.communicate()
1333 ('hi\n', None)
1334 >>> p.terminate()
1335 >>> p.wait(timeout=2)
1336 0
1337 >>>
1338 """
1340 def __init__(self, *args, **kwargs):
1341 # Explicitly avoid raising NoSuchProcess in case the process
1342 # spawned by subprocess.Popen terminates too quickly, see:
1343 # https://github.com/giampaolo/psutil/issues/193
1344 self.__subproc = subprocess.Popen(*args, **kwargs)
1345 self._init(self.__subproc.pid, _ignore_nsp=True)
1347 def __dir__(self):
1348 return sorted(set(dir(Popen) + dir(subprocess.Popen)))
1350 def __enter__(self):
1351 if hasattr(self.__subproc, '__enter__'):
1352 self.__subproc.__enter__()
1353 return self
1355 def __exit__(self, *args, **kwargs):
1356 if hasattr(self.__subproc, '__exit__'):
1357 return self.__subproc.__exit__(*args, **kwargs)
1358 else:
1359 if self.stdout:
1360 self.stdout.close()
1361 if self.stderr:
1362 self.stderr.close()
1363 try:
1364 # Flushing a BufferedWriter may raise an error.
1365 if self.stdin:
1366 self.stdin.close()
1367 finally:
1368 # Wait for the process to terminate, to avoid zombies.
1369 self.wait()
1371 def __getattribute__(self, name):
1372 try:
1373 return object.__getattribute__(self, name)
1374 except AttributeError:
1375 try:
1376 return object.__getattribute__(self.__subproc, name)
1377 except AttributeError:
1378 raise AttributeError("%s instance has no attribute '%s'"
1379 % (self.__class__.__name__, name))
1381 def wait(self, timeout=None):
1382 if self.__subproc.returncode is not None:
1383 return self.__subproc.returncode
1384 ret = super(Popen, self).wait(timeout)
1385 self.__subproc.returncode = ret
1386 return ret
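# Illustrative usage sketch (not from upstream psutil): psutil.Popen used
# as a context manager, mixing subprocess and psutil APIs.
#
#   >>> import subprocess, psutil
#   >>> with psutil.Popen(["python3", "-c", "print('hi')"],
#   ...                   stdout=subprocess.PIPE) as p:
#   ...     p.name()          # psutil API
#   ...     p.communicate()   # subprocess API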
1389# =====================================================================
1390# --- system processes related functions
1391# =====================================================================
1394def pids():
1395 """Return a list of current running PIDs."""
1396 global _LOWEST_PID
1397 ret = sorted(_psplatform.pids())
1398 _LOWEST_PID = ret[0]
1399 return ret
1402def pid_exists(pid):
1403 """Return True if given PID exists in the current process list.
1404 This is faster than doing "pid in psutil.pids()" and
1405 should be preferred.
1406 """
1407 if pid < 0:
1408 return False
1409 elif pid == 0 and POSIX:
1410 # On POSIX we use os.kill() to determine PID existence.
1411 # According to "man 2 kill" PID 0 has a special meaning
1412 # though: it refers to <<every process in the process
1413 # group of the calling process>> and that is not what we want
1414 # to do here.
1415 return pid in pids()
1416 else:
1417 return _psplatform.pid_exists(pid)
1420_pmap = {}
1423def process_iter(attrs=None, ad_value=None):
1424 """Return a generator yielding a Process instance for all
1425 running processes.
1427 Every new Process instance is only created once and then cached
1428 into an internal table which is updated every time this is used.
1430 Cached Process instances are checked for identity so that you're
1431 safe in case a PID has been reused by another process, in which
1432 case the cached instance is updated.
1434 The sorting order in which processes are yielded is based on
1435 their PIDs.
1437 *attrs* and *ad_value* have the same meaning as in
1438 Process.as_dict(). If *attrs* is specified as_dict() is called
1439 and the resulting dict is stored as an 'info' attribute attached
1440 to the returned Process instance.
1441 If *attrs* is an empty list it will retrieve all process info
1442 (slow).
1443 """
1444 global _pmap
1446 def add(pid):
1447 proc = Process(pid)
1448 if attrs is not None:
1449 proc.info = proc.as_dict(attrs=attrs, ad_value=ad_value)
1450 pmap[proc.pid] = proc
1451 return proc
1453 def remove(pid):
1454 pmap.pop(pid, None)
1456 pmap = _pmap.copy()
1457 a = set(pids())
1458 b = set(pmap.keys())
1459 new_pids = a - b
1460 gone_pids = b - a
1461 for pid in gone_pids:
1462 remove(pid)
1463 try:
1464 ls = sorted(list(pmap.items()) + list(dict.fromkeys(new_pids).items()))
1465 for pid, proc in ls:
1466 try:
1467 if proc is None: # new process
1468 yield add(pid)
1469 else:
1470 # use is_running() to check whether PID has been
1471 # reused by another process in which case yield a
1472 # new Process instance
1473 if proc.is_running():
1474 if attrs is not None:
1475 proc.info = proc.as_dict(
1476 attrs=attrs, ad_value=ad_value)
1477 yield proc
1478 else:
1479 yield add(pid)
1480 except NoSuchProcess:
1481 remove(pid)
1482 except AccessDenied:
1483 # Process creation time can't be determined hence there's
1484 # no way to tell whether the pid of the cached process
1485 # has been reused. Just return the cached version.
1486 if proc is None and pid in pmap:
1487 try:
1488 yield pmap[pid]
1489 except KeyError:
1490 # If we get here it is likely that 2 threads were
1491 # using process_iter().
1492 pass
1493 else:
1494 raise
1495 finally:
1496 _pmap = pmap
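# Illustrative usage sketch (not from upstream psutil): finding processes
# by name with process_iter(); 'python' and the output are examples only.
#
#   >>> import psutil
#   >>> [p.info for p in psutil.process_iter(['pid', 'name'])
#   ...  if p.info['name'] == 'python']
#   [{'pid': 1234, 'name': 'python'}]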
1499def wait_procs(procs, timeout=None, callback=None):
1500 """Convenience function which waits for a list of processes to
1501 terminate.
1503 Return a (gone, alive) tuple indicating which processes
1504 are gone and which ones are still alive.
1506 The gone ones will have a new *returncode* attribute indicating
1507 process exit status (may be None).
1509 *callback* is a function which gets called every time a process
1510 terminates (a Process instance is passed as callback argument).
1512 Function will return as soon as all processes terminate or when
1513 *timeout* occurs.
1514 Unlike Process.wait(), it will not raise TimeoutExpired if
1515 *timeout* occurs.
1517 Typical use case is:
1519 - send SIGTERM to a list of processes
1520 - give them some time to terminate
1521 - send SIGKILL to those ones which are still alive
1523 Example:
1525 >>> def on_terminate(proc):
1526 ... print("process {} terminated".format(proc))
1527 ...
1528 >>> for p in procs:
1529 ... p.terminate()
1530 ...
1531 >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
1532 >>> for p in alive:
1533 ... p.kill()
1534 """
1535 def check_gone(proc, timeout):
1536 try:
1537 returncode = proc.wait(timeout=timeout)
1538 except TimeoutExpired:
1539 pass
1540 except _SubprocessTimeoutExpired:
1541 pass
1542 else:
1543 if returncode is not None or not proc.is_running():
1544 # Set new Process instance attribute.
1545 proc.returncode = returncode
1546 gone.add(proc)
1547 if callback is not None:
1548 callback(proc)
1550 if timeout is not None and not timeout >= 0:
1551 msg = "timeout must be a positive integer, got %s" % timeout
1552 raise ValueError(msg)
1553 gone = set()
1554 alive = set(procs)
1555 if callback is not None and not callable(callback):
1556 raise TypeError("callback %r is not a callable" % callback)
1557 if timeout is not None:
1558 deadline = _timer() + timeout
1560 while alive:
1561 if timeout is not None and timeout <= 0:
1562 break
1563 for proc in alive:
1564 # Make sure that every complete iteration (all processes)
1565 # will last max 1 sec.
1566 # We do this because we don't want to wait too long on a
1567 # single process: in case it terminates too late other
1568 # processes may disappear in the meantime and their PID
1569 # reused.
1570 max_timeout = 1.0 / len(alive)
1571 if timeout is not None:
1572 timeout = min((deadline - _timer()), max_timeout)
1573 if timeout <= 0:
1574 break
1575 check_gone(proc, timeout)
1576 else:
1577 check_gone(proc, max_timeout)
1578 alive = alive - gone
1580 if alive:
1581 # Last attempt over processes survived so far.
1582 # timeout == 0 won't make this function wait any further.
1583 for proc in alive:
1584 check_gone(proc, 0)
1585 alive = alive - gone
1587 return (list(gone), list(alive))
1590# =====================================================================
1591# --- CPU related functions
1592# =====================================================================
1595def cpu_count(logical=True):
1596 """Return the number of logical CPUs in the system (same as
1597 os.cpu_count() in Python 3.4).
1599 If *logical* is False return the number of physical cores only
1600 (hyper-thread CPUs are excluded).
1602 Return None if undetermined.
1604 The return value is cached after first call.
1605 If desired, the cache can be cleared like this:
1607 >>> psutil.cpu_count.cache_clear()
1608 """
1609 if logical:
1610 ret = _psplatform.cpu_count_logical()
1611 else:
1612 ret = _psplatform.cpu_count_cores()
1613 if ret is not None and ret < 1:
1614 ret = None
1615 return ret
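# Illustrative usage sketch (not from upstream psutil): logical CPUs vs.
# physical cores; the numbers are indicative (a hyper-threaded machine).
#
#   >>> import psutil
#   >>> psutil.cpu_count()
#   8
#   >>> psutil.cpu_count(logical=False)
#   4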
1618def cpu_times(percpu=False):
1619 """Return system-wide CPU times as a namedtuple.
1620 Every CPU time represents the seconds the CPU has spent in the
1621 given mode. The availability of the namedtuple's fields varies depending on the
1622 platform:
1624 - user
1625 - system
1626 - idle
1627 - nice (UNIX)
1628 - iowait (Linux)
1629 - irq (Linux, FreeBSD)
1630 - softirq (Linux)
1631 - steal (Linux >= 2.6.11)
1632 - guest (Linux >= 2.6.24)
1633 - guest_nice (Linux >= 3.2.0)
1635 When *percpu* is True return a list of namedtuples for each CPU.
1636 First element of the list refers to first CPU, second element
1637 to second CPU and so on.
1638 The order of the list is consistent across calls.
1639 """
1640 if not percpu:
1641 return _psplatform.cpu_times()
1642 else:
1643 return _psplatform.per_cpu_times()
1646try:
1647 _last_cpu_times = {threading.current_thread().ident: cpu_times()}
1648except Exception:
1649 # Don't want to crash at import time.
1650 _last_cpu_times = {}
1652try:
1653 _last_per_cpu_times = {
1654 threading.current_thread().ident: cpu_times(percpu=True)
1655 }
1656except Exception:
1657 # Don't want to crash at import time.
1658 _last_per_cpu_times = {}
1661def _cpu_tot_time(times):
1662 """Given a cpu_time() ntuple calculates the total CPU time
1663 (including idle time).
1664 """
1665 tot = sum(times)
1666 if LINUX:
1667 # On Linux guest times are already accounted in "user" or
1668 # "nice" times, so we subtract them from total.
1669 # Htop does the same. References:
1670 # https://github.com/giampaolo/psutil/pull/940
1671 # http://unix.stackexchange.com/questions/178045
1672 # https://github.com/torvalds/linux/blob/
1673 # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/
1674 # cputime.c#L158
1675 tot -= getattr(times, "guest", 0) # Linux 2.6.24+
1676 tot -= getattr(times, "guest_nice", 0) # Linux 3.2.0+
1677 return tot
1680def _cpu_busy_time(times):
1681 """Given a cpu_time() ntuple calculates the busy CPU time.
1682 We do so by subtracting all idle CPU times.
1683 """
1684 busy = _cpu_tot_time(times)
1685 busy -= times.idle
1686 # Linux: "iowait" is time during which the CPU does not do anything
1687 # (waits for IO to complete). On Linux IO wait is *not* accounted
1688 # in "idle" time so we subtract it. Htop does the same.
1689 # References:
1690 # https://github.com/torvalds/linux/blob/
1691 # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/cputime.c#L244
1692 busy -= getattr(times, "iowait", 0)
1693 return busy
1696def _cpu_times_deltas(t1, t2):
1697 assert t1._fields == t2._fields, (t1, t2)
1698 field_deltas = []
1699 for field in _psplatform.scputimes._fields:
1700 field_delta = getattr(t2, field) - getattr(t1, field)
1701 # CPU times are always supposed to increase over time
1702 # or at least remain the same and that's because time
1703 # cannot go backwards.
1704 # Surprisingly sometimes this might not be the case (at
1705 # least on Windows and Linux), see:
1706 # https://github.com/giampaolo/psutil/issues/392
1707 # https://github.com/giampaolo/psutil/issues/645
1708 # https://github.com/giampaolo/psutil/issues/1210
1709 # Trim negative deltas to zero to ignore decreasing fields.
1710 # top does the same. Reference:
1711 # https://gitlab.com/procps-ng/procps/blob/v3.3.12/top/top.c#L5063
1712 field_delta = max(0, field_delta)
1713 field_deltas.append(field_delta)
1714 return _psplatform.scputimes(*field_deltas)
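# Worked example (not from upstream psutil) of how the helpers above
# combine in cpu_percent(): given two cpu_times() samples whose deltas are
# user=1.5, system=0.5, idle=7.0, iowait=1.0 (Linux), then
# _cpu_tot_time() == 10.0, _cpu_busy_time() == 10.0 - 7.0 - 1.0 == 2.0,
# and the reported utilization is 2.0 / 10.0 * 100 == 20.0%.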
1717def cpu_percent(interval=None, percpu=False):
1718 """Return a float representing the current system-wide CPU
1719 utilization as a percentage.
1721 When *interval* is > 0.0 compares system CPU times elapsed before
1722 and after the interval (blocking).
1724 When *interval* is 0.0 or None compares system CPU times elapsed
1725 since last call or module import, returning immediately (non
1726 blocking). That means the first time this is called it will
1727 return a meaningless 0.0 value which you should ignore.
1728 In this case it is recommended, for accuracy, that this function be
1729 called with at least 0.1 seconds between calls.
1731 When *percpu* is True returns a list of floats representing the
1732 utilization as a percentage for each CPU.
1733 First element of the list refers to first CPU, second element
1734 to second CPU and so on.
1735 The order of the list is consistent across calls.
1737 Examples:
1739 >>> # blocking, system-wide
1740 >>> psutil.cpu_percent(interval=1)
1741 2.0
1742 >>>
1743 >>> # blocking, per-cpu
1744 >>> psutil.cpu_percent(interval=1, percpu=True)
1745 [2.0, 1.0]
1746 >>>
1747 >>> # non-blocking (percentage since last call)
1748 >>> psutil.cpu_percent(interval=None)
1749 2.9
1750 >>>
1751 """
1752 tid = threading.current_thread().ident
1753 blocking = interval is not None and interval > 0.0
1754 if interval is not None and interval < 0:
1755 raise ValueError("interval is not positive (got %r)" % interval)
1757 def calculate(t1, t2):
1758 times_delta = _cpu_times_deltas(t1, t2)
1759 all_delta = _cpu_tot_time(times_delta)
1760 busy_delta = _cpu_busy_time(times_delta)
1762 try:
1763 busy_perc = (busy_delta / all_delta) * 100
1764 except ZeroDivisionError:
1765 return 0.0
1766 else:
1767 return round(busy_perc, 1)
1769 # system-wide usage
1770 if not percpu:
1771 if blocking:
1772 t1 = cpu_times()
1773 time.sleep(interval)
1774 else:
1775 t1 = _last_cpu_times.get(tid) or cpu_times()
1776 _last_cpu_times[tid] = cpu_times()
1777 return calculate(t1, _last_cpu_times[tid])
1778 # per-cpu usage
1779 else:
1780 ret = []
1781 if blocking:
1782 tot1 = cpu_times(percpu=True)
1783 time.sleep(interval)
1784 else:
1785 tot1 = _last_per_cpu_times.get(tid) or cpu_times(percpu=True)
1786 _last_per_cpu_times[tid] = cpu_times(percpu=True)
1787 for t1, t2 in zip(tot1, _last_per_cpu_times[tid]):
1788 ret.append(calculate(t1, t2))
1789 return ret
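# [editor's sketch] Typical non-blocking usage of cpu_percent(): the first
# call only primes the internal baseline (and returns a meaningless 0.0);
# subsequent calls report the CPU% elapsed since the previous call.
# Hypothetical helper, not part of psutil.
def _example_poll_cpu_percent(samples=3, delay=0.5):
    import time
    import psutil
    psutil.cpu_percent(interval=None)          # prime the baseline
    out = []
    for _ in range(samples):
        time.sleep(delay)                      # keep >= 0.1s between calls
        out.append(psutil.cpu_percent(interval=None))
    return out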
1792# Use a separate dict for cpu_times_percent(), so it's independent from
1793# cpu_percent() and they can both be used within the same program.
1794_last_cpu_times_2 = _last_cpu_times.copy()
1795_last_per_cpu_times_2 = _last_per_cpu_times.copy()
1798def cpu_times_percent(interval=None, percpu=False):
1799 """Same as cpu_percent() but provides utilization percentages
1800 for each specific CPU time, as returned by cpu_times().
1801 For instance, on Linux we'll get:
1803 >>> cpu_times_percent()
1804 cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
1805 irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
1806 >>>
1808 *interval* and *percpu* arguments have the same meaning as in
1809 cpu_percent().
1810 """
1811 tid = threading.current_thread().ident
1812 blocking = interval is not None and interval > 0.0
1813 if interval is not None and interval < 0:
1814 raise ValueError("interval is negative (got %r)" % interval)
1816 def calculate(t1, t2):
1817 nums = []
1818 times_delta = _cpu_times_deltas(t1, t2)
1819 all_delta = _cpu_tot_time(times_delta)
1820 # "scale" is the value to multiply each delta with to get percentages.
1821 # We use "max" to avoid division by zero (if all_delta is 0, then all
1822 # fields are 0 so percentages will be 0 too. all_delta cannot be a
1823 # fraction because cpu times are integers)
1824 scale = 100.0 / max(1, all_delta)
1825 for field_delta in times_delta:
1826 field_perc = field_delta * scale
1827 field_perc = round(field_perc, 1)
1828 # make sure we don't return negative values or values over 100%
1829 field_perc = min(max(0.0, field_perc), 100.0)
1830 nums.append(field_perc)
1831 return _psplatform.scputimes(*nums)
1833 # system-wide usage
1834 if not percpu:
1835 if blocking:
1836 t1 = cpu_times()
1837 time.sleep(interval)
1838 else:
1839 t1 = _last_cpu_times_2.get(tid) or cpu_times()
1840 _last_cpu_times_2[tid] = cpu_times()
1841 return calculate(t1, _last_cpu_times_2[tid])
1842 # per-cpu usage
1843 else:
1844 ret = []
1845 if blocking:
1846 tot1 = cpu_times(percpu=True)
1847 time.sleep(interval)
1848 else:
1849 tot1 = _last_per_cpu_times_2.get(tid) or cpu_times(percpu=True)
1850 _last_per_cpu_times_2[tid] = cpu_times(percpu=True)
1851 for t1, t2 in zip(tot1, _last_per_cpu_times_2[tid]):
1852 ret.append(calculate(t1, t2))
1853 return ret
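# [editor's sketch] cpu_times_percent() returns the same kind of figure
# broken down per CPU-time field; here a few portable fields are picked out.
# Hypothetical helper, not part of psutil.
def _example_cpu_time_breakdown(interval=1.0):
    import psutil
    pcts = psutil.cpu_times_percent(interval=interval)   # blocking sample
    # Only "user", "system" and "idle" exist on every platform; the rest
    # (iowait, irq, steal, ...) are platform specific.
    return {field: getattr(pcts, field)
            for field in ("user", "system", "idle") if hasattr(pcts, field)}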
1856def cpu_stats():
1857 """Return CPU statistics."""
1858 return _psplatform.cpu_stats()
1861if hasattr(_psplatform, "cpu_freq"):
1863 def cpu_freq(percpu=False):
1864 """Return CPU frequency as a namedtuple including current,
1865 min and max frequency, expressed in MHz.
1867 If *percpu* is True and the system supports per-cpu frequency
1868 retrieval (Linux only), a list of frequencies is returned for
1869 each CPU. If not, a list with one element is returned.
1870 """
1871 ret = _psplatform.cpu_freq()
1872 if percpu:
1873 return ret
1874 else:
1875 num_cpus = float(len(ret))
1876 if num_cpus == 0:
1877 return None
1878 elif num_cpus == 1:
1879 return ret[0]
1880 else:
1881 currs, mins, maxs = 0.0, 0.0, 0.0
1882 set_none = False
1883 for cpu in ret:
1884 currs += cpu.current
1885 # On Linux if /proc/cpuinfo is used min/max are set
1886 # to None.
1887 if LINUX and cpu.min is None:
1888 set_none = True
1889 continue
1890 mins += cpu.min
1891 maxs += cpu.max
1893 current = currs / num_cpus
1895 if set_none:
1896 min_ = max_ = None
1897 else:
1898 min_ = mins / num_cpus
1899 max_ = maxs / num_cpus
1901 return _common.scpufreq(current, min_, max_)
1903 __all__.append("cpu_freq")
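# [editor's sketch] cpu_freq() only exists when the platform module provides
# it, so guard with hasattr(); per-CPU retrieval is Linux only, and min/max
# may be None. Hypothetical helper, not part of psutil.
def _example_print_cpu_freq():
    import psutil
    if not hasattr(psutil, "cpu_freq"):
        return None
    freq = psutil.cpu_freq()                   # may be None on some systems
    if freq is not None:
        print("current=%.0f MHz min=%s max=%s"
              % (freq.current, freq.min, freq.max))
    return freq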
1906if hasattr(os, "getloadavg") or hasattr(_psplatform, "getloadavg"):
1907 # Perform this hasattr check once at import time to either use the
1908 # platform based code or proxy straight from the os module.
1909 if hasattr(os, "getloadavg"):
1910 getloadavg = os.getloadavg
1911 else:
1912 getloadavg = _psplatform.getloadavg
1914 __all__.append("getloadavg")
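# [editor's sketch] A common way to read the load average is to normalize it
# by the number of logical CPUs, turning it into a rough saturation
# percentage. Hypothetical helper, not part of psutil.
def _example_load_percent():
    import psutil
    if not hasattr(psutil, "getloadavg"):
        return None
    load1, load5, load15 = psutil.getloadavg()
    ncpus = psutil.cpu_count() or 1
    return tuple(round(x / ncpus * 100, 1) for x in (load1, load5, load15))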
1917# =====================================================================
1918# --- system memory related functions
1919# =====================================================================
1922def virtual_memory():
1923 """Return statistics about system memory usage as a namedtuple
1924 including the following fields, expressed in bytes:
1926 - total:
1927 total physical memory available.
1929 - available:
1930 the memory that can be given instantly to processes without the
1931 system going into swap.
1932 This is calculated by summing different memory values depending
1933 on the platform and it is supposed to be used to monitor actual
1934 memory usage in a cross platform fashion.
1936 - percent:
1937 the percentage usage calculated as (total - available) / total * 100
1939 - used:
1940 memory used, calculated differently depending on the platform and
1941 designed for informational purposes only:
1942 macOS: active + wired
1943 BSD: active + wired + cached
1944 Linux: total - free
1946 - free:
1947 memory not being used at all (zeroed) that is readily available;
1948 note that this doesn't reflect the actual memory available
1949 (use 'available' instead)
1951 Platform-specific fields:
1953 - active (UNIX):
1954 memory currently in use or very recently used, and so it is in RAM.
1956 - inactive (UNIX):
1957 memory that is marked as not used.
1959 - buffers (BSD, Linux):
1960 cache for things like file system metadata.
1962 - cached (BSD, macOS):
1963 cache for various things.
1965 - wired (macOS, BSD):
1966 memory that is marked to always stay in RAM. It is never moved to disk.
1968 - shared (BSD):
1969 memory that may be simultaneously accessed by multiple processes.
1971 The sum of 'used' and 'available' does not necessarily equal total.
1972 On Windows 'available' and 'free' are the same.
1973 """
1974 global _TOTAL_PHYMEM
1975 ret = _psplatform.virtual_memory()
1976 # cached for later use in Process.memory_percent()
1977 _TOTAL_PHYMEM = ret.total
1978 return ret
1981def swap_memory():
1982 """Return system swap memory statistics as a namedtuple including
1983 the following fields:
1985 - total: total swap memory in bytes
1986 - used: used swap memory in bytes
1987 - free: free swap memory in bytes
1988 - percent: the percentage usage
1989 - sin: no. of bytes the system has swapped in from disk (cumulative)
1990 - sout: no. of bytes the system has swapped out from disk (cumulative)
1992 'sin' and 'sout' on Windows are meaningless and always set to 0.
1993 """
1994 return _psplatform.swap_memory()
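# [editor's sketch] Minimal cross-platform memory check built on the two
# functions above: 'available' (not 'free') is the portable "how much can I
# still use" figure, and vm.percent is (total - available) / total * 100.
# Hypothetical helper, not part of psutil.
def _example_memory_summary(threshold=90.0):
    import psutil
    vm = psutil.virtual_memory()
    sw = psutil.swap_memory()
    return {"ram_percent": vm.percent,
            "ram_available": vm.available,
            "swap_percent": sw.percent,
            "low_memory": vm.percent >= threshold}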
1997# =====================================================================
1998 # --- disks/partitions related functions
1999# =====================================================================
2002def disk_usage(path):
2003 """Return disk usage statistics about the given *path* as a
2004 namedtuple including total, used and free space expressed in bytes
2005 plus the percentage usage.
2006 """
2007 return _psplatform.disk_usage(path)
2010def disk_partitions(all=False):
2011 """Return mounted partitions as a list of
2012 (device, mountpoint, fstype, opts) namedtuples.
2013 'opts' field is a raw string separated by commas indicating mount
2014 options which may vary depending on the platform.
2016 If the *all* parameter is False, return physical devices only and
2017 ignore all others.
2018 """
2019 def pathconf(path, name):
2020 try:
2021 return os.pathconf(path, name)
2022 except (OSError, AttributeError):
2023 pass
2025 ret = _psplatform.disk_partitions(all)
2026 if POSIX:
2027 new = []
2028 for item in ret:
2029 nt = item._replace(
2030 maxfile=pathconf(item.mountpoint, 'PC_NAME_MAX'),
2031 maxpath=pathconf(item.mountpoint, 'PC_PATH_MAX'))
2032 new.append(nt)
2033 return new
2034 else:
2035 return ret
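# [editor's sketch] Combining disk_partitions() and disk_usage(): iterate the
# mounted physical partitions and report usage per mountpoint, skipping
# mountpoints that cannot be stat'ed. Hypothetical helper, not part of psutil.
def _example_disk_usage_report():
    import psutil
    report = {}
    for part in psutil.disk_partitions(all=False):
        try:
            usage = psutil.disk_usage(part.mountpoint)
        except OSError:
            continue                           # e.g. CD-ROM drive with no media
        report[part.mountpoint] = (part.fstype, usage.percent)
    return report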
2038def disk_io_counters(perdisk=False, nowrap=True):
2039 """Return system disk I/O statistics as a namedtuple including
2040 the following fields:
2042 - read_count: number of reads
2043 - write_count: number of writes
2044 - read_bytes: number of bytes read
2045 - write_bytes: number of bytes written
2046 - read_time: time spent reading from disk (in ms)
2047 - write_time: time spent writing to disk (in ms)
2049 Platform specific:
2051 - busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms)
2052 - read_merged_count (Linux): number of merged reads
2053 - write_merged_count (Linux): number of merged writes
2055 If *perdisk* is True return the same information for every
2056 physical disk installed on the system as a dictionary
2057 with partition names as the keys and the namedtuple
2058 described above as the values.
2060 If *nowrap* is True it detects and adjusts the numbers which overflow
2061 and wrap (restart from 0), adding "old value" to "new value" so that
2062 the returned numbers will always be increasing or remain the same,
2063 but never decrease.
2064 "disk_io_counters.cache_clear()" can be used to invalidate the
2065 cache.
2067 On recent Windows versions the 'diskperf -y' command may need to be
2068 executed first, otherwise this function won't find any disk.
2069 """
2070 kwargs = dict(perdisk=perdisk) if LINUX else {}
2071 rawdict = _psplatform.disk_io_counters(**kwargs)
2072 if not rawdict:
2073 return {} if perdisk else None
2074 if nowrap:
2075 rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters')
2076 nt = getattr(_psplatform, "sdiskio", _common.sdiskio)
2077 if perdisk:
2078 for disk, fields in rawdict.items():
2079 rawdict[disk] = nt(*fields)
2080 return rawdict
2081 else:
2082 return nt(*(sum(x) for x in zip(*rawdict.values())))
2085disk_io_counters.cache_clear = functools.partial(
2086 _wrap_numbers.cache_clear, 'psutil.disk_io_counters')
2087disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
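# [editor's sketch] Because the counters above are cumulative, throughput is
# the difference between two snapshots divided by the elapsed time;
# nowrap=True (the default) keeps the counters monotonic across wrap-arounds.
# Hypothetical helper, not part of psutil.
def _example_disk_throughput(interval=1.0):
    import time
    import psutil
    c1 = psutil.disk_io_counters()
    if c1 is None:                             # no disks found
        return None
    time.sleep(interval)
    c2 = psutil.disk_io_counters()
    return {"read_Bps": (c2.read_bytes - c1.read_bytes) / interval,
            "write_Bps": (c2.write_bytes - c1.write_bytes) / interval}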
2090# =====================================================================
2091# --- network related functions
2092# =====================================================================
2095def net_io_counters(pernic=False, nowrap=True):
2096 """Return network I/O statistics as a namedtuple including
2097 the following fields:
2099 - bytes_sent: number of bytes sent
2100 - bytes_recv: number of bytes received
2101 - packets_sent: number of packets sent
2102 - packets_recv: number of packets received
2103 - errin: total number of errors while receiving
2104 - errout: total number of errors while sending
2105 - dropin: total number of incoming packets which were dropped
2106 - dropout: total number of outgoing packets which were dropped
2107 (always 0 on macOS and BSD)
2109 If *pernic* is True return the same information for every
2110 network interface installed on the system as a dictionary
2111 with network interface names as the keys and the namedtuple
2112 described above as the values.
2114 If *nowrap* is True it detects and adjusts the numbers which overflow
2115 and wrap (restart from 0), adding "old value" to "new value" so that
2116 the returned numbers will always be increasing or remain the same,
2117 but never decrease.
2118 "net_io_counters.cache_clear()" can be used to invalidate the
2119 cache.
2120 """
2121 rawdict = _psplatform.net_io_counters()
2122 if not rawdict:
2123 return {} if pernic else None
2124 if nowrap:
2125 rawdict = _wrap_numbers(rawdict, 'psutil.net_io_counters')
2126 if pernic:
2127 for nic, fields in rawdict.items():
2128 rawdict[nic] = _common.snetio(*fields)
2129 return rawdict
2130 else:
2131 return _common.snetio(*[sum(x) for x in zip(*rawdict.values())])
2134net_io_counters.cache_clear = functools.partial(
2135 _wrap_numbers.cache_clear, 'psutil.net_io_counters')
2136net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
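# [editor's sketch] The same snapshot/delta pattern works for the network
# counters; with pernic=True the same arithmetic can be done per interface.
# Hypothetical helper, not part of psutil.
def _example_net_throughput(interval=1.0):
    import time
    import psutil
    c1 = psutil.net_io_counters()
    if c1 is None:                             # no NICs found
        return None
    time.sleep(interval)
    c2 = psutil.net_io_counters()
    return {"sent_Bps": (c2.bytes_sent - c1.bytes_sent) / interval,
            "recv_Bps": (c2.bytes_recv - c1.bytes_recv) / interval}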
2139def net_connections(kind='inet'):
2140 """Return system-wide socket connections as a list of
2141 (fd, family, type, laddr, raddr, status, pid) namedtuples.
2142 In case of limited privileges 'fd' and 'pid' may be set to -1
2143 and None respectively.
2144 The *kind* parameter filters for connections that fit the
2145 following criteria:
2147 +------------+----------------------------------------------------+
2148 | Kind Value | Connections using |
2149 +------------+----------------------------------------------------+
2150 | inet | IPv4 and IPv6 |
2151 | inet4 | IPv4 |
2152 | inet6 | IPv6 |
2153 | tcp | TCP |
2154 | tcp4 | TCP over IPv4 |
2155 | tcp6 | TCP over IPv6 |
2156 | udp | UDP |
2157 | udp4 | UDP over IPv4 |
2158 | udp6 | UDP over IPv6 |
2159 | unix | UNIX socket (both UDP and TCP protocols) |
2160 | all | the sum of all the possible families and protocols |
2161 +------------+----------------------------------------------------+
2163 On macOS this function requires root privileges.
2164 """
2165 return _psplatform.net_connections(kind)
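# [editor's sketch] Filtering the system-wide connection table for listening
# TCP sockets; on macOS this requires root, and 'pid' may be None with
# limited privileges. Hypothetical helper, not part of psutil.
def _example_listening_ports():
    import psutil
    ports = []
    for conn in psutil.net_connections(kind="tcp"):
        if conn.status == psutil.CONN_LISTEN and conn.laddr:
            ports.append((conn.laddr.port, conn.pid))
    return sorted(ports, key=lambda x: x[0])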
2168def net_if_addrs():
2169 """Return the addresses associated to each NIC (network interface
2170 card) installed on the system as a dictionary whose keys are the
2171 NIC names and value is a list of namedtuples for each address
2172 assigned to the NIC. Each namedtuple includes 5 fields:
2174 - family: can be either socket.AF_INET, socket.AF_INET6 or
2175 psutil.AF_LINK, which refers to a MAC address.
2176 - address: the primary address; it is always set.
2177 - netmask: the netmask address; may be None.
2178 - broadcast: the broadcast address; may be None and is mutually
2179 exclusive with 'ptp'.
2180 - ptp: stands for "point to point"; the destination address on a
2181 point-to-point interface (typically a VPN). May be None.
2183 Note: you can have more than one address of the same family
2184 associated with each interface.
2185 """
2186 has_enums = _PY3
2187 if has_enums:
2188 import socket
2189 rawlist = _psplatform.net_if_addrs()
2190 rawlist.sort(key=lambda x: x[1]) # sort by family
2191 ret = collections.defaultdict(list)
2192 for name, fam, addr, mask, broadcast, ptp in rawlist:
2193 if has_enums:
2194 try:
2195 fam = socket.AddressFamily(fam)
2196 except ValueError:
2197 if WINDOWS and fam == -1:
2198 fam = _psplatform.AF_LINK
2199 elif (hasattr(_psplatform, "AF_LINK") and
2200 _psplatform.AF_LINK == fam):
2201 # Linux defines AF_LINK as an alias for AF_PACKET.
2202 # We re-set the family here so that repr(family)
2203 # will show AF_LINK rather than AF_PACKET
2204 fam = _psplatform.AF_LINK
2205 if fam == _psplatform.AF_LINK:
2206 # The underlying C function may return an incomplete MAC
2207 # address in which case we fill it with null bytes, see:
2208 # https://github.com/giampaolo/psutil/issues/786
2209 separator = ":" if POSIX else "-"
2210 while addr.count(separator) < 5:
2211 addr += "%s00" % separator
2212 ret[name].append(_common.snicaddr(fam, addr, mask, broadcast, ptp))
2213 return dict(ret)
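# [editor's sketch] Pulling the MAC address (family == psutil.AF_LINK) and
# the IPv4 addresses out of net_if_addrs(). Hypothetical helper, not part of
# psutil.
def _example_nic_addresses():
    import socket
    import psutil
    out = {}
    for nic, addrs in psutil.net_if_addrs().items():
        mac = [a.address for a in addrs if a.family == psutil.AF_LINK]
        ipv4 = [a.address for a in addrs if a.family == socket.AF_INET]
        out[nic] = {"mac": mac[0] if mac else None, "ipv4": ipv4}
    return out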
2216def net_if_stats():
2217 """Return information about each NIC (network interface card)
2218 installed on the system as a dictionary whose keys are the
2219 NIC names and whose values are namedtuples with the following fields:
2221 - isup: whether the interface is up (bool)
2222 - duplex: can be either NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or
2223 NIC_DUPLEX_UNKNOWN
2224 - speed: the NIC speed expressed in megabits per second; if it can't
2225 be determined (e.g. 'localhost') it will be set to 0.
2226 - mtu: the maximum transmission unit expressed in bytes.
2227 """
2228 return _psplatform.net_if_stats()
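# [editor's sketch] Listing the interfaces that are currently up, together
# with their MTU. Hypothetical helper, not part of psutil.
def _example_up_interfaces():
    import psutil
    return {name: st.mtu
            for name, st in psutil.net_if_stats().items() if st.isup}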
2231# =====================================================================
2232# --- sensors
2233# =====================================================================
2236# Linux, macOS
2237if hasattr(_psplatform, "sensors_temperatures"):
2239 def sensors_temperatures(fahrenheit=False):
2240 """Return hardware temperatures. Each entry is a namedtuple
2241 representing a certain hardware sensor (it may be a CPU, a
2242 hard disk or something else, depending on the OS and its
2243 configuration).
2244 All temperatures are expressed in celsius unless *fahrenheit*
2245 is set to True.
2246 """
2247 def convert(n):
2248 if n is not None:
2249 return (float(n) * 9 / 5) + 32 if fahrenheit else n
2251 ret = collections.defaultdict(list)
2252 rawdict = _psplatform.sensors_temperatures()
2254 for name, values in rawdict.items():
2255 while values:
2256 label, current, high, critical = values.pop(0)
2257 current = convert(current)
2258 high = convert(high)
2259 critical = convert(critical)
2261 if high and not critical:
2262 critical = high
2263 elif critical and not high:
2264 high = critical
2266 ret[name].append(
2267 _common.shwtemp(label, current, high, critical))
2269 return dict(ret)
2271 __all__.append("sensors_temperatures")
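# [editor's sketch] sensors_temperatures() is only defined on platforms that
# support it (the hasattr guard above), so check for it first; this flags
# readings at or above their "high" threshold. Hypothetical helper, not part
# of psutil.
def _example_hot_sensors():
    import psutil
    if not hasattr(psutil, "sensors_temperatures"):
        return []
    hot = []
    for name, entries in psutil.sensors_temperatures().items():
        for entry in entries:
            if (entry.current is not None and entry.high is not None
                    and entry.current >= entry.high):
                hot.append((name, entry.label or name, entry.current))
    return hot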
2274# Linux
2275if hasattr(_psplatform, "sensors_fans"):
2277 def sensors_fans():
2278 """Return fans speed. Each entry is a namedtuple
2279 representing a certain hardware sensor.
2280 All speeds are expressed in RPM (revolutions per minute).
2281 """
2282 return _psplatform.sensors_fans()
2284 __all__.append("sensors_fans")
2287# Linux, Windows, FreeBSD, macOS
2288if hasattr(_psplatform, "sensors_battery"):
2290 def sensors_battery():
2291 """Return battery information. If no battery is installed
2292 returns None.
2294 - percent: battery power left as a percentage.
2295 - secsleft: a rough approximation of how many seconds are left
2296 before the battery runs out of power. May be
2297 POWER_TIME_UNKNOWN or POWER_TIME_UNLIMITED.
2298 - power_plugged: True if the AC power cable is connected.
2299 """
2300 return _psplatform.sensors_battery()
2302 __all__.append("sensors_battery")
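# [editor's sketch] sensors_battery() returns None when no battery is
# present, and secsleft may be one of the special POWER_TIME_* constants
# rather than a real number of seconds. Hypothetical helper, not part of
# psutil.
def _example_battery_status():
    import psutil
    if not hasattr(psutil, "sensors_battery"):
        return None
    batt = psutil.sensors_battery()
    if batt is None:
        return None
    if batt.power_plugged or batt.secsleft in (
            psutil.POWER_TIME_UNLIMITED, psutil.POWER_TIME_UNKNOWN):
        left = None
    else:
        left = batt.secsleft // 60             # minutes remaining
    return {"percent": batt.percent, "plugged": batt.power_plugged,
            "minutes_left": left}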
2305# =====================================================================
2306# --- other system related functions
2307# =====================================================================
2310def boot_time():
2311 """Return the system boot time expressed in seconds since the epoch."""
2312 # Note: we are not caching this because it is subject to
2313 # system clock updates.
2314 return _psplatform.boot_time()
2317def users():
2318 """Return users currently connected on the system as a list of
2319 namedtuples including the following fields:
2321 - name: the name of the user
2322 - terminal: the tty or pseudo-tty associated with the user, if any.
2323 - host: the host name associated with the entry, if any.
2324 - started: the creation time as a floating point number expressed in
2325 seconds since the epoch.
2326 """
2327 return _psplatform.users()
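# [editor's sketch] Both boot_time() and the 'started' field of users() are
# plain epoch timestamps, so datetime.fromtimestamp() turns them into
# readable local times. Hypothetical helper, not part of psutil.
def _example_uptime_and_users():
    import datetime
    import psutil
    booted = datetime.datetime.fromtimestamp(psutil.boot_time())
    sessions = [(u.name, u.terminal,
                 datetime.datetime.fromtimestamp(u.started))
                for u in psutil.users()]
    return booted, sessions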
2330# =====================================================================
2331# --- Windows services
2332# =====================================================================
2335if WINDOWS:
2337 def win_service_iter():
2338 """Return a generator yielding a WindowsService instance for all
2339 Windows services installed.
2340 """
2341 return _psplatform.win_service_iter()
2343 def win_service_get(name):
2344 """Get a Windows service by *name*.
2345 Raise NoSuchProcess if no service with such a name exists.
2346 """
2347 return _psplatform.win_service_get(name)
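# [editor's sketch] Usage of the two Windows-only helpers above: WindowsService
# objects expose methods such as name(), display_name() and status(). Only
# meaningful when psutil.WINDOWS is True. Hypothetical helper, not part of
# psutil.
def _example_running_services():
    import psutil
    if not psutil.WINDOWS:
        return []
    return [svc.name() for svc in psutil.win_service_iter()
            if svc.status() == "running"]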
2350# =====================================================================
2353def _set_debug(value):
2354 """Enable or disable PSUTIL_DEBUG option, which prints debugging
2355 messages to stderr.
2356 """
2357 import psutil._common
2358 psutil._common.PSUTIL_DEBUG = bool(value)
2359 _psplatform.cext.set_debug(bool(value))
2362def test(): # pragma: no cover
2363 from ._common import bytes2human
2364 from ._compat import get_terminal_size
2366 today_day = datetime.date.today()
2367 templ = "%-10s %5s %5s %7s %7s %5s %6s %6s %6s %s"
2368 attrs = ['pid', 'memory_percent', 'name', 'cmdline', 'cpu_times',
2369 'create_time', 'memory_info', 'status', 'nice', 'username']
2370 print(templ % ("USER", "PID", "%MEM", "VSZ", "RSS", "NICE", # NOQA
2371 "STATUS", "START", "TIME", "CMDLINE"))
2372 for p in process_iter(attrs, ad_value=None):
2373 if p.info['create_time']:
2374 ctime = datetime.datetime.fromtimestamp(p.info['create_time'])
2375 if ctime.date() == today_day:
2376 ctime = ctime.strftime("%H:%M")
2377 else:
2378 ctime = ctime.strftime("%b%d")
2379 else:
2380 ctime = ''
2381 if p.info['cpu_times']:
2382 cputime = time.strftime("%M:%S",
2383 time.localtime(sum(p.info['cpu_times'])))
2384 else:
2385 cputime = ''
2387 user = p.info['username'] or ''
2388 if not user and POSIX:
2389 try:
2390 user = p.uids()[0]
2391 except Error:
2392 pass
2393 if user and WINDOWS and '\\' in user:
2394 user = user.split('\\')[1]
2395 user = user[:9]
2396 vms = bytes2human(p.info['memory_info'].vms) if \
2397 p.info['memory_info'] is not None else ''
2398 rss = bytes2human(p.info['memory_info'].rss) if \
2399 p.info['memory_info'] is not None else ''
2400 memp = round(p.info['memory_percent'], 1) if \
2401 p.info['memory_percent'] is not None else ''
2402 nice = int(p.info['nice']) if p.info['nice'] else ''
2403 if p.info['cmdline']:
2404 cmdline = ' '.join(p.info['cmdline'])
2405 else:
2406 cmdline = p.info['name']
2407 status = p.info['status'][:5] if p.info['status'] else ''
2409 line = templ % (
2410 user[:10],
2411 p.info['pid'],
2412 memp,
2413 vms,
2414 rss,
2415 nice,
2416 status,
2417 ctime,
2418 cputime,
2419 cmdline)
2420 print(line[:get_terminal_size()[0]]) # NOQA
2423del memoize_when_activated, division
2424if sys.version_info[0] < 3:
2425 del num, x
2427if __name__ == "__main__":
2428 test()