# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.

"""psutil is a cross-platform library for retrieving information on
running processes and system utilization (CPU, memory, disks, network,
sensors) in Python. Supported platforms:

 - Linux
 - Windows
 - macOS
 - FreeBSD
 - OpenBSD
 - NetBSD
 - Sun Solaris
 - AIX

Supported Python versions are CPython 3.6+ and PyPy.
"""
20
21import collections
22import contextlib
23import datetime
24import functools
25import os
26import signal
27import socket
28import subprocess
29import sys
30import threading
31import time
32
33try:
34 import pwd
35except ImportError:
36 pwd = None
37
38from . import _common
39from . import _ntuples as _ntp
40from ._common import AIX
41from ._common import BSD
42from ._common import CONN_CLOSE
43from ._common import CONN_CLOSE_WAIT
44from ._common import CONN_CLOSING
45from ._common import CONN_ESTABLISHED
46from ._common import CONN_FIN_WAIT1
47from ._common import CONN_FIN_WAIT2
48from ._common import CONN_LAST_ACK
49from ._common import CONN_LISTEN
50from ._common import CONN_NONE
51from ._common import CONN_SYN_RECV
52from ._common import CONN_SYN_SENT
53from ._common import CONN_TIME_WAIT
54from ._common import FREEBSD
55from ._common import LINUX
56from ._common import MACOS
57from ._common import NETBSD
58from ._common import NIC_DUPLEX_FULL
59from ._common import NIC_DUPLEX_HALF
60from ._common import NIC_DUPLEX_UNKNOWN
61from ._common import OPENBSD
62from ._common import OSX # deprecated alias
63from ._common import POSIX
64from ._common import POWER_TIME_UNKNOWN
65from ._common import POWER_TIME_UNLIMITED
66from ._common import STATUS_DEAD
67from ._common import STATUS_DISK_SLEEP
68from ._common import STATUS_IDLE
69from ._common import STATUS_LOCKED
70from ._common import STATUS_PARKED
71from ._common import STATUS_RUNNING
72from ._common import STATUS_SLEEPING
73from ._common import STATUS_STOPPED
74from ._common import STATUS_TRACING_STOP
75from ._common import STATUS_WAITING
76from ._common import STATUS_WAKING
77from ._common import STATUS_ZOMBIE
78from ._common import SUNOS
79from ._common import WINDOWS
80from ._common import AccessDenied
81from ._common import Error
82from ._common import NoSuchProcess
83from ._common import TimeoutExpired
84from ._common import ZombieProcess
85from ._common import debug
86from ._common import memoize_when_activated
87from ._common import wrap_numbers as _wrap_numbers
88
89if LINUX:
90 # This is public API and it will be retrieved from _pslinux.py
91 # via sys.modules.
92 PROCFS_PATH = "/proc"
93
94 from . import _pslinux as _psplatform
95 from ._pslinux import IOPRIO_CLASS_BE # noqa: F401
96 from ._pslinux import IOPRIO_CLASS_IDLE # noqa: F401
97 from ._pslinux import IOPRIO_CLASS_NONE # noqa: F401
98 from ._pslinux import IOPRIO_CLASS_RT # noqa: F401
99
100elif WINDOWS:
101 from . import _pswindows as _psplatform
102 from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS # noqa: F401
103 from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS # noqa: F401
104 from ._psutil_windows import HIGH_PRIORITY_CLASS # noqa: F401
105 from ._psutil_windows import IDLE_PRIORITY_CLASS # noqa: F401
106 from ._psutil_windows import NORMAL_PRIORITY_CLASS # noqa: F401
107 from ._psutil_windows import REALTIME_PRIORITY_CLASS # noqa: F401
108 from ._pswindows import CONN_DELETE_TCB # noqa: F401
109 from ._pswindows import IOPRIO_HIGH # noqa: F401
110 from ._pswindows import IOPRIO_LOW # noqa: F401
111 from ._pswindows import IOPRIO_NORMAL # noqa: F401
112 from ._pswindows import IOPRIO_VERYLOW # noqa: F401
113
114elif MACOS:
115 from . import _psosx as _psplatform
116
117elif BSD:
118 from . import _psbsd as _psplatform
119
120elif SUNOS:
121 from . import _pssunos as _psplatform
122 from ._pssunos import CONN_BOUND # noqa: F401
123 from ._pssunos import CONN_IDLE # noqa: F401
124
125 # This is public writable API which is read from _pslinux.py and
126 # _pssunos.py via sys.modules.
127 PROCFS_PATH = "/proc"
128
129elif AIX:
130 from . import _psaix as _psplatform
131
    # This is public API and it will be retrieved from _psaix.py
    # via sys.modules.
134 PROCFS_PATH = "/proc"
135
136else: # pragma: no cover
137 msg = f"platform {sys.platform} is not supported"
138 raise NotImplementedError(msg)
139
140
141# fmt: off
142__all__ = [
143 # exceptions
144 "Error", "NoSuchProcess", "ZombieProcess", "AccessDenied",
145 "TimeoutExpired",
146
147 # constants
148 "version_info", "__version__",
149
150 "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
151 "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
    "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_PARKED",
154
155 "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
156 "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
157 "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
158 # "CONN_IDLE", "CONN_BOUND",
159
160 "AF_LINK",
161
162 "NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN",
163
164 "POWER_TIME_UNKNOWN", "POWER_TIME_UNLIMITED",
165
166 "BSD", "FREEBSD", "LINUX", "NETBSD", "OPENBSD", "MACOS", "OSX", "POSIX",
167 "SUNOS", "WINDOWS", "AIX",
168
169 # "RLIM_INFINITY", "RLIMIT_AS", "RLIMIT_CORE", "RLIMIT_CPU", "RLIMIT_DATA",
170 # "RLIMIT_FSIZE", "RLIMIT_LOCKS", "RLIMIT_MEMLOCK", "RLIMIT_NOFILE",
171 # "RLIMIT_NPROC", "RLIMIT_RSS", "RLIMIT_STACK", "RLIMIT_MSGQUEUE",
172 # "RLIMIT_NICE", "RLIMIT_RTPRIO", "RLIMIT_RTTIME", "RLIMIT_SIGPENDING",
173
174 # classes
175 "Process", "Popen",
176
177 # functions
178 "pid_exists", "pids", "process_iter", "wait_procs", # proc
179 "virtual_memory", "swap_memory", # memory
180 "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu
181 "cpu_stats", # "cpu_freq", "getloadavg"
182 "net_io_counters", "net_connections", "net_if_addrs", # network
183 "net_if_stats",
184 "disk_io_counters", "disk_partitions", "disk_usage", # disk
185 # "sensors_temperatures", "sensors_battery", "sensors_fans" # sensors
186 "users", "boot_time", # others
187]
188# fmt: on
189
190
191__all__.extend(_psplatform.__extra__all__)
192
193# Linux, FreeBSD
194if hasattr(_psplatform.Process, "rlimit"):
195 # Populate global namespace with RLIM* constants.
196 _globals = globals()
197 _name = None
198 for _name in dir(_psplatform.cext):
199 if _name.startswith('RLIM') and _name.isupper():
200 _globals[_name] = getattr(_psplatform.cext, _name)
201 __all__.append(_name)
202 del _globals, _name
203
204AF_LINK = _psplatform.AF_LINK
205
206__author__ = "Giampaolo Rodola'"
207__version__ = "7.2.0"
208version_info = tuple(int(num) for num in __version__.split('.'))
209
210_timer = getattr(time, 'monotonic', time.time)
211_TOTAL_PHYMEM = None
212_LOWEST_PID = None
213_SENTINEL = object()
214
215# Sanity check in case the user messed up with psutil installation
216# or did something weird with sys.path. In this case we might end
217# up importing a python module using a C extension module which
218# was compiled for a different version of psutil.
219# We want to prevent that by failing sooner rather than later.
220# See: https://github.com/giampaolo/psutil/issues/564
221if int(__version__.replace('.', '')) != getattr(
222 _psplatform.cext, 'version', None
223):
224 msg = f"version conflict: {_psplatform.cext.__file__!r} C extension "
225 msg += "module was built for another version of psutil"
226 if hasattr(_psplatform.cext, 'version'):
227 v = ".".join(list(str(_psplatform.cext.version)))
228 msg += f" ({v} instead of {__version__})"
229 else:
230 msg += f" (different than {__version__})"
231 what = getattr(
232 _psplatform.cext,
233 "__file__",
234 "the existing psutil install directory",
235 )
236 msg += f"; you may try to 'pip uninstall psutil', manually remove {what}"
237 msg += " or clean the virtual env somehow, then reinstall"
238 raise ImportError(msg)
239
240
241# =====================================================================
242# --- Utils
243# =====================================================================
244
245
246if hasattr(_psplatform, 'ppid_map'):
247 # Faster version (Windows and Linux).
248 _ppid_map = _psplatform.ppid_map
249else: # pragma: no cover
250
251 def _ppid_map():
252 """Return a {pid: ppid, ...} dict for all running processes in
253 one shot. Used to speed up Process.children().
254 """
255 ret = {}
256 for pid in pids():
257 try:
258 ret[pid] = _psplatform.Process(pid).ppid()
259 except (NoSuchProcess, ZombieProcess):
260 pass
261 return ret
262
263
264def _pprint_secs(secs):
265 """Format seconds in a human readable form."""
266 now = time.time()
267 secs_ago = int(now - secs)
268 fmt = "%H:%M:%S" if secs_ago < 60 * 60 * 24 else "%Y-%m-%d %H:%M:%S"
269 return datetime.datetime.fromtimestamp(secs).strftime(fmt)
270
271
272def _check_conn_kind(kind):
273 """Check net_connections()'s `kind` parameter."""
274 kinds = tuple(_common.conn_tmap)
275 if kind not in kinds:
276 msg = f"invalid kind argument {kind!r}; valid ones are: {kinds}"
277 raise ValueError(msg)
278
279
280# =====================================================================
281# --- Process class
282# =====================================================================
283
284
285class Process:
286 """Represents an OS process with the given PID.
287 If PID is omitted current process PID (os.getpid()) is used.
288 Raise NoSuchProcess if PID does not exist.
289
    Note that most of the methods of this class do not check whether
    the PID of the process being queried has been reused. That means
    you may end up retrieving information for another process.
293
294 The only exceptions for which process identity is pre-emptively
295 checked and guaranteed are:
296
297 - parent()
298 - children()
299 - nice() (set)
300 - ionice() (set)
301 - rlimit() (set)
302 - cpu_affinity (set)
303 - suspend()
304 - resume()
305 - send_signal()
306 - terminate()
307 - kill()
308
309 To prevent this problem for all other methods you can use
310 is_running() before querying the process.
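
    Example of guarding against PID reuse (1234 is an arbitrary,
    illustrative PID):

    >>> p = psutil.Process(1234)
    >>> if p.is_running():
    ...     print(p.name())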
311 """
312
313 def __init__(self, pid=None):
314 self._init(pid)
315
316 def _init(self, pid, _ignore_nsp=False):
317 if pid is None:
318 pid = os.getpid()
319 else:
320 if pid < 0:
321 msg = f"pid must be a positive integer (got {pid})"
322 raise ValueError(msg)
323 try:
324 _psplatform.cext.check_pid_range(pid)
325 except OverflowError as err:
326 msg = "process PID out of range"
327 raise NoSuchProcess(pid, msg=msg) from err
328
329 self._pid = pid
330 self._name = None
331 self._exe = None
332 self._create_time = None
333 self._gone = False
334 self._pid_reused = False
335 self._hash = None
336 self._lock = threading.RLock()
337 # used for caching on Windows only (on POSIX ppid may change)
338 self._ppid = None
339 # platform-specific modules define an _psplatform.Process
340 # implementation class
341 self._proc = _psplatform.Process(pid)
342 self._last_sys_cpu_times = None
343 self._last_proc_cpu_times = None
344 self._exitcode = _SENTINEL
345 self._ident = (self.pid, None)
346 try:
347 self._ident = self._get_ident()
348 except AccessDenied:
349 # This should happen on Windows only, since we use the fast
350 # create time method. AFAIK, on all other platforms we are
351 # able to get create time for all PIDs.
352 pass
353 except ZombieProcess:
354 # Zombies can still be queried by this class (although
355 # not always) and pids() return them so just go on.
356 pass
357 except NoSuchProcess:
358 if not _ignore_nsp:
359 msg = "process PID not found"
360 raise NoSuchProcess(pid, msg=msg) from None
361 self._gone = True
362
363 def _get_ident(self):
        """Return a (pid, creation-time) tuple which is supposed to
        identify a Process instance uniquely over time. The PID alone is not
366 enough, as it can be assigned to a new process after this one
367 terminates, so we add process creation time to the mix. We need
368 this in order to prevent killing the wrong process later on.
369 This is also known as PID reuse or PID recycling problem.
370
371 The reliability of this strategy mostly depends on
372 create_time() precision, which is 0.01 secs on Linux. The
373 assumption is that, after a process terminates, the kernel
374 won't reuse the same PID after such a short period of time
375 (0.01 secs). Technically this is inherently racy, but
376 practically it should be good enough.
377
378 NOTE: unreliable on FreeBSD and OpenBSD as ctime is subject to
379 system clock updates.
380 """
381
382 if WINDOWS:
383 # Use create_time() fast method in order to speedup
384 # `process_iter()`. This means we'll get AccessDenied for
385 # most ADMIN processes, but that's fine since it means
386 # we'll also get AccessDenied on kill().
387 # https://github.com/giampaolo/psutil/issues/2366#issuecomment-2381646555
388 self._create_time = self._proc.create_time(fast_only=True)
389 return (self.pid, self._create_time)
390 elif LINUX or NETBSD or OSX:
391 # Use 'monotonic' process starttime since boot to form unique
392 # process identity, since it is stable over changes to system
393 # time.
394 return (self.pid, self._proc.create_time(monotonic=True))
395 else:
396 return (self.pid, self.create_time())
397
398 def __str__(self):
399 info = collections.OrderedDict()
400 info["pid"] = self.pid
401 if self._name:
402 info['name'] = self._name
403 with self.oneshot():
404 if self._pid_reused:
405 info["status"] = "terminated + PID reused"
406 else:
407 try:
408 info["name"] = self.name()
409 info["status"] = self.status()
410 except ZombieProcess:
411 info["status"] = "zombie"
412 except NoSuchProcess:
413 info["status"] = "terminated"
414 except AccessDenied:
415 pass
416
417 if self._exitcode not in {_SENTINEL, None}:
418 info["exitcode"] = self._exitcode
419 if self._create_time is not None:
420 info['started'] = _pprint_secs(self._create_time)
421
422 return "{}.{}({})".format(
423 self.__class__.__module__,
424 self.__class__.__name__,
425 ", ".join([f"{k}={v!r}" for k, v in info.items()]),
426 )
427
428 __repr__ = __str__
429
430 def __eq__(self, other):
431 # Test for equality with another Process object based
432 # on PID and creation time.
433 if not isinstance(other, Process):
434 return NotImplemented
435 if OPENBSD or NETBSD or SUNOS: # pragma: no cover
436 # Zombie processes on Open/NetBSD/illumos/Solaris have a
437 # creation time of 0.0. This covers the case when a process
438 # started normally (so it has a ctime), then it turned into a
439 # zombie. It's important to do this because is_running()
440 # depends on __eq__.
441 pid1, ident1 = self._ident
442 pid2, ident2 = other._ident
443 if pid1 == pid2:
444 if ident1 and not ident2:
445 try:
446 return self.status() == STATUS_ZOMBIE
447 except Error:
448 pass
449 return self._ident == other._ident
450
451 def __ne__(self, other):
452 return not self == other
453
454 def __hash__(self):
455 if self._hash is None:
456 self._hash = hash(self._ident)
457 return self._hash
458
459 def _raise_if_pid_reused(self):
460 """Raises NoSuchProcess in case process PID has been reused."""
        if not self.is_running() and self._pid_reused:
462 # We may directly raise NSP in here already if PID is just
463 # not running, but I prefer NSP to be raised naturally by
464 # the actual Process API call. This way unit tests will tell
465 # us if the API is broken (aka don't raise NSP when it
466 # should). We also remain consistent with all other "get"
467 # APIs which don't use _raise_if_pid_reused().
468 msg = "process no longer exists and its PID has been reused"
469 raise NoSuchProcess(self.pid, self._name, msg=msg)
470
471 @property
472 def pid(self):
473 """The process PID."""
474 return self._pid
475
476 # --- utility methods
477
478 @contextlib.contextmanager
479 def oneshot(self):
480 """Utility context manager which considerably speeds up the
481 retrieval of multiple process information at the same time.
482
        Internally different process info (e.g. name, ppid, uids,
        gids, ...) may be fetched by using the same routine, but
        only one piece of information is returned and the others are
        discarded. When using this context manager the internal
        routine is executed once (in the example below on name())
        and the other info is cached.

        The cache is cleared when exiting the context manager block.
        The advice is to use this every time you retrieve more than
        one piece of information about the process. If you're lucky,
        you'll get a hell of a speedup.
494
495 >>> import psutil
496 >>> p = psutil.Process()
497 >>> with p.oneshot():
498 ... p.name() # collect multiple info
499 ... p.cpu_times() # return cached value
500 ... p.cpu_percent() # return cached value
501 ... p.create_time() # return cached value
502 ...
503 >>>
504 """
505 with self._lock:
506 if hasattr(self, "_cache"):
507 # NOOP: this covers the use case where the user enters the
508 # context twice:
509 #
510 # >>> with p.oneshot():
511 # ... with p.oneshot():
512 # ...
513 #
514 # Also, since as_dict() internally uses oneshot()
515 # I expect that the code below will be a pretty common
516 # "mistake" that the user will make, so let's guard
517 # against that:
518 #
519 # >>> with p.oneshot():
520 # ... p.as_dict()
521 # ...
522 yield
523 else:
524 try:
525 # cached in case cpu_percent() is used
526 self.cpu_times.cache_activate(self)
527 # cached in case memory_percent() is used
528 self.memory_info.cache_activate(self)
529 # cached in case parent() is used
530 self.ppid.cache_activate(self)
531 # cached in case username() is used
532 if POSIX:
533 self.uids.cache_activate(self)
534 # specific implementation cache
535 self._proc.oneshot_enter()
536 yield
537 finally:
538 self.cpu_times.cache_deactivate(self)
539 self.memory_info.cache_deactivate(self)
540 self.ppid.cache_deactivate(self)
541 if POSIX:
542 self.uids.cache_deactivate(self)
543 self._proc.oneshot_exit()
544
545 def as_dict(self, attrs=None, ad_value=None):
        """Utility method returning process information as a
        dictionary.
548 If *attrs* is specified it must be a list of strings
549 reflecting available Process class' attribute names
550 (e.g. ['cpu_times', 'name']) else all public (read
551 only) attributes are assumed.
552 *ad_value* is the value which gets assigned in case
553 AccessDenied or ZombieProcess exception is raised when
554 retrieving that particular process information.
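
        Example (returned values are illustrative):

        >>> import psutil
        >>> psutil.Process().as_dict(attrs=['pid', 'name', 'username'])
        {'pid': 28114, 'name': 'python3', 'username': 'giampaolo'}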
555 """
556 valid_names = _as_dict_attrnames
557 if attrs is not None:
558 if not isinstance(attrs, (list, tuple, set, frozenset)):
559 msg = f"invalid attrs type {type(attrs)}"
560 raise TypeError(msg)
561 attrs = set(attrs)
562 invalid_names = attrs - valid_names
563 if invalid_names:
564 msg = "invalid attr name{} {}".format(
565 "s" if len(invalid_names) > 1 else "",
566 ", ".join(map(repr, invalid_names)),
567 )
568 raise ValueError(msg)
569
570 retdict = {}
571 ls = attrs or valid_names
572 with self.oneshot():
573 for name in ls:
574 try:
575 if name == 'pid':
576 ret = self.pid
577 else:
578 meth = getattr(self, name)
579 ret = meth()
580 except (AccessDenied, ZombieProcess):
581 ret = ad_value
582 except NotImplementedError:
583 # in case of not implemented functionality (may happen
584 # on old or exotic systems) we want to crash only if
585 # the user explicitly asked for that particular attr
586 if attrs:
587 raise
588 continue
589 retdict[name] = ret
590 return retdict
591
592 def parent(self):
593 """Return the parent process as a Process object pre-emptively
594 checking whether PID has been reused.
595 If no parent is known return None.
596 """
597 lowest_pid = _LOWEST_PID if _LOWEST_PID is not None else pids()[0]
598 if self.pid == lowest_pid:
599 return None
600 ppid = self.ppid()
601 if ppid is not None:
602 # Get a fresh (non-cached) ctime in case the system clock
603 # was updated. TODO: use a monotonic ctime on platforms
604 # where it's supported.
605 proc_ctime = Process(self.pid).create_time()
606 try:
607 parent = Process(ppid)
608 if parent.create_time() <= proc_ctime:
609 return parent
610 # ...else ppid has been reused by another process
611 except NoSuchProcess:
612 pass
613
614 def parents(self):
615 """Return the parents of this process as a list of Process
616 instances. If no parents are known return an empty list.
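
        Example (PIDs and names are illustrative; reprs abbreviated):

        >>> psutil.Process().parents()
        [psutil.Process(pid=4678, name='bash'), psutil.Process(pid=1, name='systemd')]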
617 """
618 parents = []
619 proc = self.parent()
620 while proc is not None:
621 parents.append(proc)
622 proc = proc.parent()
623 return parents
624
625 def is_running(self):
626 """Return whether this process is running.
627
628 It also checks if PID has been reused by another process, in
629 which case it will remove the process from `process_iter()`
630 internal cache and return False.
631 """
632 if self._gone or self._pid_reused:
633 return False
634 try:
635 # Checking if PID is alive is not enough as the PID might
636 # have been reused by another process. Process identity /
637 # uniqueness over time is guaranteed by (PID + creation
638 # time) and that is verified in __eq__.
639 self._pid_reused = self != Process(self.pid)
640 if self._pid_reused:
641 _pids_reused.add(self.pid)
642 raise NoSuchProcess(self.pid)
643 return True
644 except ZombieProcess:
645 # We should never get here as it's already handled in
646 # Process.__init__; here just for extra safety.
647 return True
648 except NoSuchProcess:
649 self._gone = True
650 return False
651
652 # --- actual API
653
654 @memoize_when_activated
655 def ppid(self):
656 """The process parent PID.
657 On Windows the return value is cached after first call.
658 """
659 # On POSIX we don't want to cache the ppid as it may unexpectedly
660 # change to 1 (init) in case this process turns into a zombie:
661 # https://github.com/giampaolo/psutil/issues/321
662 # http://stackoverflow.com/questions/356722/
663
664 # XXX should we check creation time here rather than in
665 # Process.parent()?
666 self._raise_if_pid_reused()
667 if POSIX:
668 return self._proc.ppid()
669 else: # pragma: no cover
670 self._ppid = self._ppid or self._proc.ppid()
671 return self._ppid
672
673 def name(self):
674 """The process name. The return value is cached after first call."""
675 # Process name is only cached on Windows as on POSIX it may
676 # change, see:
677 # https://github.com/giampaolo/psutil/issues/692
678 if WINDOWS and self._name is not None:
679 return self._name
680 name = self._proc.name()
681 if POSIX and len(name) >= 15:
682 # On UNIX the name gets truncated to the first 15 characters.
683 # If it matches the first part of the cmdline we return that
684 # one instead because it's usually more explicative.
685 # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
686 try:
687 cmdline = self.cmdline()
688 except (AccessDenied, ZombieProcess):
689 # Just pass and return the truncated name: it's better
690 # than nothing. Note: there are actual cases where a
691 # zombie process can return a name() but not a
692 # cmdline(), see:
693 # https://github.com/giampaolo/psutil/issues/2239
694 pass
695 else:
696 if cmdline:
697 extended_name = os.path.basename(cmdline[0])
698 if extended_name.startswith(name):
699 name = extended_name
700 self._name = name
701 self._proc._name = name
702 return name
703
704 def exe(self):
705 """The process executable as an absolute path.
706 May also be an empty string.
707 The return value is cached after first call.
708 """
709
710 def guess_it(fallback):
711 # try to guess exe from cmdline[0] in absence of a native
712 # exe representation
713 cmdline = self.cmdline()
714 if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
715 exe = cmdline[0] # the possible exe
716 # Attempt to guess only in case of an absolute path.
717 # It is not safe otherwise as the process might have
718 # changed cwd.
719 if (
720 os.path.isabs(exe)
721 and os.path.isfile(exe)
722 and os.access(exe, os.X_OK)
723 ):
724 return exe
725 if isinstance(fallback, AccessDenied):
726 raise fallback
727 return fallback
728
729 if self._exe is None:
730 try:
731 exe = self._proc.exe()
732 except AccessDenied as err:
733 return guess_it(fallback=err)
734 else:
735 if not exe:
736 # underlying implementation can legitimately return an
737 # empty string; if that's the case we don't want to
738 # raise AD while guessing from the cmdline
739 try:
740 exe = guess_it(fallback=exe)
741 except AccessDenied:
742 pass
743 self._exe = exe
744 return self._exe
745
746 def cmdline(self):
747 """The command line this process has been called with."""
748 return self._proc.cmdline()
749
750 def status(self):
751 """The process current status as a STATUS_* constant."""
752 try:
753 return self._proc.status()
754 except ZombieProcess:
755 return STATUS_ZOMBIE
756
757 def username(self):
758 """The name of the user that owns the process.
759 On UNIX this is calculated by using *real* process uid.
760 """
761 if POSIX:
762 if pwd is None:
763 # might happen if python was installed from sources
764 msg = "requires pwd module shipped with standard python"
765 raise ImportError(msg)
766 real_uid = self.uids().real
767 try:
768 return pwd.getpwuid(real_uid).pw_name
769 except KeyError:
770 # the uid can't be resolved by the system
771 return str(real_uid)
772 else:
773 return self._proc.username()
774
775 def create_time(self):
776 """The process creation time as a floating point number
777 expressed in seconds since the epoch (seconds since January 1,
778 1970, at midnight UTC). The return value, which is cached after
779 first call, is based on the system clock, which means it may be
780 affected by changes such as manual adjustments or time
781 synchronization (e.g. NTP).
782 """
783 if self._create_time is None:
784 self._create_time = self._proc.create_time()
785 return self._create_time
786
787 def cwd(self):
788 """Process current working directory as an absolute path."""
789 return self._proc.cwd()
790
791 def nice(self, value=None):
792 """Get or set process niceness (priority)."""
793 if value is None:
794 return self._proc.nice_get()
795 else:
796 self._raise_if_pid_reused()
797 self._proc.nice_set(value)
798
799 if POSIX:
800
801 @memoize_when_activated
802 def uids(self):
803 """Return process UIDs as a (real, effective, saved)
804 namedtuple.
805 """
806 return self._proc.uids()
807
808 def gids(self):
809 """Return process GIDs as a (real, effective, saved)
810 namedtuple.
811 """
812 return self._proc.gids()
813
814 def terminal(self):
815 """The terminal associated with this process, if any,
816 else None.
817 """
818 return self._proc.terminal()
819
820 def num_fds(self):
821 """Return the number of file descriptors opened by this
822 process (POSIX only).
823 """
824 return self._proc.num_fds()
825
826 # Linux, BSD, AIX and Windows only
827 if hasattr(_psplatform.Process, "io_counters"):
828
829 def io_counters(self):
830 """Return process I/O statistics as a
831 (read_count, write_count, read_bytes, write_bytes)
832 namedtuple.
833 Those are the number of read/write calls performed and the
834 amount of bytes read and written by the process.
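
            Example (Linux; values and extra fields are illustrative):

            >>> psutil.Process().io_counters()
            pio(read_count=454556, write_count=3456, read_bytes=110592,
                write_bytes=0, read_chars=769931, write_chars=203)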
835 """
836 return self._proc.io_counters()
837
838 # Linux and Windows
839 if hasattr(_psplatform.Process, "ionice_get"):
840
841 def ionice(self, ioclass=None, value=None):
842 """Get or set process I/O niceness (priority).
843
844 On Linux *ioclass* is one of the IOPRIO_CLASS_* constants.
845 *value* is a number which goes from 0 to 7. The higher the
846 value, the lower the I/O priority of the process.
847
848 On Windows only *ioclass* is used and it can be set to 2
849 (normal), 1 (low) or 0 (very low).
850
851 Available on Linux and Windows > Vista only.
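
            Example (Linux; the enum repr shown is illustrative):

            >>> import psutil
            >>> p = psutil.Process()
            >>> p.ionice(psutil.IOPRIO_CLASS_IDLE)  # set
            >>> p.ionice()  # get
            pionice(ioclass=<IOPriority.IOPRIO_CLASS_IDLE: 3>, value=0)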
852 """
853 if ioclass is None:
854 if value is not None:
855 msg = "'ioclass' argument must be specified"
856 raise ValueError(msg)
857 return self._proc.ionice_get()
858 else:
859 self._raise_if_pid_reused()
860 return self._proc.ionice_set(ioclass, value)
861
862 # Linux / FreeBSD only
863 if hasattr(_psplatform.Process, "rlimit"):
864
865 def rlimit(self, resource, limits=None):
866 """Get or set process resource limits as a (soft, hard)
867 tuple.
868
869 *resource* is one of the RLIMIT_* constants.
870 *limits* is supposed to be a (soft, hard) tuple.
871
872 See "man prlimit" for further info.
873 Available on Linux and FreeBSD only.
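
            Example (Linux; limit values are illustrative):

            >>> import psutil
            >>> p = psutil.Process()
            >>> # lower the soft limit on open files, keep the hard limit
            >>> soft, hard = p.rlimit(psutil.RLIMIT_NOFILE)
            >>> p.rlimit(psutil.RLIMIT_NOFILE, (128, hard))
            >>> p.rlimit(psutil.RLIMIT_NOFILE)
            (128, 4096)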
874 """
875 if limits is not None:
876 self._raise_if_pid_reused()
877 return self._proc.rlimit(resource, limits)
878
879 # Windows, Linux and FreeBSD only
880 if hasattr(_psplatform.Process, "cpu_affinity_get"):
881
882 def cpu_affinity(self, cpus=None):
883 """Get or set process CPU affinity.
884 If specified, *cpus* must be a list of CPUs for which you
885 want to set the affinity (e.g. [0, 1]).
            If an empty list is passed, all eligible CPUs are assumed
887 (and set).
888 (Windows, Linux and BSD only).
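
            Example (CPU numbers are illustrative):

            >>> p = psutil.Process()
            >>> p.cpu_affinity()
            [0, 1, 2, 3]
            >>> p.cpu_affinity([0, 1])  # restrict to CPUs 0 and 1
            >>> p.cpu_affinity([])      # reset to all eligible CPUs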
889 """
890 if cpus is None:
891 return sorted(set(self._proc.cpu_affinity_get()))
892 else:
893 self._raise_if_pid_reused()
894 if not cpus:
895 if hasattr(self._proc, "_get_eligible_cpus"):
896 cpus = self._proc._get_eligible_cpus()
897 else:
898 cpus = tuple(range(len(cpu_times(percpu=True))))
899 self._proc.cpu_affinity_set(list(set(cpus)))
900
901 # Linux, FreeBSD, SunOS
902 if hasattr(_psplatform.Process, "cpu_num"):
903
904 def cpu_num(self):
905 """Return what CPU this process is currently running on.
906 The returned number should be <= psutil.cpu_count()
907 and <= len(psutil.cpu_percent(percpu=True)).
908 It may be used in conjunction with
909 psutil.cpu_percent(percpu=True) to observe the system
910 workload distributed across CPUs.
911 """
912 return self._proc.cpu_num()
913
    # All platforms have it, but maybe not in the future.
915 if hasattr(_psplatform.Process, "environ"):
916
917 def environ(self):
918 """The environment variables of the process as a dict. Note: this
919 might not reflect changes made after the process started.
920 """
921 return self._proc.environ()
922
923 if WINDOWS:
924
925 def num_handles(self):
926 """Return the number of handles opened by this process
927 (Windows only).
928 """
929 return self._proc.num_handles()
930
931 def num_ctx_switches(self):
932 """Return the number of voluntary and involuntary context
933 switches performed by this process.
934 """
935 return self._proc.num_ctx_switches()
936
937 def num_threads(self):
938 """Return the number of threads used by this process."""
939 return self._proc.num_threads()
940
941 if hasattr(_psplatform.Process, "threads"):
942
943 def threads(self):
944 """Return threads opened by process as a list of
945 (id, user_time, system_time) namedtuples representing
946 thread id and thread CPU times (user/system).
947 On OpenBSD this method requires root access.
948 """
949 return self._proc.threads()
950
951 def children(self, recursive=False):
952 """Return the children of this process as a list of Process
953 instances, pre-emptively checking whether PID has been reused.
        If *recursive* is True return also the grandchildren and all
        further descendants.
955
956 Example (A == this process):
957
         A ─┐
            │
            ├─ B (child) ─┐
            │             └─ X (grandchild) ─┐
            │                                └─ Y (great grandchild)
            ├─ C (child)
            └─ D (child)
965
966 >>> import psutil
967 >>> p = psutil.Process()
968 >>> p.children()
969 B, C, D
970 >>> p.children(recursive=True)
971 B, X, Y, C, D
972
973 Note that in the example above if process X disappears
974 process Y won't be listed as the reference to process A
975 is lost.
976 """
977 self._raise_if_pid_reused()
978 ppid_map = _ppid_map()
979 # Get a fresh (non-cached) ctime in case the system clock was
980 # updated. TODO: use a monotonic ctime on platforms where it's
981 # supported.
982 proc_ctime = Process(self.pid).create_time()
983 ret = []
984 if not recursive:
985 for pid, ppid in ppid_map.items():
986 if ppid == self.pid:
987 try:
988 child = Process(pid)
989 # if child happens to be older than its parent
990 # (self) it means child's PID has been reused
991 if proc_ctime <= child.create_time():
992 ret.append(child)
993 except (NoSuchProcess, ZombieProcess):
994 pass
995 else:
996 # Construct a {pid: [child pids]} dict
997 reverse_ppid_map = collections.defaultdict(list)
998 for pid, ppid in ppid_map.items():
999 reverse_ppid_map[ppid].append(pid)
1000 # Recursively traverse that dict, starting from self.pid,
1001 # such that we only call Process() on actual children
1002 seen = set()
1003 stack = [self.pid]
1004 while stack:
1005 pid = stack.pop()
1006 if pid in seen:
1007 # Since pids can be reused while the ppid_map is
1008 # constructed, there may be rare instances where
1009 # there's a cycle in the recorded process "tree".
1010 continue
1011 seen.add(pid)
1012 for child_pid in reverse_ppid_map[pid]:
1013 try:
1014 child = Process(child_pid)
1015 # if child happens to be older than its parent
1016 # (self) it means child's PID has been reused
1017 intime = proc_ctime <= child.create_time()
1018 if intime:
1019 ret.append(child)
1020 stack.append(child_pid)
1021 except (NoSuchProcess, ZombieProcess):
1022 pass
1023 return ret
1024
1025 def cpu_percent(self, interval=None):
1026 """Return a float representing the current process CPU
1027 utilization as a percentage.
1028
        When *interval* is 0.0 or None (default) compares process times
        to system CPU times elapsed since last call, returning
        immediately (non-blocking). That means that the first time
        this is called it will return a meaningless 0.0 value which
        you should ignore.

        When *interval* is > 0.0 compares process times to system CPU
        times elapsed before and after the interval (blocking).

        In the non-blocking case it is recommended, for accuracy, that
        this function be called with at least 0.1 seconds between calls.
1039
1040 A value > 100.0 can be returned in case of processes running
1041 multiple threads on different CPU cores.
1042
1043 The returned value is explicitly NOT split evenly between
1044 all available logical CPUs. This means that a busy loop process
1045 running on a system with 2 logical CPUs will be reported as
1046 having 100% CPU utilization instead of 50%.
1047
1048 Examples:
1049
1050 >>> import psutil
1051 >>> p = psutil.Process(os.getpid())
1052 >>> # blocking
1053 >>> p.cpu_percent(interval=1)
1054 2.0
1055 >>> # non-blocking (percentage since last call)
1056 >>> p.cpu_percent(interval=None)
1057 2.9
1058 >>>
1059 """
1060 blocking = interval is not None and interval > 0.0
1061 if interval is not None and interval < 0:
1062 msg = f"interval is not positive (got {interval!r})"
1063 raise ValueError(msg)
1064 num_cpus = cpu_count() or 1
1065
1066 def timer():
1067 return _timer() * num_cpus
1068
1069 if blocking:
1070 st1 = timer()
1071 pt1 = self._proc.cpu_times()
1072 time.sleep(interval)
1073 st2 = timer()
1074 pt2 = self._proc.cpu_times()
1075 else:
1076 st1 = self._last_sys_cpu_times
1077 pt1 = self._last_proc_cpu_times
1078 st2 = timer()
1079 pt2 = self._proc.cpu_times()
1080 if st1 is None or pt1 is None:
1081 self._last_sys_cpu_times = st2
1082 self._last_proc_cpu_times = pt2
1083 return 0.0
1084
1085 delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
1086 delta_time = st2 - st1
1087 # reset values for next call in case of interval == None
1088 self._last_sys_cpu_times = st2
1089 self._last_proc_cpu_times = pt2
1090
1091 try:
1092 # This is the utilization split evenly between all CPUs.
1093 # E.g. a busy loop process on a 2-CPU-cores system at this
1094 # point is reported as 50% instead of 100%.
1095 overall_cpus_percent = (delta_proc / delta_time) * 100
1096 except ZeroDivisionError:
1097 # interval was too low
1098 return 0.0
1099 else:
1100 # Note 1:
1101 # in order to emulate "top" we multiply the value for the num
1102 # of CPU cores. This way the busy process will be reported as
1103 # having 100% (or more) usage.
1104 #
1105 # Note 2:
1106 # taskmgr.exe on Windows differs in that it will show 50%
1107 # instead.
1108 #
1109 # Note 3:
1110 # a percentage > 100 is legitimate as it can result from a
1111 # process with multiple threads running on different CPU
1112 # cores (top does the same), see:
1113 # http://stackoverflow.com/questions/1032357
1114 # https://github.com/giampaolo/psutil/issues/474
1115 single_cpu_percent = overall_cpus_percent * num_cpus
1116 return round(single_cpu_percent, 1)
1117
1118 @memoize_when_activated
1119 def cpu_times(self):
1120 """Return a (user, system, children_user, children_system)
1121 namedtuple representing the accumulated process time, in
1122 seconds.
1123 This is similar to os.times() but per-process.
1124 On macOS and Windows children_user and children_system are
1125 always set to 0.
1126 """
1127 return self._proc.cpu_times()
1128
1129 @memoize_when_activated
1130 def memory_info(self):
1131 """Return a namedtuple with variable fields depending on the
1132 platform, representing memory information about the process.
1133
1134 The "portable" fields available on all platforms are `rss` and `vms`.
1135
1136 All numbers are expressed in bytes.
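
        Example (Linux; field names and values are illustrative):

        >>> psutil.Process().memory_info()
        pmem(rss=15491072, vms=84025344, shared=5206016, text=2555904,
             lib=0, data=9891840, dirty=0)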
1137 """
1138 return self._proc.memory_info()
1139
1140 def memory_full_info(self):
        """This method returns the same information as memory_info(),
        plus, on some platforms (Linux, macOS, Windows), also provides
1143 additional metrics (USS, PSS and swap).
1144 The additional metrics provide a better representation of actual
1145 process memory usage.
1146
1147 Namely USS is the memory which is unique to a process and which
1148 would be freed if the process was terminated right now.
1149
        It does so by passing through the whole process address space.
1151 As such it usually requires higher user privileges than
1152 memory_info() and is considerably slower.
1153 """
1154 return self._proc.memory_full_info()
1155
1156 def memory_percent(self, memtype="rss"):
1157 """Compare process memory to total physical system memory and
1158 calculate process memory utilization as a percentage.
1159 *memtype* argument is a string that dictates what type of
1160 process memory you want to compare against (defaults to "rss").
1161 The list of available strings can be obtained like this:
1162
1163 >>> psutil.Process().memory_info()._fields
1164 ('rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty', 'uss', 'pss')
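
        Example (illustrative; 'uss' is only available where
        memory_full_info() provides it):

        >>> psutil.Process().memory_percent(memtype='rss')
        0.7823
        >>> psutil.Process().memory_percent(memtype='uss')
        0.5219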
1165 """
1166 valid_types = list(_ntp.pfullmem._fields)
1167 if memtype not in valid_types:
1168 msg = (
1169 f"invalid memtype {memtype!r}; valid types are"
1170 f" {tuple(valid_types)!r}"
1171 )
1172 raise ValueError(msg)
1173 fun = (
1174 self.memory_info
1175 if memtype in _ntp.pmem._fields
1176 else self.memory_full_info
1177 )
1178 metrics = fun()
1179 value = getattr(metrics, memtype)
1180
1181 # use cached value if available
1182 total_phymem = _TOTAL_PHYMEM or virtual_memory().total
1183 if not total_phymem > 0:
1184 # we should never get here
1185 msg = (
1186 "can't calculate process memory percent because total physical"
1187 f" system memory is not positive ({total_phymem!r})"
1188 )
1189 raise ValueError(msg)
1190 return (value / float(total_phymem)) * 100
1191
1192 if hasattr(_psplatform.Process, "memory_maps"):
1193
1194 def memory_maps(self, grouped=True):
1195 """Return process' mapped memory regions as a list of namedtuples
1196 whose fields are variable depending on the platform.
1197
1198 If *grouped* is True the mapped regions with the same 'path'
1199 are grouped together and the different memory fields are summed.
1200
1201 If *grouped* is False every mapped region is shown as a single
1202 entity and the namedtuple will also include the mapped region's
1203 address space ('addr') and permission set ('perms').
1204 """
1205 it = self._proc.memory_maps()
1206 if grouped:
1207 d = {}
1208 for tupl in it:
1209 path = tupl[2]
1210 nums = tupl[3:]
1211 try:
1212 d[path] = list(map(lambda x, y: x + y, d[path], nums))
1213 except KeyError:
1214 d[path] = nums
1215 return [_ntp.pmmap_grouped(path, *d[path]) for path in d]
1216 else:
1217 return [_ntp.pmmap_ext(*x) for x in it]
1218
1219 def open_files(self):
1220 """Return files opened by process as a list of
1221 (path, fd) namedtuples including the absolute file name
1222 and file descriptor number.
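
        Example (path is illustrative; some platforms add extra fields
        such as position, mode and flags):

        >>> f = open('document.txt', 'w')
        >>> psutil.Process().open_files()
        [popenfile(path='/home/giampaolo/document.txt', fd=3)]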
1223 """
1224 return self._proc.open_files()
1225
1226 def net_connections(self, kind='inet'):
1227 """Return socket connections opened by process as a list of
1228 (fd, family, type, laddr, raddr, status) namedtuples.
1229 The *kind* parameter filters for connections that match the
1230 following criteria:
1231
1232 +------------+----------------------------------------------------+
1233 | Kind Value | Connections using |
1234 +------------+----------------------------------------------------+
1235 | inet | IPv4 and IPv6 |
1236 | inet4 | IPv4 |
1237 | inet6 | IPv6 |
1238 | tcp | TCP |
1239 | tcp4 | TCP over IPv4 |
1240 | tcp6 | TCP over IPv6 |
1241 | udp | UDP |
1242 | udp4 | UDP over IPv4 |
1243 | udp6 | UDP over IPv6 |
1244 | unix | UNIX socket (both UDP and TCP protocols) |
1245 | all | the sum of all the possible families and protocols |
1246 +------------+----------------------------------------------------+
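
        Example (output is illustrative):

        >>> p = psutil.Process()
        >>> p.net_connections(kind='tcp')
        [pconn(fd=115, family=<AddressFamily.AF_INET: 2>,
               type=<SocketKind.SOCK_STREAM: 1>,
               laddr=addr(ip='10.0.0.1', port=48776),
               raddr=addr(ip='93.186.135.91', port=80),
               status='ESTABLISHED')]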
1247 """
1248 _check_conn_kind(kind)
1249 return self._proc.net_connections(kind)
1250
1251 @_common.deprecated_method(replacement="net_connections")
1252 def connections(self, kind="inet"):
1253 return self.net_connections(kind=kind)
1254
1255 # --- signals
1256
1257 if POSIX:
1258
1259 def _send_signal(self, sig):
1260 assert not self.pid < 0, self.pid
1261 self._raise_if_pid_reused()
1262
1263 pid, ppid, name = self.pid, self._ppid, self._name
1264 if pid == 0:
1265 # see "man 2 kill"
1266 msg = (
1267 "preventing sending signal to process with PID 0 as it "
1268 "would affect every process in the process group of the "
1269 "calling process (os.getpid()) instead of PID 0"
1270 )
1271 raise ValueError(msg)
1272 try:
1273 os.kill(pid, sig)
1274 except ProcessLookupError as err:
1275 if OPENBSD and pid_exists(pid):
1276 # We do this because os.kill() lies in case of
1277 # zombie processes.
1278 raise ZombieProcess(pid, name, ppid) from err
1279 self._gone = True
1280 raise NoSuchProcess(pid, name) from err
1281 except PermissionError as err:
1282 raise AccessDenied(pid, name) from err
1283
1284 def send_signal(self, sig):
        """Send a signal *sig* to process pre-emptively checking
        whether PID has been reused (see signal module constants).
1287 On Windows only SIGTERM is valid and is treated as an alias
1288 for kill().
1289 """
1290 if POSIX:
1291 self._send_signal(sig)
1292 else: # pragma: no cover
1293 self._raise_if_pid_reused()
1294 if sig != signal.SIGTERM and not self.is_running():
1295 msg = "process no longer exists"
1296 raise NoSuchProcess(self.pid, self._name, msg=msg)
1297 self._proc.send_signal(sig)
1298
1299 def suspend(self):
1300 """Suspend process execution with SIGSTOP pre-emptively checking
1301 whether PID has been reused.
1302 On Windows this has the effect of suspending all process threads.
1303 """
1304 if POSIX:
1305 self._send_signal(signal.SIGSTOP)
1306 else: # pragma: no cover
1307 self._raise_if_pid_reused()
1308 self._proc.suspend()
1309
1310 def resume(self):
1311 """Resume process execution with SIGCONT pre-emptively checking
1312 whether PID has been reused.
1313 On Windows this has the effect of resuming all process threads.
1314 """
1315 if POSIX:
1316 self._send_signal(signal.SIGCONT)
1317 else: # pragma: no cover
1318 self._raise_if_pid_reused()
1319 self._proc.resume()
1320
1321 def terminate(self):
1322 """Terminate the process with SIGTERM pre-emptively checking
1323 whether PID has been reused.
1324 On Windows this is an alias for kill().
1325 """
1326 if POSIX:
1327 self._send_signal(signal.SIGTERM)
1328 else: # pragma: no cover
1329 self._raise_if_pid_reused()
1330 self._proc.kill()
1331
1332 def kill(self):
1333 """Kill the current process with SIGKILL pre-emptively checking
1334 whether PID has been reused.
1335 """
1336 if POSIX:
1337 self._send_signal(signal.SIGKILL)
1338 else: # pragma: no cover
1339 self._raise_if_pid_reused()
1340 self._proc.kill()
1341
1342 def wait(self, timeout=None):
        """Wait for process to terminate and, if process is a child
        of os.getpid(), also return its exit code, else None.
1345 On Windows there's no such limitation (exit code is always
1346 returned).
1347
1348 If the process is already terminated immediately return None
1349 instead of raising NoSuchProcess.
1350
1351 If *timeout* (in seconds) is specified and process is still
1352 alive raise TimeoutExpired.
1353
1354 To wait for multiple Process(es) use psutil.wait_procs().
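
        Example (a POSIX sketch; assumes a "sleep" executable is
        available, exit code shown is illustrative):

        >>> p = psutil.Popen(["sleep", "2"])
        >>> p.wait(timeout=5)
        0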
1355 """
        if timeout is not None and not timeout >= 0:
            msg = "timeout must be a non-negative number"
            raise ValueError(msg)
1359 if self._exitcode is not _SENTINEL:
1360 return self._exitcode
1361 self._exitcode = self._proc.wait(timeout)
1362 return self._exitcode
1363
1364
1365# The valid attr names which can be processed by Process.as_dict().
1366# fmt: off
1367_as_dict_attrnames = {
1368 x for x in dir(Process) if not x.startswith("_") and x not in
1369 {'send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
1370 'is_running', 'as_dict', 'parent', 'parents', 'children', 'rlimit',
1371 'connections', 'oneshot'}
1372}
1373# fmt: on
1374
1375
1376# =====================================================================
1377# --- Popen class
1378# =====================================================================
1379
1380
1381class Popen(Process):
1382 """Same as subprocess.Popen, but in addition it provides all
1383 psutil.Process methods in a single class.
1384 For the following methods which are common to both classes, psutil
1385 implementation takes precedence:
1386
1387 * send_signal()
1388 * terminate()
1389 * kill()
1390
1391 This is done in order to avoid killing another process in case its
1392 PID has been reused, fixing BPO-6973.
1393
1394 >>> import psutil
1395 >>> from subprocess import PIPE
    >>> p = psutil.Popen(["python", "-c", "print('hi')"], stdout=PIPE)
1397 >>> p.name()
1398 'python'
1399 >>> p.uids()
1400 user(real=1000, effective=1000, saved=1000)
1401 >>> p.username()
1402 'giampaolo'
1403 >>> p.communicate()
    (b'hi\n', None)
1405 >>> p.terminate()
1406 >>> p.wait(timeout=2)
1407 0
1408 >>>
1409 """
1410
1411 def __init__(self, *args, **kwargs):
1412 # Explicitly avoid to raise NoSuchProcess in case the process
1413 # spawned by subprocess.Popen terminates too quickly, see:
1414 # https://github.com/giampaolo/psutil/issues/193
1415 self.__subproc = subprocess.Popen(*args, **kwargs)
1416 self._init(self.__subproc.pid, _ignore_nsp=True)
1417
1418 def __dir__(self):
1419 return sorted(set(dir(Popen) + dir(subprocess.Popen)))
1420
1421 def __enter__(self):
1422 if hasattr(self.__subproc, '__enter__'):
1423 self.__subproc.__enter__()
1424 return self
1425
1426 def __exit__(self, *args, **kwargs):
1427 if hasattr(self.__subproc, '__exit__'):
1428 return self.__subproc.__exit__(*args, **kwargs)
1429 else:
1430 if self.stdout:
1431 self.stdout.close()
1432 if self.stderr:
1433 self.stderr.close()
1434 try:
1435 # Flushing a BufferedWriter may raise an error.
1436 if self.stdin:
1437 self.stdin.close()
1438 finally:
1439 # Wait for the process to terminate, to avoid zombies.
1440 self.wait()
1441
1442 def __getattribute__(self, name):
1443 try:
1444 return object.__getattribute__(self, name)
1445 except AttributeError:
1446 try:
1447 return object.__getattribute__(self.__subproc, name)
1448 except AttributeError:
1449 msg = f"{self.__class__!r} has no attribute {name!r}"
1450 raise AttributeError(msg) from None
1451
1452 def wait(self, timeout=None):
1453 if self.__subproc.returncode is not None:
1454 return self.__subproc.returncode
1455 ret = super().wait(timeout)
1456 self.__subproc.returncode = ret
1457 return ret
1458
1459
1460# =====================================================================
1461# --- system processes related functions
1462# =====================================================================
1463
1464
1465def pids():
1466 """Return a list of current running PIDs."""
1467 global _LOWEST_PID
1468 ret = sorted(_psplatform.pids())
1469 _LOWEST_PID = ret[0]
1470 return ret
1471
1472
1473def pid_exists(pid):
1474 """Return True if given PID exists in the current process list.
1475 This is faster than doing "pid in psutil.pids()" and
1476 should be preferred.
1477 """
1478 if pid < 0:
1479 return False
1480 elif pid == 0 and POSIX:
1481 # On POSIX we use os.kill() to determine PID existence.
1482 # According to "man 2 kill" PID 0 has a special meaning
1483 # though: it refers to <<every process in the process
        # group of the calling process>> and that is not what we want
        # to do here.
1486 return pid in pids()
1487 else:
1488 return _psplatform.pid_exists(pid)
1489
1490
1491_pmap = {}
1492_pids_reused = set()
1493
1494
1495def process_iter(attrs=None, ad_value=None):
1496 """Return a generator yielding a Process instance for all
1497 running processes.
1498
1499 Every new Process instance is only created once and then cached
1500 into an internal table which is updated every time this is used.
1501 Cache can optionally be cleared via `process_iter.cache_clear()`.
1502
1503 The sorting order in which processes are yielded is based on
1504 their PIDs.
1505
1506 *attrs* and *ad_value* have the same meaning as in
1507 Process.as_dict(). If *attrs* is specified as_dict() is called
1508 and the resulting dict is stored as a 'info' attribute attached
1509 to returned Process instance.
1510 If *attrs* is an empty list it will retrieve all process info
1511 (slow).
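
    Example (PIDs and names are illustrative):

    >>> for proc in psutil.process_iter(['pid', 'name']):
    ...     print(proc.info)
    ...
    {'pid': 1, 'name': 'systemd'}
    {'pid': 2, 'name': 'kthreadd'}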
1512 """
1513 global _pmap
1514
1515 def add(pid):
1516 proc = Process(pid)
1517 pmap[proc.pid] = proc
1518 return proc
1519
1520 def remove(pid):
1521 pmap.pop(pid, None)
1522
1523 pmap = _pmap.copy()
1524 a = set(pids())
1525 b = set(pmap.keys())
1526 new_pids = a - b
1527 gone_pids = b - a
1528 for pid in gone_pids:
1529 remove(pid)
1530 while _pids_reused:
1531 pid = _pids_reused.pop()
1532 debug(f"refreshing Process instance for reused PID {pid}")
1533 remove(pid)
1534 try:
1535 ls = sorted(list(pmap.items()) + list(dict.fromkeys(new_pids).items()))
1536 for pid, proc in ls:
1537 try:
1538 if proc is None: # new process
1539 proc = add(pid)
1540 if attrs is not None:
1541 proc.info = proc.as_dict(attrs=attrs, ad_value=ad_value)
1542 yield proc
1543 except NoSuchProcess:
1544 remove(pid)
1545 finally:
1546 _pmap = pmap
1547
1548
1549process_iter.cache_clear = lambda: _pmap.clear() # noqa: PLW0108
1550process_iter.cache_clear.__doc__ = "Clear process_iter() internal cache."
1551
1552
1553def wait_procs(procs, timeout=None, callback=None):
1554 """Convenience function which waits for a list of processes to
1555 terminate.
1556
1557 Return a (gone, alive) tuple indicating which processes
1558 are gone and which ones are still alive.
1559
1560 The gone ones will have a new *returncode* attribute indicating
1561 process exit status (may be None).
1562
1563 *callback* is a function which gets called every time a process
1564 terminates (a Process instance is passed as callback argument).
1565
1566 Function will return as soon as all processes terminate or when
1567 *timeout* occurs.
    Unlike Process.wait() it will not raise TimeoutExpired if
    *timeout* occurs.
1570
1571 Typical use case is:
1572
1573 - send SIGTERM to a list of processes
1574 - give them some time to terminate
1575 - send SIGKILL to those ones which are still alive
1576
1577 Example:
1578
1579 >>> def on_terminate(proc):
1580 ... print("process {} terminated".format(proc))
1581 ...
1582 >>> for p in procs:
1583 ... p.terminate()
1584 ...
1585 >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
1586 >>> for p in alive:
1587 ... p.kill()
1588 """
1589
1590 def check_gone(proc, timeout):
1591 try:
1592 returncode = proc.wait(timeout=timeout)
1593 except (TimeoutExpired, subprocess.TimeoutExpired):
1594 pass
1595 else:
1596 if returncode is not None or not proc.is_running():
1597 # Set new Process instance attribute.
1598 proc.returncode = returncode
1599 gone.add(proc)
1600 if callback is not None:
1601 callback(proc)
1602
1603 if timeout is not None and not timeout >= 0:
1604 msg = f"timeout must be a positive integer, got {timeout}"
1605 raise ValueError(msg)
1606 gone = set()
1607 alive = set(procs)
1608 if callback is not None and not callable(callback):
1609 msg = f"callback {callback!r} is not a callable"
1610 raise TypeError(msg)
1611 if timeout is not None:
1612 deadline = _timer() + timeout
1613
1614 while alive:
1615 if timeout is not None and timeout <= 0:
1616 break
1617 for proc in alive:
1618 # Make sure that every complete iteration (all processes)
1619 # will last max 1 sec.
1620 # We do this because we don't want to wait too long on a
1621 # single process: in case it terminates too late other
1622 # processes may disappear in the meantime and their PID
1623 # reused.
1624 max_timeout = 1.0 / len(alive)
1625 if timeout is not None:
1626 timeout = min((deadline - _timer()), max_timeout)
1627 if timeout <= 0:
1628 break
1629 check_gone(proc, timeout)
1630 else:
1631 check_gone(proc, max_timeout)
1632 alive = alive - gone # noqa: PLR6104
1633
1634 if alive:
1635 # Last attempt over processes survived so far.
1636 # timeout == 0 won't make this function wait any further.
1637 for proc in alive:
1638 check_gone(proc, 0)
1639 alive = alive - gone # noqa: PLR6104
1640
1641 return (list(gone), list(alive))
1642
1643
1644# =====================================================================
1645# --- CPU related functions
1646# =====================================================================
1647
1648
1649def cpu_count(logical=True):
1650 """Return the number of logical CPUs in the system (same as
1651 os.cpu_count()).
1652
1653 If *logical* is False return the number of physical cores only
1654 (e.g. hyper thread CPUs are excluded).
1655
1656 Return None if undetermined.
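
    Example (values depend on the machine):

    >>> psutil.cpu_count()
    8
    >>> psutil.cpu_count(logical=False)
    4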
1657
1658 The return value is cached after first call.
1659 If desired cache can be cleared like this:
1660
1661 >>> psutil.cpu_count.cache_clear()
1662 """
1663 if logical:
1664 ret = _psplatform.cpu_count_logical()
1665 else:
1666 ret = _psplatform.cpu_count_cores()
1667 if ret is not None and ret < 1:
1668 ret = None
1669 return ret
1670
1671
1672def cpu_times(percpu=False):
1673 """Return system-wide CPU times as a namedtuple.
1674 Every CPU time represents the seconds the CPU has spent in the
    given mode. The availability of the namedtuple's fields varies
    depending on the platform:
1677
1678 - user
1679 - system
1680 - idle
1681 - nice (UNIX)
1682 - iowait (Linux)
1683 - irq (Linux, FreeBSD)
1684 - softirq (Linux)
1685 - steal (Linux >= 2.6.11)
1686 - guest (Linux >= 2.6.24)
1687 - guest_nice (Linux >= 3.2.0)
1688
1689 When *percpu* is True return a list of namedtuples for each CPU.
1690 First element of the list refers to first CPU, second element
1691 to second CPU and so on.
1692 The order of the list is consistent across calls.
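
    Example (Linux; values are illustrative):

    >>> psutil.cpu_times()
    scputimes(user=17451.2, nice=15.6, system=3020.5, idle=1234567.8,
              iowait=120.4, irq=0.0, softirq=45.8, steal=0.0, guest=0.0,
              guest_nice=0.0)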
1693 """
1694 if not percpu:
1695 return _psplatform.cpu_times()
1696 else:
1697 return _psplatform.per_cpu_times()
1698
1699
1700try:
1701 _last_cpu_times = {threading.current_thread().ident: cpu_times()}
1702except Exception: # noqa: BLE001
1703 # Don't want to crash at import time.
1704 _last_cpu_times = {}
1705
1706try:
1707 _last_per_cpu_times = {
1708 threading.current_thread().ident: cpu_times(percpu=True)
1709 }
1710except Exception: # noqa: BLE001
1711 # Don't want to crash at import time.
1712 _last_per_cpu_times = {}
1713
1714
1715def _cpu_tot_time(times):
1716 """Given a cpu_time() ntuple calculates the total CPU time
1717 (including idle time).
1718 """
1719 tot = sum(times)
1720 if LINUX:
1721 # On Linux guest times are already accounted in "user" or
1722 # "nice" times, so we subtract them from total.
1723 # Htop does the same. References:
1724 # https://github.com/giampaolo/psutil/pull/940
1725 # http://unix.stackexchange.com/questions/178045
1726 # https://github.com/torvalds/linux/blob/
1727 # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/
1728 # cputime.c#L158
1729 tot -= getattr(times, "guest", 0) # Linux 2.6.24+
1730 tot -= getattr(times, "guest_nice", 0) # Linux 3.2.0+
1731 return tot
1732
1733
1734def _cpu_busy_time(times):
1735 """Given a cpu_time() ntuple calculates the busy CPU time.
1736 We do so by subtracting all idle CPU times.
1737 """
1738 busy = _cpu_tot_time(times)
1739 busy -= times.idle
1740 # Linux: "iowait" is time during which the CPU does not do anything
1741 # (waits for IO to complete). On Linux IO wait is *not* accounted
1742 # in "idle" time so we subtract it. Htop does the same.
1743 # References:
1744 # https://github.com/torvalds/linux/blob/
1745 # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/cputime.c#L244
1746 busy -= getattr(times, "iowait", 0)
1747 return busy
1748
1749
1750def _cpu_times_deltas(t1, t2):
1751 assert t1._fields == t2._fields, (t1, t2)
1752 field_deltas = []
1753 for field in _ntp.scputimes._fields:
1754 field_delta = getattr(t2, field) - getattr(t1, field)
1755 # CPU times are always supposed to increase over time
1756 # or at least remain the same and that's because time
1757 # cannot go backwards.
1758 # Surprisingly sometimes this might not be the case (at
1759 # least on Windows and Linux), see:
1760 # https://github.com/giampaolo/psutil/issues/392
1761 # https://github.com/giampaolo/psutil/issues/645
1762 # https://github.com/giampaolo/psutil/issues/1210
1763 # Trim negative deltas to zero to ignore decreasing fields.
1764 # top does the same. Reference:
1765 # https://gitlab.com/procps-ng/procps/blob/v3.3.12/top/top.c#L5063
1766 field_delta = max(0, field_delta)
1767 field_deltas.append(field_delta)
1768 return _ntp.scputimes(*field_deltas)
1769
1770
1771def cpu_percent(interval=None, percpu=False):
1772 """Return a float representing the current system-wide CPU
1773 utilization as a percentage.
1774
1775 When *interval* is > 0.0 compares system CPU times elapsed before
1776 and after the interval (blocking).
1777
1778 When *interval* is 0.0 or None compares system CPU times elapsed
1779 since last call or module import, returning immediately (non
1780 blocking). That means the first time this is called it will
1781 return a meaningless 0.0 value which you should ignore.
    In this case it is recommended, for accuracy, that this function be
    called with at least 0.1 seconds between calls.
1784
    When *percpu* is True, return a list of floats representing the
    utilization as a percentage for each CPU.
    The first element of the list refers to the first CPU, the second
    element to the second CPU, and so on.
1789 The order of the list is consistent across calls.
1790
1791 Examples:
1792
1793 >>> # blocking, system-wide
1794 >>> psutil.cpu_percent(interval=1)
1795 2.0
1796 >>>
1797 >>> # blocking, per-cpu
1798 >>> psutil.cpu_percent(interval=1, percpu=True)
1799 [2.0, 1.0]
1800 >>>
1801 >>> # non-blocking (percentage since last call)
1802 >>> psutil.cpu_percent(interval=None)
1803 2.9
1804 >>>
1805 """
1806 tid = threading.current_thread().ident
1807 blocking = interval is not None and interval > 0.0
1808 if interval is not None and interval < 0:
1809 msg = f"interval is not positive (got {interval})"
1810 raise ValueError(msg)
1811
1812 def calculate(t1, t2):
1813 times_delta = _cpu_times_deltas(t1, t2)
1814 all_delta = _cpu_tot_time(times_delta)
1815 busy_delta = _cpu_busy_time(times_delta)
1816
1817 try:
1818 busy_perc = (busy_delta / all_delta) * 100
1819 except ZeroDivisionError:
1820 return 0.0
1821 else:
1822 return round(busy_perc, 1)
1823
1824 # system-wide usage
1825 if not percpu:
1826 if blocking:
1827 t1 = cpu_times()
1828 time.sleep(interval)
1829 else:
1830 t1 = _last_cpu_times.get(tid) or cpu_times()
1831 _last_cpu_times[tid] = cpu_times()
1832 return calculate(t1, _last_cpu_times[tid])
1833 # per-cpu usage
1834 else:
1835 ret = []
1836 if blocking:
1837 tot1 = cpu_times(percpu=True)
1838 time.sleep(interval)
1839 else:
1840 tot1 = _last_per_cpu_times.get(tid) or cpu_times(percpu=True)
1841 _last_per_cpu_times[tid] = cpu_times(percpu=True)
1842 for t1, t2 in zip(tot1, _last_per_cpu_times[tid]):
1843 ret.append(calculate(t1, t2))
1844 return ret
1845
1846
1847# Use a separate dict for cpu_times_percent(), so it's independent from
1848# cpu_percent() and they can both be used within the same program.
1849_last_cpu_times_2 = _last_cpu_times.copy()
1850_last_per_cpu_times_2 = _last_per_cpu_times.copy()
1851
1852
1853def cpu_times_percent(interval=None, percpu=False):
1854 """Same as cpu_percent() but provides utilization percentages
1855 for each specific CPU time as is returned by cpu_times().
1856 For instance, on Linux we'll get:
1857
1858 >>> cpu_times_percent()
1859 cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
1860 irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
1861 >>>
1862
1863 *interval* and *percpu* arguments have the same meaning as in
1864 cpu_percent().
1865 """
1866 tid = threading.current_thread().ident
1867 blocking = interval is not None and interval > 0.0
1868 if interval is not None and interval < 0:
1869 msg = f"interval is not positive (got {interval!r})"
1870 raise ValueError(msg)
1871
1872 def calculate(t1, t2):
1873 nums = []
1874 times_delta = _cpu_times_deltas(t1, t2)
1875 all_delta = _cpu_tot_time(times_delta)
1876 # "scale" is the value to multiply each delta with to get percentages.
1877 # We use "max" to avoid division by zero (if all_delta is 0, then all
1878 # fields are 0 so percentages will be 0 too. all_delta cannot be a
1879 # fraction because cpu times are integers)
1880 scale = 100.0 / max(1, all_delta)
1881 for field_delta in times_delta:
1882 field_perc = field_delta * scale
1883 field_perc = round(field_perc, 1)
1884 # make sure we don't return negative values or values over 100%
1885 field_perc = min(max(0.0, field_perc), 100.0)
1886 nums.append(field_perc)
1887 return _ntp.scputimes(*nums)
1888
1889 # system-wide usage
1890 if not percpu:
1891 if blocking:
1892 t1 = cpu_times()
1893 time.sleep(interval)
1894 else:
1895 t1 = _last_cpu_times_2.get(tid) or cpu_times()
1896 _last_cpu_times_2[tid] = cpu_times()
1897 return calculate(t1, _last_cpu_times_2[tid])
1898 # per-cpu usage
1899 else:
1900 ret = []
1901 if blocking:
1902 tot1 = cpu_times(percpu=True)
1903 time.sleep(interval)
1904 else:
1905 tot1 = _last_per_cpu_times_2.get(tid) or cpu_times(percpu=True)
1906 _last_per_cpu_times_2[tid] = cpu_times(percpu=True)
1907 for t1, t2 in zip(tot1, _last_per_cpu_times_2[tid]):
1908 ret.append(calculate(t1, t2))
1909 return ret
1910
1911
1912def cpu_stats():
    """Return CPU statistics as a namedtuple including the following
    fields: ctx_switches, interrupts, soft_interrupts and syscalls
    (all cumulative since boot).
    """
1914 return _psplatform.cpu_stats()
1915
1916
1917if hasattr(_psplatform, "cpu_freq"):
1918
1919 def cpu_freq(percpu=False):
1920 """Return CPU frequency as a namedtuple including current,
        min and max frequency expressed in MHz.
1922
        If *percpu* is True and the system supports per-cpu frequency
        retrieval (Linux only), a list of frequencies is returned, one
        for each CPU. If per-cpu frequencies cannot be determined, a
        list with a single element is returned.
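
        Example (illustrative; values depend on the CPU):

        >>> psutil.cpu_freq()
        scpufreq(current=2419.3, min=800.0, max=3500.0)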
1926 """
1927 ret = _psplatform.cpu_freq()
1928 if percpu:
1929 return ret
1930 else:
1931 num_cpus = float(len(ret))
1932 if num_cpus == 0:
1933 return None
1934 elif num_cpus == 1:
1935 return ret[0]
1936 else:
1937 currs, mins, maxs = 0.0, 0.0, 0.0
1938 set_none = False
1939 for cpu in ret:
1940 currs += cpu.current
1941 # On Linux if /proc/cpuinfo is used min/max are set
1942 # to None.
1943 if LINUX and cpu.min is None:
1944 set_none = True
1945 continue
1946 mins += cpu.min
1947 maxs += cpu.max
1948
1949 current = currs / num_cpus
1950
1951 if set_none:
1952 min_ = max_ = None
1953 else:
1954 min_ = mins / num_cpus
1955 max_ = maxs / num_cpus
1956
1957 return _ntp.scpufreq(current, min_, max_)
1958
1959 __all__.append("cpu_freq")
1960
1961
1962if hasattr(os, "getloadavg") or hasattr(_psplatform, "getloadavg"):
    # Perform this hasattr check once at import time to either use the
    # platform-specific code or proxy straight to the os module.
1965 if hasattr(os, "getloadavg"):
1966 getloadavg = os.getloadavg
1967 else:
1968 getloadavg = _psplatform.getloadavg
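
    # getloadavg() returns the average system load over the last 1, 5
    # and 15 minutes as a tuple of three floats, e.g. (0.42, 0.51, 0.60)
    # (illustrative values).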
1969
1970 __all__.append("getloadavg")
1971
1972
1973# =====================================================================
1974# --- system memory related functions
1975# =====================================================================
1976
1977
1978def virtual_memory():
1979 """Return statistics about system memory usage as a namedtuple
1980 including the following fields, expressed in bytes:
1981
1982 - total:
1983 total physical memory available.
1984
1985 - available:
1986 the memory that can be given instantly to processes without the
1987 system going into swap.
1988 This is calculated by summing different memory values depending
1989 on the platform and it is supposed to be used to monitor actual
1990 memory usage in a cross platform fashion.
1991
1992 - percent:
1993 the percentage usage calculated as (total - available) / total * 100
1994
1995 - used:
1996 memory used, calculated differently depending on the platform and
1997 designed for informational purposes only:
1998 macOS: active + wired
1999 BSD: active + wired + cached
2000 Linux: total - free
2001
2002 - free:
2003 memory not being used at all (zeroed) that is readily available;
2004 note that this doesn't reflect the actual memory available
2005 (use 'available' instead)
2006
2007 Platform-specific fields:
2008
2009 - active (UNIX):
2010 memory currently in use or very recently used, and so it is in RAM.
2011
2012 - inactive (UNIX):
2013 memory that is marked as not used.
2014
2015 - buffers (BSD, Linux):
2016 cache for things like file system metadata.
2017
2018 - cached (BSD, macOS):
2019 cache for various things.
2020
2021 - wired (macOS, BSD):
2022 memory that is marked to always stay in RAM. It is never moved to disk.
2023
2024 - shared (BSD):
2025 memory that may be simultaneously accessed by multiple processes.
2026
2027 The sum of 'used' and 'available' does not necessarily equal total.
2028 On Windows 'available' and 'free' are the same.
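
    Example (illustrative output on Linux; fields and values vary by
    platform and machine):

    >>> psutil.virtual_memory()
    svmem(total=16638836736, available=9857912832, percent=40.8,
          used=5569974272, free=2265427968, active=7668076544,
          inactive=5129203712, buffers=817917952, cached=7985516544,
          shared=705052672, slab=437981184)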
2029 """
2030 global _TOTAL_PHYMEM
2031 ret = _psplatform.virtual_memory()
2032 # cached for later use in Process.memory_percent()
2033 _TOTAL_PHYMEM = ret.total
2034 return ret
2035
2036
2037def swap_memory():
2038 """Return system swap memory statistics as a namedtuple including
2039 the following fields:
2040
2041 - total: total swap memory in bytes
2042 - used: used swap memory in bytes
2043 - free: free swap memory in bytes
2044 - percent: the percentage usage
2045 - sin: no. of bytes the system has swapped in from disk (cumulative)
2046 - sout: no. of bytes the system has swapped out from disk (cumulative)
2047
2048 'sin' and 'sout' on Windows are meaningless and always set to 0.
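
    Example (illustrative):

    >>> psutil.swap_memory()
    sswap(total=2147479552, used=886620160, free=1260859392,
          percent=41.3, sin=1050112, sout=1906120)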
2049 """
2050 return _psplatform.swap_memory()
2051
2052
2053# =====================================================================
2054# --- disks/partitions related functions
2055# =====================================================================
2056
2057
2058def disk_usage(path):
2059 """Return disk usage statistics about the given *path* as a
2060 namedtuple including total, used and free space expressed in bytes
2061 plus the percentage usage.
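
    Example (illustrative):

    >>> psutil.disk_usage('/')
    sdiskusage(total=21378641920, used=4809781248, free=15482871808,
               percent=23.7)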
2062 """
2063 return _psplatform.disk_usage(path)
2064
2065
2066def disk_partitions(all=False):
2067 """Return mounted partitions as a list of
2068 (device, mountpoint, fstype, opts) namedtuple.
2069 'opts' field is a raw string separated by commas indicating mount
2070 options which may vary depending on the platform.
2071
    If the *all* parameter is False, return physical devices only and
    ignore all others.
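
    Example (illustrative, Linux):

    >>> psutil.disk_partitions()
    [sdiskpart(device='/dev/sda1', mountpoint='/', fstype='ext4',
               opts='rw,relatime')]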
2074 """
2075 return _psplatform.disk_partitions(all)
2076
2077
2078def disk_io_counters(perdisk=False, nowrap=True):
2079 """Return system disk I/O statistics as a namedtuple including
2080 the following fields:
2081
2082 - read_count: number of reads
2083 - write_count: number of writes
2084 - read_bytes: number of bytes read
2085 - write_bytes: number of bytes written
2086 - read_time: time spent reading from disk (in ms)
2087 - write_time: time spent writing to disk (in ms)
2088
2089 Platform specific:
2090
2091 - busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms)
2092 - read_merged_count (Linux): number of merged reads
2093 - write_merged_count (Linux): number of merged writes
2094
2095 If *perdisk* is True return the same information for every
2096 physical disk installed on the system as a dictionary
2097 with partition names as the keys and the namedtuple
2098 described above as the values.
2099
    If *nowrap* is True it detects and adjusts the numbers which overflow
    and wrap (restart from 0), adding "old value" to "new value" so that
    the returned numbers will always increase or remain the same, but
    never decrease.
2104 "disk_io_counters.cache_clear()" can be used to invalidate the
2105 cache.
2106
    On recent Windows versions the 'diskperf -y' command may need to be
    executed first, otherwise this function won't find any disk.
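
    Example (illustrative; platform-specific fields may be appended):

    >>> psutil.disk_io_counters()
    sdiskio(read_count=8141, write_count=2431, read_bytes=290203,
            write_bytes=537676, read_time=5868, write_time=94922)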
2109 """
2110 kwargs = dict(perdisk=perdisk) if LINUX else {}
2111 rawdict = _psplatform.disk_io_counters(**kwargs)
2112 if not rawdict:
2113 return {} if perdisk else None
2114 if nowrap:
2115 rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters')
2116 if perdisk:
2117 for disk, fields in rawdict.items():
2118 rawdict[disk] = _ntp.sdiskio(*fields)
2119 return rawdict
2120 else:
2121 return _ntp.sdiskio(*(sum(x) for x in zip(*rawdict.values())))
2122
2123
2124disk_io_counters.cache_clear = functools.partial(
2125 _wrap_numbers.cache_clear, 'psutil.disk_io_counters'
2126)
2127disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
2128
2129
2130# =====================================================================
2131# --- network related functions
2132# =====================================================================
2133
2134
2135def net_io_counters(pernic=False, nowrap=True):
2136 """Return network I/O statistics as a namedtuple including
2137 the following fields:
2138
2139 - bytes_sent: number of bytes sent
2140 - bytes_recv: number of bytes received
2141 - packets_sent: number of packets sent
2142 - packets_recv: number of packets received
2143 - errin: total number of errors while receiving
2144 - errout: total number of errors while sending
2145 - dropin: total number of incoming packets which were dropped
2146 - dropout: total number of outgoing packets which were dropped
2147 (always 0 on macOS and BSD)
2148
2149 If *pernic* is True return the same information for every
2150 network interface installed on the system as a dictionary
2151 with network interface names as the keys and the namedtuple
2152 described above as the values.
2153
    If *nowrap* is True it detects and adjusts the numbers which overflow
    and wrap (restart from 0), adding "old value" to "new value" so that
    the returned numbers will always increase or remain the same, but
    never decrease.
2158 "net_io_counters.cache_clear()" can be used to invalidate the
2159 cache.
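
    Example (illustrative):

    >>> psutil.net_io_counters()
    snetio(bytes_sent=14508483, bytes_recv=62749361, packets_sent=84311,
           packets_recv=94888, errin=0, errout=0, dropin=0, dropout=0)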
2160 """
2161 rawdict = _psplatform.net_io_counters()
2162 if not rawdict:
2163 return {} if pernic else None
2164 if nowrap:
2165 rawdict = _wrap_numbers(rawdict, 'psutil.net_io_counters')
2166 if pernic:
2167 for nic, fields in rawdict.items():
2168 rawdict[nic] = _ntp.snetio(*fields)
2169 return rawdict
2170 else:
2171 return _ntp.snetio(*[sum(x) for x in zip(*rawdict.values())])
2172
2173
2174net_io_counters.cache_clear = functools.partial(
2175 _wrap_numbers.cache_clear, 'psutil.net_io_counters'
2176)
2177net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
2178
2179
2180def net_connections(kind='inet'):
2181 """Return system-wide socket connections as a list of
2182 (fd, family, type, laddr, raddr, status, pid) namedtuples.
2183 In case of limited privileges 'fd' and 'pid' may be set to -1
2184 and None respectively.
2185 The *kind* parameter filters for connections that fit the
2186 following criteria:
2187
2188 +------------+----------------------------------------------------+
2189 | Kind Value | Connections using |
2190 +------------+----------------------------------------------------+
2191 | inet | IPv4 and IPv6 |
2192 | inet4 | IPv4 |
2193 | inet6 | IPv6 |
2194 | tcp | TCP |
2195 | tcp4 | TCP over IPv4 |
2196 | tcp6 | TCP over IPv6 |
2197 | udp | UDP |
2198 | udp4 | UDP over IPv4 |
2199 | udp6 | UDP over IPv6 |
2200 | unix | UNIX socket (both UDP and TCP protocols) |
2201 | all | the sum of all the possible families and protocols |
2202 +------------+----------------------------------------------------+
2203
2204 On macOS this function requires root privileges.
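
    Example (illustrative, truncated):

    >>> psutil.net_connections()
    [sconn(fd=115, family=<AddressFamily.AF_INET: 2>,
           type=<SocketKind.SOCK_STREAM: 1>,
           laddr=addr(ip='10.0.0.1', port=48776),
           raddr=addr(ip='93.186.135.91', port=80),
           status='ESTABLISHED', pid=1254),
     ...]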
2205 """
2206 _check_conn_kind(kind)
2207 return _psplatform.net_connections(kind)
2208
2209
2210def net_if_addrs():
    """Return the addresses associated with each NIC (network interface
    card) installed on the system as a dictionary whose keys are the
    NIC names and whose values are lists of namedtuples, one for each
    address assigned to the NIC. Each namedtuple includes 5 fields:
2215
    - family: can be either socket.AF_INET, socket.AF_INET6 or
      psutil.AF_LINK, the latter referring to a MAC address.
    - address: the primary address; it is always set.
    - netmask: the netmask address; 'netmask', 'broadcast' and 'ptp'
      may be None.
    - ptp: stands for "point to point" and references the
      destination address on a point to point interface
      (typically a VPN).
    - broadcast: the broadcast address; 'broadcast' and 'ptp' are
      mutually exclusive.
2224
2225 Note: you can have more than one address of the same family
2226 associated with each interface.
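
    Example (illustrative, truncated):

    >>> psutil.net_if_addrs()
    {'lo': [snicaddr(family=<AddressFamily.AF_INET: 2>,
                     address='127.0.0.1', netmask='255.0.0.0',
                     broadcast=None, ptp=None),
            ...],
     ...}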
2227 """
2228 rawlist = _psplatform.net_if_addrs()
2229 rawlist.sort(key=lambda x: x[1]) # sort by family
2230 ret = collections.defaultdict(list)
2231 for name, fam, addr, mask, broadcast, ptp in rawlist:
2232 try:
2233 fam = socket.AddressFamily(fam)
2234 except ValueError:
2235 if WINDOWS and fam == -1:
2236 fam = _psplatform.AF_LINK
2237 elif (
2238 hasattr(_psplatform, "AF_LINK") and fam == _psplatform.AF_LINK
2239 ):
2240 # Linux defines AF_LINK as an alias for AF_PACKET.
2241 # We re-set the family here so that repr(family)
2242 # will show AF_LINK rather than AF_PACKET
2243 fam = _psplatform.AF_LINK
2244
2245 if fam == _psplatform.AF_LINK:
2246 # The underlying C function may return an incomplete MAC
2247 # address in which case we fill it with null bytes, see:
2248 # https://github.com/giampaolo/psutil/issues/786
2249 separator = ":" if POSIX else "-"
2250 while addr.count(separator) < 5:
2251 addr += f"{separator}00"
2252
2253 nt = _ntp.snicaddr(fam, addr, mask, broadcast, ptp)
2254
2255 # On Windows broadcast is None, so we determine it via
2256 # ipaddress module.
2257 if WINDOWS and fam in {socket.AF_INET, socket.AF_INET6}:
2258 try:
2259 broadcast = _common.broadcast_addr(nt)
2260 except Exception as err: # noqa: BLE001
2261 debug(err)
2262 else:
2263 if broadcast is not None:
                    nt = nt._replace(broadcast=broadcast)
2265
2266 ret[name].append(nt)
2267
2268 return dict(ret)
2269
2270
2271def net_if_stats():
2272 """Return information about each NIC (network interface card)
2273 installed on the system as a dictionary whose keys are the
2274 NIC names and value is a namedtuple with the following fields:
2275
2276 - isup: whether the interface is up (bool)
2277 - duplex: can be either NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or
2278 NIC_DUPLEX_UNKNOWN
    - speed: the NIC speed expressed in megabits per second (Mbps);
      if it can't be determined (e.g. 'localhost') it will be set to 0.
2281 - mtu: the maximum transmission unit expressed in bytes.
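
    Example (illustrative; field set may vary by psutil version):

    >>> psutil.net_if_stats()
    {'eth0': snicstats(isup=True, duplex=<NicDuplex.NIC_DUPLEX_FULL: 2>,
                       speed=1000, mtu=1500)}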
2282 """
2283 return _psplatform.net_if_stats()
2284
2285
2286# =====================================================================
2287# --- sensors
2288# =====================================================================
2289
2290
2291# Linux, macOS
2292if hasattr(_psplatform, "sensors_temperatures"):
2293
2294 def sensors_temperatures(fahrenheit=False):
2295 """Return hardware temperatures. Each entry is a namedtuple
        representing a certain hardware sensor (it may be a CPU, a
        hard disk or something else, depending on the OS and its
        configuration).
        All temperatures are expressed in Celsius unless *fahrenheit*
2300 is set to True.
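
        Example (illustrative, Linux):

        >>> psutil.sensors_temperatures()
        {'coretemp': [shwtemp(label='Core 0', current=45.0, high=100.0,
                              critical=100.0)]}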
2301 """
2302
2303 def convert(n):
2304 if n is not None:
2305 return (float(n) * 9 / 5) + 32 if fahrenheit else n
2306
2307 ret = collections.defaultdict(list)
2308 rawdict = _psplatform.sensors_temperatures()
2309
2310 for name, values in rawdict.items():
2311 while values:
2312 label, current, high, critical = values.pop(0)
2313 current = convert(current)
2314 high = convert(high)
2315 critical = convert(critical)
2316
2317 if high and not critical:
2318 critical = high
2319 elif critical and not high:
2320 high = critical
2321
2322 ret[name].append(_ntp.shwtemp(label, current, high, critical))
2323
2324 return dict(ret)
2325
2326 __all__.append("sensors_temperatures")
2327
2328
2329# Linux
2330if hasattr(_psplatform, "sensors_fans"):
2331
2332 def sensors_fans():
2333 """Return fans speed. Each entry is a namedtuple
2334 representing a certain hardware sensor.
        All speeds are expressed in RPM (revolutions per minute).
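
        Example (illustrative):

        >>> psutil.sensors_fans()
        {'asus': [sfan(label='cpu_fan', current=3200)]}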
2336 """
2337 return _psplatform.sensors_fans()
2338
2339 __all__.append("sensors_fans")
2340
2341
2342# Linux, Windows, FreeBSD, macOS
2343if hasattr(_psplatform, "sensors_battery"):
2344
2345 def sensors_battery():
2346 """Return battery information. If no battery is installed
2347 returns None.
2348
2349 - percent: battery power left as a percentage.
2350 - secsleft: a rough approximation of how many seconds are left
          before the battery runs out of power. May be
          POWER_TIME_UNKNOWN or POWER_TIME_UNLIMITED.
2353 - power_plugged: True if the AC power cable is connected.
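
        Example (illustrative):

        >>> psutil.sensors_battery()
        sbattery(percent=93.3, secsleft=16628, power_plugged=False)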
2354 """
2355 return _psplatform.sensors_battery()
2356
2357 __all__.append("sensors_battery")
2358
2359
2360# =====================================================================
2361# --- other system related functions
2362# =====================================================================
2363
2364
2365def boot_time():
2366 """Return the system boot time expressed in seconds since the epoch
2367 (seconds since January 1, 1970, at midnight UTC). The returned
2368 value is based on the system clock, which means it may be affected
2369 by changes such as manual adjustments or time synchronization (e.g.
2370 NTP).
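
    Example (illustrative; the timestamp can be converted with
    datetime.datetime.fromtimestamp()):

    >>> psutil.boot_time()
    1697105643.0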
2371 """
2372 return _psplatform.boot_time()
2373
2374
2375def users():
2376 """Return users currently connected on the system as a list of
2377 namedtuples including the following fields.
2378
2379 - user: the name of the user
2380 - terminal: the tty or pseudo-tty associated with the user, if any.
2381 - host: the host name associated with the entry, if any.
2382 - started: the creation time as a floating point number expressed in
2383 seconds since the epoch.
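
    Example (illustrative; on some platforms a 'pid' field is also
    included):

    >>> psutil.users()
    [suser(name='giampaolo', terminal='pts/2', host='localhost',
           started=1340737536.0, pid=1352)]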
2384 """
2385 return _psplatform.users()
2386
2387
2388# =====================================================================
2389# --- Windows services
2390# =====================================================================
2391
2392
2393if WINDOWS:
2394
2395 def win_service_iter():
        """Return a generator yielding a WindowsService instance for
        every Windows service installed on the system.
2398 """
2399 return _psplatform.win_service_iter()
2400
2401 def win_service_get(name):
2402 """Get a Windows service by *name*.
        Raise NoSuchProcess if no service with such a name exists.
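
        Example (illustrative; assumes a service named 'alg' exists):

        >>> psutil.win_service_get('alg').display_name()
        'Application Layer Gateway Service'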
2404 """
2405 return _psplatform.win_service_get(name)
2406
2407
2408# =====================================================================
2409
2410
2411def _set_debug(value):
2412 """Enable or disable PSUTIL_DEBUG option, which prints debugging
2413 messages to stderr.
2414 """
2415 import psutil._common
2416
2417 psutil._common.PSUTIL_DEBUG = bool(value)
2418 _psplatform.cext.set_debug(bool(value))
2419
2420
2421del memoize_when_activated