1# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""psutil is a cross-platform library for retrieving information on
6running processes and system utilization (CPU, memory, disks, network,
7sensors) in Python. Supported platforms:
8
9 - Linux
10 - Windows
11 - macOS
12 - FreeBSD
13 - OpenBSD
14 - NetBSD
15 - Sun Solaris
16 - AIX
17
Supported Python versions are CPython 3.6+ and PyPy.
19"""
20
21import collections
22import contextlib
23import datetime
24import functools
25import os
26import signal
27import socket
28import subprocess
29import sys
30import threading
31import time
32
33try:
34 import pwd
35except ImportError:
36 pwd = None
37
38from . import _common
39from . import _ntuples as _ntp
40from ._common import AIX
41from ._common import BSD
42from ._common import CONN_CLOSE
43from ._common import CONN_CLOSE_WAIT
44from ._common import CONN_CLOSING
45from ._common import CONN_ESTABLISHED
46from ._common import CONN_FIN_WAIT1
47from ._common import CONN_FIN_WAIT2
48from ._common import CONN_LAST_ACK
49from ._common import CONN_LISTEN
50from ._common import CONN_NONE
51from ._common import CONN_SYN_RECV
52from ._common import CONN_SYN_SENT
53from ._common import CONN_TIME_WAIT
54from ._common import FREEBSD
55from ._common import LINUX
56from ._common import MACOS
57from ._common import NETBSD
58from ._common import NIC_DUPLEX_FULL
59from ._common import NIC_DUPLEX_HALF
60from ._common import NIC_DUPLEX_UNKNOWN
61from ._common import OPENBSD
62from ._common import OSX # deprecated alias
63from ._common import POSIX
64from ._common import POWER_TIME_UNKNOWN
65from ._common import POWER_TIME_UNLIMITED
66from ._common import STATUS_DEAD
67from ._common import STATUS_DISK_SLEEP
68from ._common import STATUS_IDLE
69from ._common import STATUS_LOCKED
70from ._common import STATUS_PARKED
71from ._common import STATUS_RUNNING
72from ._common import STATUS_SLEEPING
73from ._common import STATUS_STOPPED
74from ._common import STATUS_TRACING_STOP
75from ._common import STATUS_WAITING
76from ._common import STATUS_WAKING
77from ._common import STATUS_ZOMBIE
78from ._common import SUNOS
79from ._common import WINDOWS
80from ._common import AccessDenied
81from ._common import Error
82from ._common import NoSuchProcess
83from ._common import TimeoutExpired
84from ._common import ZombieProcess
85from ._common import debug
86from ._common import memoize_when_activated
87from ._common import wrap_numbers as _wrap_numbers
88
# Select the platform-specific implementation module and alias it as
# "_psplatform". Each branch may also re-export platform-only constants
# so they are importable directly from the top-level psutil namespace.
if LINUX:
    # This is public API and it will be retrieved from _pslinux.py
    # via sys.modules.
    PROCFS_PATH = "/proc"

    from . import _pslinux as _psplatform
    from ._pslinux import IOPRIO_CLASS_BE # noqa: F401
    from ._pslinux import IOPRIO_CLASS_IDLE # noqa: F401
    from ._pslinux import IOPRIO_CLASS_NONE # noqa: F401
    from ._pslinux import IOPRIO_CLASS_RT # noqa: F401

elif WINDOWS:
    from . import _pswindows as _psplatform
    from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS # noqa: F401
    from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS # noqa: F401
    from ._psutil_windows import HIGH_PRIORITY_CLASS # noqa: F401
    from ._psutil_windows import IDLE_PRIORITY_CLASS # noqa: F401
    from ._psutil_windows import NORMAL_PRIORITY_CLASS # noqa: F401
    from ._psutil_windows import REALTIME_PRIORITY_CLASS # noqa: F401
    from ._pswindows import CONN_DELETE_TCB # noqa: F401
    from ._pswindows import IOPRIO_HIGH # noqa: F401
    from ._pswindows import IOPRIO_LOW # noqa: F401
    from ._pswindows import IOPRIO_NORMAL # noqa: F401
    from ._pswindows import IOPRIO_VERYLOW # noqa: F401

elif MACOS:
    from . import _psosx as _psplatform

elif BSD:
    from . import _psbsd as _psplatform

elif SUNOS:
    from . import _pssunos as _psplatform
    from ._pssunos import CONN_BOUND # noqa: F401
    from ._pssunos import CONN_IDLE # noqa: F401

    # This is public writable API which is read from _pslinux.py and
    # _pssunos.py via sys.modules.
    PROCFS_PATH = "/proc"

elif AIX:
    from . import _psaix as _psplatform

    # This is public API and it will be retrieved from _pslinux.py
    # via sys.modules.
    PROCFS_PATH = "/proc"

else: # pragma: no cover
    msg = f"platform {sys.platform} is not supported"
    raise NotImplementedError(msg)
139
140
141# fmt: off
142__all__ = [
143 # exceptions
144 "Error", "NoSuchProcess", "ZombieProcess", "AccessDenied",
145 "TimeoutExpired",
146
147 # constants
148 "version_info", "__version__",
149
150 "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
151 "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
152 "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED",
153 "STATUS_PARKED",
154
155 "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
156 "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
157 "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
158 # "CONN_IDLE", "CONN_BOUND",
159
160 "AF_LINK",
161
162 "NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN",
163
164 "POWER_TIME_UNKNOWN", "POWER_TIME_UNLIMITED",
165
166 "BSD", "FREEBSD", "LINUX", "NETBSD", "OPENBSD", "MACOS", "OSX", "POSIX",
167 "SUNOS", "WINDOWS", "AIX",
168
169 # "RLIM_INFINITY", "RLIMIT_AS", "RLIMIT_CORE", "RLIMIT_CPU", "RLIMIT_DATA",
170 # "RLIMIT_FSIZE", "RLIMIT_LOCKS", "RLIMIT_MEMLOCK", "RLIMIT_NOFILE",
171 # "RLIMIT_NPROC", "RLIMIT_RSS", "RLIMIT_STACK", "RLIMIT_MSGQUEUE",
172 # "RLIMIT_NICE", "RLIMIT_RTPRIO", "RLIMIT_RTTIME", "RLIMIT_SIGPENDING",
173
174 # classes
175 "Process", "Popen",
176
177 # functions
178 "pid_exists", "pids", "process_iter", "wait_procs", # proc
179 "virtual_memory", "swap_memory", # memory
180 "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu
181 "cpu_stats", # "cpu_freq", "getloadavg"
182 "net_io_counters", "net_connections", "net_if_addrs", # network
183 "net_if_stats",
184 "disk_io_counters", "disk_partitions", "disk_usage", # disk
185 # "sensors_temperatures", "sensors_battery", "sensors_fans" # sensors
186 "users", "boot_time", # others
187]
188# fmt: on
189
190
# Extend the public API with platform-specific names.
__all__.extend(_psplatform.__extra__all__)

# Linux, FreeBSD
if hasattr(_psplatform.Process, "rlimit"):
    # Populate global namespace with RLIM* constants.
    _globals = globals()
    _name = None
    for _name in dir(_psplatform.cext):
        if _name.startswith('RLIM') and _name.isupper():
            _globals[_name] = getattr(_psplatform.cext, _name)
            __all__.append(_name)
    del _globals, _name

# Platform-specific address family constant for link-layer interfaces.
AF_LINK = _psplatform.AF_LINK

__author__ = "Giampaolo Rodola'"
__version__ = "7.2.1"
version_info = tuple(int(num) for num in __version__.split('.'))

# Clock used for measuring intervals (monotonic when available).
_timer = getattr(time, 'monotonic', time.time)
# Lazily populated module-level caches.
_TOTAL_PHYMEM = None
_LOWEST_PID = None
# Unique marker distinguishing "never set" from None.
_SENTINEL = object()
214
# Sanity check in case the user messed up with psutil installation
# or did something weird with sys.path. In this case we might end
# up importing a python module using a C extension module which
# was compiled for a different version of psutil.
# We want to prevent that by failing sooner rather than later.
# See: https://github.com/giampaolo/psutil/issues/564
# Note: the C extension exposes the version as an int (e.g. 721 for
# "7.2.1"), hence the dot-stripping comparison below.
if int(__version__.replace('.', '')) != getattr(
    _psplatform.cext, 'version', None
):
    msg = f"version conflict: {_psplatform.cext.__file__!r} C extension "
    msg += "module was built for another version of psutil"
    if hasattr(_psplatform.cext, 'version'):
        # Turn e.g. 721 back into "7.2.1" for the error message.
        v = ".".join(list(str(_psplatform.cext.version)))
        msg += f" ({v} instead of {__version__})"
    else:
        msg += f" (different than {__version__})"
    what = getattr(
        _psplatform.cext,
        "__file__",
        "the existing psutil install directory",
    )
    msg += f"; you may try to 'pip uninstall psutil', manually remove {what}"
    msg += " or clean the virtual env somehow, then reinstall"
    raise ImportError(msg)
239
240
241# =====================================================================
242# --- Utils
243# =====================================================================
244
245
if hasattr(_psplatform, 'ppid_map'):
    # Faster version (Windows and Linux).
    _ppid_map = _psplatform.ppid_map
else: # pragma: no cover

    def _ppid_map():
        """Build a {pid: ppid, ...} mapping for every running process
        in a single pass. Used to speed up Process.children().
        """
        mapping = {}
        for proc_pid in pids():
            try:
                mapping[proc_pid] = _psplatform.Process(proc_pid).ppid()
            except (NoSuchProcess, ZombieProcess):
                # Process vanished (or is a zombie) mid-scan; skip it.
                continue
        return mapping
262
263
264def _pprint_secs(secs):
265 """Format seconds in a human readable form."""
266 now = time.time()
267 secs_ago = int(now - secs)
268 fmt = "%H:%M:%S" if secs_ago < 60 * 60 * 24 else "%Y-%m-%d %H:%M:%S"
269 return datetime.datetime.fromtimestamp(secs).strftime(fmt)
270
271
def _check_conn_kind(kind):
    """Validate the `kind` argument accepted by net_connections();
    raise ValueError if it is not one of the supported kinds.
    """
    valid = tuple(_common.conn_tmap)
    if kind in valid:
        return
    msg = f"invalid kind argument {kind!r}; valid ones are: {valid}"
    raise ValueError(msg)
278
279
280# =====================================================================
281# --- Process class
282# =====================================================================
283
284
class Process:
    """Represents an OS process with the given PID.
    If PID is omitted current process PID (os.getpid()) is used.
    Raise NoSuchProcess if PID does not exist.

    Note that most of the methods of this class do not make sure that
    the PID of the process being queried has been reused. That means
    that you may end up retrieving information for another process.

    The only exceptions for which process identity is pre-emptively
    checked and guaranteed are:

     - parent()
     - children()
     - nice() (set)
     - ionice() (set)
     - rlimit() (set)
     - cpu_affinity (set)
     - suspend()
     - resume()
     - send_signal()
     - terminate()
     - kill()

    To prevent this problem for all other methods you can use
    is_running() before querying the process.
    """
312
    def __init__(self, pid=None):
        # All state setup lives in _init() so it can be reused with
        # _ignore_nsp=True by callers that tolerate vanished PIDs.
        self._init(pid)
315
    def _init(self, pid, _ignore_nsp=False):
        """Initialize all instance state for *pid* (defaults to the
        current process). With _ignore_nsp=True a non-existing PID is
        tolerated: the instance is marked gone instead of raising
        NoSuchProcess.
        """
        if pid is None:
            pid = os.getpid()
        else:
            if pid < 0:
                msg = f"pid must be a positive integer (got {pid})"
                raise ValueError(msg)
            try:
                # Reject PIDs too large for the platform's pid type.
                _psplatform.cext.check_pid_range(pid)
            except OverflowError as err:
                msg = "process PID out of range"
                raise NoSuchProcess(pid, msg=msg) from err

        self._pid = pid
        self._name = None
        self._exe = None
        self._create_time = None
        self._gone = False
        self._pid_reused = False
        self._hash = None
        self._lock = threading.RLock()
        # used for caching on Windows only (on POSIX ppid may change)
        self._ppid = None
        # platform-specific modules define an _psplatform.Process
        # implementation class
        self._proc = _psplatform.Process(pid)
        self._last_sys_cpu_times = None
        self._last_proc_cpu_times = None
        self._exitcode = _SENTINEL
        # (pid, create-time-like value) pair identifying this process;
        # refined by _get_ident() below when possible.
        self._ident = (self.pid, None)
        try:
            self._ident = self._get_ident()
        except AccessDenied:
            # This should happen on Windows only, since we use the fast
            # create time method. AFAIK, on all other platforms we are
            # able to get create time for all PIDs.
            pass
        except ZombieProcess:
            # Zombies can still be queried by this class (although
            # not always) and pids() return them so just go on.
            pass
        except NoSuchProcess:
            if not _ignore_nsp:
                msg = "process PID not found"
                raise NoSuchProcess(pid, msg=msg) from None
            self._gone = True
362
363 def _get_ident(self):
364 """Return a (pid, uid) tuple which is supposed to identify a
365 Process instance univocally over time. The PID alone is not
366 enough, as it can be assigned to a new process after this one
367 terminates, so we add process creation time to the mix. We need
368 this in order to prevent killing the wrong process later on.
369 This is also known as PID reuse or PID recycling problem.
370
371 The reliability of this strategy mostly depends on
372 create_time() precision, which is 0.01 secs on Linux. The
373 assumption is that, after a process terminates, the kernel
374 won't reuse the same PID after such a short period of time
375 (0.01 secs). Technically this is inherently racy, but
376 practically it should be good enough.
377
378 NOTE: unreliable on FreeBSD and OpenBSD as ctime is subject to
379 system clock updates.
380 """
381
382 if WINDOWS:
383 # Use create_time() fast method in order to speedup
384 # `process_iter()`. This means we'll get AccessDenied for
385 # most ADMIN processes, but that's fine since it means
386 # we'll also get AccessDenied on kill().
387 # https://github.com/giampaolo/psutil/issues/2366#issuecomment-2381646555
388 self._create_time = self._proc.create_time(fast_only=True)
389 return (self.pid, self._create_time)
390 elif LINUX or NETBSD or OSX:
391 # Use 'monotonic' process starttime since boot to form unique
392 # process identity, since it is stable over changes to system
393 # time.
394 return (self.pid, self._proc.create_time(monotonic=True))
395 else:
396 return (self.pid, self.create_time())
397
398 def __str__(self):
399 info = collections.OrderedDict()
400 info["pid"] = self.pid
401 if self._name:
402 info['name'] = self._name
403 with self.oneshot():
404 if self._pid_reused:
405 info["status"] = "terminated + PID reused"
406 else:
407 try:
408 info["name"] = self.name()
409 info["status"] = self.status()
410 except ZombieProcess:
411 info["status"] = "zombie"
412 except NoSuchProcess:
413 info["status"] = "terminated"
414 except AccessDenied:
415 pass
416
417 if self._exitcode not in {_SENTINEL, None}:
418 info["exitcode"] = self._exitcode
419 if self._create_time is not None:
420 info['started'] = _pprint_secs(self._create_time)
421
422 return "{}.{}({})".format(
423 self.__class__.__module__,
424 self.__class__.__name__,
425 ", ".join([f"{k}={v!r}" for k, v in info.items()]),
426 )
427
428 __repr__ = __str__
429
    def __eq__(self, other):
        # Test for equality with another Process object based
        # on PID and creation time.
        if not isinstance(other, Process):
            return NotImplemented
        if OPENBSD or NETBSD or SUNOS: # pragma: no cover
            # Zombie processes on Open/NetBSD/illumos/Solaris have a
            # creation time of 0.0. This covers the case when a process
            # started normally (so it has a ctime), then it turned into a
            # zombie. It's important to do this because is_running()
            # depends on __eq__.
            pid1, ident1 = self._ident
            pid2, ident2 = other._ident
            if pid1 == pid2:
                # ident2 is falsy (0.0/None): `other` may be the
                # zombified incarnation of the same process.
                if ident1 and not ident2:
                    try:
                        return self.status() == STATUS_ZOMBIE
                    except Error:
                        pass
        # Default: identical (pid, create-time) pairs mean same process.
        return self._ident == other._ident
450
451 def __ne__(self, other):
452 return not self == other
453
454 def __hash__(self):
455 if self._hash is None:
456 self._hash = hash(self._ident)
457 return self._hash
458
459 def _raise_if_pid_reused(self):
460 """Raises NoSuchProcess in case process PID has been reused."""
461 if self._pid_reused or (not self.is_running() and self._pid_reused):
462 # We may directly raise NSP in here already if PID is just
463 # not running, but I prefer NSP to be raised naturally by
464 # the actual Process API call. This way unit tests will tell
465 # us if the API is broken (aka don't raise NSP when it
466 # should). We also remain consistent with all other "get"
467 # APIs which don't use _raise_if_pid_reused().
468 msg = "process no longer exists and its PID has been reused"
469 raise NoSuchProcess(self.pid, self._name, msg=msg)
470
    @property
    def pid(self):
        """The process PID (read-only, set once at construction)."""
        return self._pid
475
476 # --- utility methods
477
    @contextlib.contextmanager
    def oneshot(self):
        """Utility context manager which considerably speeds up the
        retrieval of multiple process information at the same time.

        Internally different process info (e.g. name, ppid, uids,
        gids, ...) may be fetched by using the same routine, but
        only one information is returned and the others are discarded.
        When using this context manager the internal routine is
        executed once (in the example below on name()) and the
        other info are cached.

        The cache is cleared when exiting the context manager block.
        The advice is to use this every time you retrieve more than
        one information about the process. If you're lucky, you'll
        get a hell of a speedup.

        >>> import psutil
        >>> p = psutil.Process()
        >>> with p.oneshot():
        ...     p.name() # collect multiple info
        ...     p.cpu_times() # return cached value
        ...     p.cpu_percent() # return cached value
        ...     p.create_time() # return cached value
        ...
        >>>
        """
        # RLock so nested/threaded use on the same instance is safe.
        with self._lock:
            if hasattr(self, "_cache"):
                # NOOP: this covers the use case where the user enters the
                # context twice:
                #
                # >>> with p.oneshot():
                # ...     with p.oneshot():
                # ...
                #
                # Also, since as_dict() internally uses oneshot()
                # I expect that the code below will be a pretty common
                # "mistake" that the user will make, so let's guard
                # against that:
                #
                # >>> with p.oneshot():
                # ...     p.as_dict()
                # ...
                yield
            else:
                try:
                    # cached in case cpu_percent() is used
                    self.cpu_times.cache_activate(self)
                    # cached in case memory_percent() is used
                    self.memory_info.cache_activate(self)
                    # cached in case parent() is used
                    self.ppid.cache_activate(self)
                    # cached in case username() is used
                    if POSIX:
                        self.uids.cache_activate(self)
                    # specific implementation cache
                    self._proc.oneshot_enter()
                    yield
                finally:
                    # Always clear caches, even if the body raised.
                    self.cpu_times.cache_deactivate(self)
                    self.memory_info.cache_deactivate(self)
                    self.ppid.cache_deactivate(self)
                    if POSIX:
                        self.uids.cache_deactivate(self)
                    self._proc.oneshot_exit()
544
545 def as_dict(self, attrs=None, ad_value=None):
546 """Utility method returning process information as a
547 hashable dictionary.
548 If *attrs* is specified it must be a list of strings
549 reflecting available Process class' attribute names
550 (e.g. ['cpu_times', 'name']) else all public (read
551 only) attributes are assumed.
552 *ad_value* is the value which gets assigned in case
553 AccessDenied or ZombieProcess exception is raised when
554 retrieving that particular process information.
555 """
556 valid_names = _as_dict_attrnames
557 if attrs is not None:
558 if not isinstance(attrs, (list, tuple, set, frozenset)):
559 msg = f"invalid attrs type {type(attrs)}"
560 raise TypeError(msg)
561 attrs = set(attrs)
562 invalid_names = attrs - valid_names
563 if invalid_names:
564 msg = "invalid attr name{} {}".format(
565 "s" if len(invalid_names) > 1 else "",
566 ", ".join(map(repr, invalid_names)),
567 )
568 raise ValueError(msg)
569
570 retdict = {}
571 ls = attrs or valid_names
572 with self.oneshot():
573 for name in ls:
574 try:
575 if name == 'pid':
576 ret = self.pid
577 else:
578 meth = getattr(self, name)
579 ret = meth()
580 except (AccessDenied, ZombieProcess):
581 ret = ad_value
582 except NotImplementedError:
583 # in case of not implemented functionality (may happen
584 # on old or exotic systems) we want to crash only if
585 # the user explicitly asked for that particular attr
586 if attrs:
587 raise
588 continue
589 retdict[name] = ret
590 return retdict
591
592 def parent(self):
593 """Return the parent process as a Process object pre-emptively
594 checking whether PID has been reused.
595 If no parent is known return None.
596 """
597 lowest_pid = _LOWEST_PID if _LOWEST_PID is not None else pids()[0]
598 if self.pid == lowest_pid:
599 return None
600 ppid = self.ppid()
601 if ppid is not None:
602 # Get a fresh (non-cached) ctime in case the system clock
603 # was updated. TODO: use a monotonic ctime on platforms
604 # where it's supported.
605 proc_ctime = Process(self.pid).create_time()
606 try:
607 parent = Process(ppid)
608 if parent.create_time() <= proc_ctime:
609 return parent
610 # ...else ppid has been reused by another process
611 except NoSuchProcess:
612 pass
613
614 def parents(self):
615 """Return the parents of this process as a list of Process
616 instances. If no parents are known return an empty list.
617 """
618 parents = []
619 proc = self.parent()
620 while proc is not None:
621 parents.append(proc)
622 proc = proc.parent()
623 return parents
624
    def is_running(self):
        """Return whether this process is running.

        It also checks if PID has been reused by another process, in
        which case it will remove the process from `process_iter()`
        internal cache and return False.
        """
        if self._gone or self._pid_reused:
            return False
        try:
            # Checking if PID is alive is not enough as the PID might
            # have been reused by another process. Process identity /
            # uniqueness over time is guaranteed by (PID + creation
            # time) and that is verified in __eq__.
            self._pid_reused = self != Process(self.pid)
            if self._pid_reused:
                # _pids_reused is a module-level set (defined elsewhere
                # in this file) consulted by process_iter().
                _pids_reused.add(self.pid)
                raise NoSuchProcess(self.pid)
            return True
        except ZombieProcess:
            # We should never get here as it's already handled in
            # Process.__init__; here just for extra safety.
            return True
        except NoSuchProcess:
            self._gone = True
            return False
651
652 # --- actual API
653
654 @memoize_when_activated
655 def ppid(self):
656 """The process parent PID.
657 On Windows the return value is cached after first call.
658 """
659 # On POSIX we don't want to cache the ppid as it may unexpectedly
660 # change to 1 (init) in case this process turns into a zombie:
661 # https://github.com/giampaolo/psutil/issues/321
662 # http://stackoverflow.com/questions/356722/
663
664 # XXX should we check creation time here rather than in
665 # Process.parent()?
666 self._raise_if_pid_reused()
667 if POSIX:
668 return self._proc.ppid()
669 else: # pragma: no cover
670 self._ppid = self._ppid or self._proc.ppid()
671 return self._ppid
672
673 def name(self):
674 """The process name. The return value is cached after first call."""
675 # Process name is only cached on Windows as on POSIX it may
676 # change, see:
677 # https://github.com/giampaolo/psutil/issues/692
678 if WINDOWS and self._name is not None:
679 return self._name
680 name = self._proc.name()
681 if POSIX and len(name) >= 15:
682 # On UNIX the name gets truncated to the first 15 characters.
683 # If it matches the first part of the cmdline we return that
684 # one instead because it's usually more explicative.
685 # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
686 try:
687 cmdline = self.cmdline()
688 except (AccessDenied, ZombieProcess):
689 # Just pass and return the truncated name: it's better
690 # than nothing. Note: there are actual cases where a
691 # zombie process can return a name() but not a
692 # cmdline(), see:
693 # https://github.com/giampaolo/psutil/issues/2239
694 pass
695 else:
696 if cmdline:
697 extended_name = os.path.basename(cmdline[0])
698 if extended_name.startswith(name):
699 name = extended_name
700 self._name = name
701 self._proc._name = name
702 return name
703
    def exe(self):
        """The process executable as an absolute path.
        May also be an empty string.
        The return value is cached after first call.
        """

        def guess_it(fallback):
            # try to guess exe from cmdline[0] in absence of a native
            # exe representation
            cmdline = self.cmdline()
            if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
                exe = cmdline[0] # the possible exe
                # Attempt to guess only in case of an absolute path.
                # It is not safe otherwise as the process might have
                # changed cwd.
                if (
                    os.path.isabs(exe)
                    and os.path.isfile(exe)
                    and os.access(exe, os.X_OK)
                ):
                    return exe
            # Re-raise the original AccessDenied if that's what we
            # were falling back from; otherwise return it as-is.
            if isinstance(fallback, AccessDenied):
                raise fallback
            return fallback

        if self._exe is None:
            try:
                exe = self._proc.exe()
            except AccessDenied as err:
                # Not cached: guessing may succeed on a later call.
                return guess_it(fallback=err)
            else:
                if not exe:
                    # underlying implementation can legitimately return an
                    # empty string; if that's the case we don't want to
                    # raise AD while guessing from the cmdline
                    try:
                        exe = guess_it(fallback=exe)
                    except AccessDenied:
                        pass
                self._exe = exe
        return self._exe
745
    def cmdline(self):
        """The command line this process has been called with,
        as a list of strings (delegated to the platform layer).
        """
        return self._proc.cmdline()
749
    def status(self):
        """The process current status as a STATUS_* constant."""
        try:
            return self._proc.status()
        except ZombieProcess:
            # Normalize: a zombie is reported as a status, not an error.
            return STATUS_ZOMBIE
756
757 def username(self):
758 """The name of the user that owns the process.
759 On UNIX this is calculated by using *real* process uid.
760 """
761 if POSIX:
762 if pwd is None:
763 # might happen if python was installed from sources
764 msg = "requires pwd module shipped with standard python"
765 raise ImportError(msg)
766 real_uid = self.uids().real
767 try:
768 return pwd.getpwuid(real_uid).pw_name
769 except KeyError:
770 # the uid can't be resolved by the system
771 return str(real_uid)
772 else:
773 return self._proc.username()
774
775 def create_time(self):
776 """The process creation time as a floating point number
777 expressed in seconds since the epoch (seconds since January 1,
778 1970, at midnight UTC). The return value, which is cached after
779 first call, is based on the system clock, which means it may be
780 affected by changes such as manual adjustments or time
781 synchronization (e.g. NTP).
782 """
783 if self._create_time is None:
784 self._create_time = self._proc.create_time()
785 return self._create_time
786
    def cwd(self):
        """Process current working directory as an absolute path
        (delegated to the platform layer).
        """
        return self._proc.cwd()
790
791 def nice(self, value=None):
792 """Get or set process niceness (priority)."""
793 if value is None:
794 return self._proc.nice_get()
795 else:
796 self._raise_if_pid_reused()
797 self._proc.nice_set(value)
798
    # The following methods exist on POSIX platforms only.
    if POSIX:

        @memoize_when_activated
        def uids(self):
            """Return process UIDs as a (real, effective, saved)
            namedtuple. Cached while inside an active oneshot() block.
            """
            return self._proc.uids()

        def gids(self):
            """Return process GIDs as a (real, effective, saved)
            namedtuple.
            """
            return self._proc.gids()

        def terminal(self):
            """The terminal associated with this process, if any,
            else None.
            """
            return self._proc.terminal()

        def num_fds(self):
            """Return the number of file descriptors opened by this
            process (POSIX only).
            """
            return self._proc.num_fds()
825
    # Linux, BSD, AIX and Windows only
    if hasattr(_psplatform.Process, "io_counters"):

        def io_counters(self):
            """Return process I/O statistics as a
            (read_count, write_count, read_bytes, write_bytes)
            namedtuple.
            Those are the number of read/write calls performed and the
            amount of bytes read and written by the process.
            """
            return self._proc.io_counters()
837
838 # Linux and Windows
839 if hasattr(_psplatform.Process, "ionice_get"):
840
841 def ionice(self, ioclass=None, value=None):
842 """Get or set process I/O niceness (priority).
843
844 On Linux *ioclass* is one of the IOPRIO_CLASS_* constants.
845 *value* is a number which goes from 0 to 7. The higher the
846 value, the lower the I/O priority of the process.
847
848 On Windows only *ioclass* is used and it can be set to 2
849 (normal), 1 (low) or 0 (very low).
850
851 Available on Linux and Windows > Vista only.
852 """
853 if ioclass is None:
854 if value is not None:
855 msg = "'ioclass' argument must be specified"
856 raise ValueError(msg)
857 return self._proc.ionice_get()
858 else:
859 self._raise_if_pid_reused()
860 return self._proc.ionice_set(ioclass, value)
861
    # Linux / FreeBSD only
    if hasattr(_psplatform.Process, "rlimit"):

        def rlimit(self, resource, limits=None):
            """Get or set process resource limits as a (soft, hard)
            tuple.

            *resource* is one of the RLIMIT_* constants.
            *limits* is supposed to be a (soft, hard) tuple.

            See "man prlimit" for further info.
            Available on Linux and FreeBSD only.
            """
            # Identity is only verified when setting (the destructive
            # direction); plain gets go straight to the platform layer.
            if limits is not None:
                self._raise_if_pid_reused()
            return self._proc.rlimit(resource, limits)
878
879 # Windows, Linux and FreeBSD only
880 if hasattr(_psplatform.Process, "cpu_affinity_get"):
881
882 def cpu_affinity(self, cpus=None):
883 """Get or set process CPU affinity.
884 If specified, *cpus* must be a list of CPUs for which you
885 want to set the affinity (e.g. [0, 1]).
886 If an empty list is passed, all egible CPUs are assumed
887 (and set).
888 (Windows, Linux and BSD only).
889 """
890 if cpus is None:
891 return sorted(set(self._proc.cpu_affinity_get()))
892 else:
893 self._raise_if_pid_reused()
894 if not cpus:
895 if hasattr(self._proc, "_get_eligible_cpus"):
896 cpus = self._proc._get_eligible_cpus()
897 else:
898 cpus = tuple(range(len(cpu_times(percpu=True))))
899 self._proc.cpu_affinity_set(list(set(cpus)))
900
    # Linux, FreeBSD, SunOS
    if hasattr(_psplatform.Process, "cpu_num"):

        def cpu_num(self):
            """Return what CPU this process is currently running on.
            The returned number should be <= psutil.cpu_count()
            and <= len(psutil.cpu_percent(percpu=True)).
            It may be used in conjunction with
            psutil.cpu_percent(percpu=True) to observe the system
            workload distributed across CPUs.
            """
            return self._proc.cpu_num()
913
    # All platforms has it, but maybe not in the future.
    if hasattr(_psplatform.Process, "environ"):

        def environ(self):
            """The environment variables of the process as a dict. Note: this
            might not reflect changes made after the process started.
            """
            return self._proc.environ()
922
    # Windows-only API.
    if WINDOWS:

        def num_handles(self):
            """Return the number of handles opened by this process
            (Windows only).
            """
            return self._proc.num_handles()
930
    def num_ctx_switches(self):
        """Return the number of voluntary and involuntary context
        switches performed by this process (as a namedtuple-like
        value from the platform layer).
        """
        return self._proc.num_ctx_switches()
936
    def num_threads(self):
        """Return the number of threads used by this process."""
        return self._proc.num_threads()
940
    # Not available on every platform.
    if hasattr(_psplatform.Process, "threads"):

        def threads(self):
            """Return threads opened by process as a list of
            (id, user_time, system_time) namedtuples representing
            thread id and thread CPU times (user/system).
            On OpenBSD this method requires root access.
            """
            return self._proc.threads()
950
    def children(self, recursive=False):
        """Return the children of this process as a list of Process
        instances, pre-emptively checking whether PID has been reused.
        If *recursive* is True return all the parent descendants.

        Example (A == this process):

         A ─┐
            │
            ├─ B (child) ─┐
            │             └─ X (grandchild) ─┐
            │                                └─ Y (great grandchild)
            ├─ C (child)
            └─ D (child)

        >>> import psutil
        >>> p = psutil.Process()
        >>> p.children()
        B, C, D
        >>> p.children(recursive=True)
        B, X, Y, C, D

        Note that in the example above if process X disappears
        process Y won't be listed as the reference to process A
        is lost.
        """
        self._raise_if_pid_reused()
        # One-shot {pid: ppid} snapshot of the whole process table.
        ppid_map = _ppid_map()
        # Get a fresh (non-cached) ctime in case the system clock was
        # updated. TODO: use a monotonic ctime on platforms where it's
        # supported.
        proc_ctime = Process(self.pid).create_time()
        ret = []
        if not recursive:
            for pid, ppid in ppid_map.items():
                if ppid == self.pid:
                    try:
                        child = Process(pid)
                        # if child happens to be older than its parent
                        # (self) it means child's PID has been reused
                        if proc_ctime <= child.create_time():
                            ret.append(child)
                    except (NoSuchProcess, ZombieProcess):
                        pass
        else:
            # Construct a {pid: [child pids]} dict
            reverse_ppid_map = collections.defaultdict(list)
            for pid, ppid in ppid_map.items():
                reverse_ppid_map[ppid].append(pid)
            # Recursively traverse that dict, starting from self.pid,
            # such that we only call Process() on actual children
            seen = set()
            stack = [self.pid]
            while stack:
                pid = stack.pop()
                if pid in seen:
                    # Since pids can be reused while the ppid_map is
                    # constructed, there may be rare instances where
                    # there's a cycle in the recorded process "tree".
                    continue
                seen.add(pid)
                for child_pid in reverse_ppid_map[pid]:
                    try:
                        child = Process(child_pid)
                        # if child happens to be older than its parent
                        # (self) it means child's PID has been reused
                        intime = proc_ctime <= child.create_time()
                        if intime:
                            ret.append(child)
                            stack.append(child_pid)
                    except (NoSuchProcess, ZombieProcess):
                        pass
        return ret
1024
    def cpu_percent(self, interval=None):
        """Return a float representing the current process CPU
        utilization as a percentage.

        When *interval* is 0.0 or None (default) compares process times
        to system CPU times elapsed since last call, returning
        immediately (non-blocking). That means that the first time
        this is called it will return a meaningful 0.0 value.

        When *interval* is > 0.0 compares process times to system CPU
        times elapsed before and after the interval (blocking).

        In this case is recommended for accuracy that this function
        be called with at least 0.1 seconds between calls.

        A value > 100.0 can be returned in case of processes running
        multiple threads on different CPU cores.

        The returned value is explicitly NOT split evenly between
        all available logical CPUs. This means that a busy loop process
        running on a system with 2 logical CPUs will be reported as
        having 100% CPU utilization instead of 50%.

        Examples:

          >>> import psutil
          >>> p = psutil.Process(os.getpid())
          >>> # blocking
          >>> p.cpu_percent(interval=1)
          2.0
          >>> # non-blocking (percentage since last call)
          >>> p.cpu_percent(interval=None)
          2.9
          >>>
        """
        blocking = interval is not None and interval > 0.0
        if interval is not None and interval < 0:
            msg = f"interval is not positive (got {interval!r})"
            raise ValueError(msg)
        num_cpus = cpu_count() or 1

        def timer():
            # Wall-clock time scaled by the number of CPUs, so it is
            # comparable with process CPU times accumulated on all cores.
            return _timer() * num_cpus

        if blocking:
            # Take two samples *interval* seconds apart.
            st1 = timer()
            pt1 = self._proc.cpu_times()
            time.sleep(interval)
            st2 = timer()
            pt2 = self._proc.cpu_times()
        else:
            # Non-blocking: compare against the samples saved by the
            # previous call (if any).
            st1 = self._last_sys_cpu_times
            pt1 = self._last_proc_cpu_times
            st2 = timer()
            pt2 = self._proc.cpu_times()
            if st1 is None or pt1 is None:
                # First call: there is no previous sample to compare
                # against yet.
                self._last_sys_cpu_times = st2
                self._last_proc_cpu_times = pt2
                return 0.0

        delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
        delta_time = st2 - st1
        # reset values for next call in case of interval == None
        self._last_sys_cpu_times = st2
        self._last_proc_cpu_times = pt2

        try:
            # This is the utilization split evenly between all CPUs.
            # E.g. a busy loop process on a 2-CPU-cores system at this
            # point is reported as 50% instead of 100%.
            overall_cpus_percent = (delta_proc / delta_time) * 100
        except ZeroDivisionError:
            # interval was too low
            return 0.0
        else:
            # Note 1:
            # in order to emulate "top" we multiply the value for the num
            # of CPU cores. This way the busy process will be reported as
            # having 100% (or more) usage.
            #
            # Note 2:
            # taskmgr.exe on Windows differs in that it will show 50%
            # instead.
            #
            # Note 3:
            # a percentage > 100 is legitimate as it can result from a
            # process with multiple threads running on different CPU
            # cores (top does the same), see:
            # http://stackoverflow.com/questions/1032357
            # https://github.com/giampaolo/psutil/issues/474
            single_cpu_percent = overall_cpus_percent * num_cpus
            return round(single_cpu_percent, 1)
1117
    @memoize_when_activated
    def cpu_times(self):
        """Return a (user, system, children_user, children_system)
        namedtuple representing the accumulated process time, in
        seconds.
        This is similar to os.times() but per-process.
        On macOS and Windows children_user and children_system are
        always set to 0.
        """
        # Result is cached while memoization is active; see
        # memoize_when_activated.
        return self._proc.cpu_times()
1128
    @memoize_when_activated
    def memory_info(self):
        """Return a namedtuple with variable fields depending on the
        platform, representing memory information about the process.

        The "portable" fields available on all platforms are `rss` and `vms`.

        All numbers are expressed in bytes.
        """
        # Result is cached while memoization is active; see
        # memoize_when_activated.
        return self._proc.memory_info()
1139
1140 def memory_full_info(self):
1141 """This method returns the same information as memory_info(),
1142 plus, on some platform (Linux, macOS, Windows), also provides
1143 additional metrics (USS, PSS and swap).
1144 The additional metrics provide a better representation of actual
1145 process memory usage.
1146
1147 Namely USS is the memory which is unique to a process and which
1148 would be freed if the process was terminated right now.
1149
1150 It does so by passing through the whole process address.
1151 As such it usually requires higher user privileges than
1152 memory_info() and is considerably slower.
1153 """
1154 return self._proc.memory_full_info()
1155
1156 def memory_percent(self, memtype="rss"):
1157 """Compare process memory to total physical system memory and
1158 calculate process memory utilization as a percentage.
1159 *memtype* argument is a string that dictates what type of
1160 process memory you want to compare against (defaults to "rss").
1161 The list of available strings can be obtained like this:
1162
1163 >>> psutil.Process().memory_info()._fields
1164 ('rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty', 'uss', 'pss')
1165 """
1166 valid_types = list(_ntp.pfullmem._fields)
1167 if memtype not in valid_types:
1168 msg = (
1169 f"invalid memtype {memtype!r}; valid types are"
1170 f" {tuple(valid_types)!r}"
1171 )
1172 raise ValueError(msg)
1173 fun = (
1174 self.memory_info
1175 if memtype in _ntp.pmem._fields
1176 else self.memory_full_info
1177 )
1178 metrics = fun()
1179 value = getattr(metrics, memtype)
1180
1181 # use cached value if available
1182 total_phymem = _TOTAL_PHYMEM or virtual_memory().total
1183 if not total_phymem > 0:
1184 # we should never get here
1185 msg = (
1186 "can't calculate process memory percent because total physical"
1187 f" system memory is not positive ({total_phymem!r})"
1188 )
1189 raise ValueError(msg)
1190 return (value / float(total_phymem)) * 100
1191
    # Only defined when the platform implementation provides memory_maps().
    if hasattr(_psplatform.Process, "memory_maps"):

        def memory_maps(self, grouped=True):
            """Return process' mapped memory regions as a list of namedtuples
            whose fields are variable depending on the platform.

            If *grouped* is True the mapped regions with the same 'path'
            are grouped together and the different memory fields are summed.

            If *grouped* is False every mapped region is shown as a single
            entity and the namedtuple will also include the mapped region's
            address space ('addr') and permission set ('perms').
            """
            it = self._proc.memory_maps()
            if grouped:
                d = {}
                for tupl in it:
                    path = tupl[2]
                    nums = tupl[3:]
                    try:
                        # Sum the numeric fields of regions sharing the
                        # same path, element by element.
                        d[path] = list(map(lambda x, y: x + y, d[path], nums))
                    except KeyError:
                        # First region seen for this path.
                        d[path] = nums
                return [_ntp.pmmap_grouped(path, *d[path]) for path in d]
            else:
                return [_ntp.pmmap_ext(*x) for x in it]
1218
1219 def open_files(self):
1220 """Return files opened by process as a list of
1221 (path, fd) namedtuples including the absolute file name
1222 and file descriptor number.
1223 """
1224 return self._proc.open_files()
1225
1226 def net_connections(self, kind='inet'):
1227 """Return socket connections opened by process as a list of
1228 (fd, family, type, laddr, raddr, status) namedtuples.
1229 The *kind* parameter filters for connections that match the
1230 following criteria:
1231
1232 +------------+----------------------------------------------------+
1233 | Kind Value | Connections using |
1234 +------------+----------------------------------------------------+
1235 | inet | IPv4 and IPv6 |
1236 | inet4 | IPv4 |
1237 | inet6 | IPv6 |
1238 | tcp | TCP |
1239 | tcp4 | TCP over IPv4 |
1240 | tcp6 | TCP over IPv6 |
1241 | udp | UDP |
1242 | udp4 | UDP over IPv4 |
1243 | udp6 | UDP over IPv6 |
1244 | unix | UNIX socket (both UDP and TCP protocols) |
1245 | all | the sum of all the possible families and protocols |
1246 +------------+----------------------------------------------------+
1247 """
1248 _check_conn_kind(kind)
1249 return self._proc.net_connections(kind)
1250
    # Deprecated alias kept for backward compatibility; use
    # net_connections() instead.
    @_common.deprecated_method(replacement="net_connections")
    def connections(self, kind="inet"):
        return self.net_connections(kind=kind)
1254
1255 # --- signals
1256
    if POSIX:

        def _send_signal(self, sig):
            # Deliver *sig* to this process via os.kill(), translating
            # OS-level errors into psutil exceptions.
            assert not self.pid < 0, self.pid
            self._raise_if_pid_reused()

            pid, ppid, name = self.pid, self._ppid, self._name
            if pid == 0:
                # see "man 2 kill"
                msg = (
                    "preventing sending signal to process with PID 0 as it "
                    "would affect every process in the process group of the "
                    "calling process (os.getpid()) instead of PID 0"
                )
                raise ValueError(msg)
            try:
                os.kill(pid, sig)
            except ProcessLookupError as err:
                if OPENBSD and pid_exists(pid):
                    # We do this because os.kill() lies in case of
                    # zombie processes.
                    raise ZombieProcess(pid, name, ppid) from err
                # Mark this instance as gone so later calls can notice.
                self._gone = True
                raise NoSuchProcess(pid, name) from err
            except PermissionError as err:
                raise AccessDenied(pid, name) from err
1283
1284 def send_signal(self, sig):
1285 """Send a signal *sig* to process pre-emptively checking
1286 whether PID has been reused (see signal module constants) .
1287 On Windows only SIGTERM is valid and is treated as an alias
1288 for kill().
1289 """
1290 if POSIX:
1291 self._send_signal(sig)
1292 else: # pragma: no cover
1293 self._raise_if_pid_reused()
1294 if sig != signal.SIGTERM and not self.is_running():
1295 msg = "process no longer exists"
1296 raise NoSuchProcess(self.pid, self._name, msg=msg)
1297 self._proc.send_signal(sig)
1298
1299 def suspend(self):
1300 """Suspend process execution with SIGSTOP pre-emptively checking
1301 whether PID has been reused.
1302 On Windows this has the effect of suspending all process threads.
1303 """
1304 if POSIX:
1305 self._send_signal(signal.SIGSTOP)
1306 else: # pragma: no cover
1307 self._raise_if_pid_reused()
1308 self._proc.suspend()
1309
1310 def resume(self):
1311 """Resume process execution with SIGCONT pre-emptively checking
1312 whether PID has been reused.
1313 On Windows this has the effect of resuming all process threads.
1314 """
1315 if POSIX:
1316 self._send_signal(signal.SIGCONT)
1317 else: # pragma: no cover
1318 self._raise_if_pid_reused()
1319 self._proc.resume()
1320
1321 def terminate(self):
1322 """Terminate the process with SIGTERM pre-emptively checking
1323 whether PID has been reused.
1324 On Windows this is an alias for kill().
1325 """
1326 if POSIX:
1327 self._send_signal(signal.SIGTERM)
1328 else: # pragma: no cover
1329 self._raise_if_pid_reused()
1330 self._proc.kill()
1331
1332 def kill(self):
1333 """Kill the current process with SIGKILL pre-emptively checking
1334 whether PID has been reused.
1335 """
1336 if POSIX:
1337 self._send_signal(signal.SIGKILL)
1338 else: # pragma: no cover
1339 self._raise_if_pid_reused()
1340 self._proc.kill()
1341
    def wait(self, timeout=None):
        """Wait for process to terminate, and if process is a child
        of os.getpid(), also return its exit code, else None.
        On Windows there's no such limitation (exit code is always
        returned).

        If the process is already terminated, immediately return None
        instead of raising NoSuchProcess.

        If *timeout* (in seconds) is specified and process is still
        alive, raise TimeoutExpired.

        If *timeout=0* either return immediately or raise
        TimeoutExpired (non-blocking).

        To wait for multiple Process objects use psutil.wait_procs().
        """
        if self.pid == 0:
            msg = "can't wait for PID 0"
            raise ValueError(msg)
        if timeout is not None:
            if not isinstance(timeout, (int, float)):
                msg = f"timeout must be an int or float (got {type(timeout)})"
                raise TypeError(msg)
            if timeout < 0:
                msg = f"timeout must be positive or zero (got {timeout})"
                raise ValueError(msg)

        # Process already waited on before: return the cached exit code.
        if self._exitcode is not _SENTINEL:
            return self._exitcode

        try:
            self._exitcode = self._proc.wait(timeout)
        except TimeoutExpired as err:
            # Re-raise with PID and process name attached for context.
            exc = TimeoutExpired(timeout, pid=self.pid, name=self._name)
            raise exc from err

        return self._exitcode
1380
1381
# The valid attr names which can be processed by Process.as_dict().
# fmt: off
_as_dict_attrnames = {
    attr
    for attr in dir(Process)
    if not attr.startswith("_")
    and attr not in {
        'send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
        'is_running', 'as_dict', 'parent', 'parents', 'children', 'rlimit',
        'connections', 'oneshot',
    }
}
# fmt: on
1391
1392
1393# =====================================================================
1394# --- Popen class
1395# =====================================================================
1396
1397
class Popen(Process):
    """Same as subprocess.Popen, but in addition it provides all
    psutil.Process methods in a single class.
    For the following methods which are common to both classes, psutil
    implementation takes precedence:

    * send_signal()
    * terminate()
    * kill()

    This is done in order to avoid killing another process in case its
    PID has been reused, fixing BPO-6973.

    >>> import psutil
    >>> from subprocess import PIPE
    >>> p = psutil.Popen(["python", "-c", "print('hi')"], stdout=PIPE)
    >>> p.name()
    'python'
    >>> p.uids()
    user(real=1000, effective=1000, saved=1000)
    >>> p.username()
    'giampaolo'
    >>> p.communicate()
    ('hi', None)
    >>> p.terminate()
    >>> p.wait(timeout=2)
    0
    >>>
    """

    def __init__(self, *args, **kwargs):
        # Explicitly avoid to raise NoSuchProcess in case the process
        # spawned by subprocess.Popen terminates too quickly, see:
        # https://github.com/giampaolo/psutil/issues/193
        self.__subproc = subprocess.Popen(*args, **kwargs)
        self._init(self.__subproc.pid, _ignore_nsp=True)

    def __dir__(self):
        # Expose the attributes of both this class and the wrapped
        # subprocess.Popen instance.
        return sorted(set(dir(Popen) + dir(subprocess.Popen)))

    def __enter__(self):
        if hasattr(self.__subproc, '__enter__'):
            self.__subproc.__enter__()
        return self

    def __exit__(self, *args, **kwargs):
        if hasattr(self.__subproc, '__exit__'):
            # Delegate cleanup to subprocess.Popen when it supports the
            # context manager protocol.
            return self.__subproc.__exit__(*args, **kwargs)
        else:
            # Manual cleanup: close the pipes, then reap the process.
            if self.stdout:
                self.stdout.close()
            if self.stderr:
                self.stderr.close()
            try:
                # Flushing a BufferedWriter may raise an error.
                if self.stdin:
                    self.stdin.close()
            finally:
                # Wait for the process to terminate, to avoid zombies.
                self.wait()

    def __getattribute__(self, name):
        # Look the attribute up on this class first, then fall back on
        # the wrapped subprocess.Popen instance.
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            try:
                return object.__getattribute__(self.__subproc, name)
            except AttributeError:
                msg = f"{self.__class__!r} has no attribute {name!r}"
                raise AttributeError(msg) from None

    def wait(self, timeout=None):
        # Return the cached exit code if the process already terminated.
        if self.__subproc.returncode is not None:
            return self.__subproc.returncode
        ret = super().wait(timeout)
        # Keep subprocess.Popen's returncode in sync.
        self.__subproc.returncode = ret
        return ret
1475
1476
1477# =====================================================================
1478# --- system processes related functions
1479# =====================================================================
1480
1481
def pids():
    """Return a sorted list of the PIDs of currently running
    processes.
    """
    global _LOWEST_PID
    all_pids = sorted(_psplatform.pids())
    # Remember the smallest PID seen; used elsewhere in the module.
    _LOWEST_PID = all_pids[0]
    return all_pids
1488
1489
def pid_exists(pid):
    """Return True if given PID exists in the current process list.
    This is faster than doing "pid in psutil.pids()" and
    should be preferred.
    """
    if pid < 0:
        # Negative PIDs never exist.
        return False
    if pid == 0 and POSIX:
        # On POSIX we use os.kill() to determine PID existence.
        # According to "man 2 kill" PID 0 has a special meaning
        # though: it refers to <<every process in the process
        # group of the calling process>> and that is not what we
        # want to do here.
        return pid in pids()
    return _psplatform.pid_exists(pid)
1506
1507
# Cache of Process instances used by process_iter(), indexed by PID.
_pmap = {}
# PIDs whose cached Process instance must be refreshed on the next
# process_iter() call because the PID was reused.
_pids_reused = set()
1510
1511
def process_iter(attrs=None, ad_value=None):
    """Return a generator yielding a Process instance for all
    running processes.

    Every new Process instance is only created once and then cached
    into an internal table which is updated every time this is used.
    Cache can optionally be cleared via `process_iter.cache_clear()`.

    The sorting order in which processes are yielded is based on
    their PIDs.

    *attrs* and *ad_value* have the same meaning as in
    Process.as_dict(). If *attrs* is specified as_dict() is called
    and the resulting dict is stored as a 'info' attribute attached
    to returned Process instance.
    If *attrs* is an empty list it will retrieve all process info
    (slow).
    """
    global _pmap

    def add(pid):
        # Create a new Process instance and cache it.
        proc = Process(pid)
        pmap[proc.pid] = proc
        return proc

    def remove(pid):
        pmap.pop(pid, None)

    # Work on a copy so the global cache is only replaced at the end.
    pmap = _pmap.copy()
    a = set(pids())
    b = set(pmap.keys())
    new_pids = a - b
    gone_pids = b - a
    for pid in gone_pids:
        remove(pid)
    # Drop cached instances for PIDs flagged as reused so they get
    # re-created below.
    while _pids_reused:
        pid = _pids_reused.pop()
        debug(f"refreshing Process instance for reused PID {pid}")
        remove(pid)
    try:
        # Cached (pid, Process) pairs plus (pid, None) placeholders for
        # new PIDs, sorted by PID.
        ls = sorted(list(pmap.items()) + list(dict.fromkeys(new_pids).items()))
        for pid, proc in ls:
            try:
                if proc is None:  # new process
                    proc = add(pid)
                if attrs is not None:
                    proc.info = proc.as_dict(attrs=attrs, ad_value=ad_value)
                yield proc
            except NoSuchProcess:
                remove(pid)
    finally:
        # Publish the updated cache even if the generator is closed
        # before being exhausted.
        _pmap = pmap
1564
1565
# Attach a public helper for emptying process_iter()'s internal cache.
process_iter.cache_clear = lambda: _pmap.clear()  # noqa: PLW0108
process_iter.cache_clear.__doc__ = "Clear process_iter() internal cache."
1568
1569
def wait_procs(procs, timeout=None, callback=None):
    """Convenience function which waits for a list of processes to
    terminate.

    Return a (gone, alive) tuple indicating which processes
    are gone and which ones are still alive.

    The gone ones will have a new *returncode* attribute indicating
    process exit status (may be None).

    *callback* is a function which gets called every time a process
    terminates (a Process instance is passed as callback argument).

    Function will return as soon as all processes terminate or when
    *timeout* occurs.
    Differently from Process.wait() it will not raise TimeoutExpired if
    *timeout* occurs.

    Typical use case is:

     - send SIGTERM to a list of processes
     - give them some time to terminate
     - send SIGKILL to those ones which are still alive

    Example:

    >>> def on_terminate(proc):
    ...     print("process {} terminated".format(proc))
    ...
    >>> for p in procs:
    ...    p.terminate()
    ...
    >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
    >>> for p in alive:
    ...     p.kill()
    """

    def check_gone(proc, timeout):
        # Wait on a single process; move it into `gone` (and invoke
        # *callback*) if it terminated.
        try:
            returncode = proc.wait(timeout=timeout)
        except (TimeoutExpired, subprocess.TimeoutExpired):
            pass
        else:
            if returncode is not None or not proc.is_running():
                # Set new Process instance attribute.
                proc.returncode = returncode
                gone.add(proc)
                if callback is not None:
                    callback(proc)

    if timeout is not None and not timeout >= 0:
        msg = f"timeout must be a positive integer, got {timeout}"
        raise ValueError(msg)
    gone = set()
    alive = set(procs)
    if callback is not None and not callable(callback):
        msg = f"callback {callback!r} is not a callable"
        raise TypeError(msg)
    if timeout is not None:
        deadline = _timer() + timeout

    while alive:
        if timeout is not None and timeout <= 0:
            break
        for proc in alive:
            # Make sure that every complete iteration (all processes)
            # will last max 1 sec.
            # We do this because we don't want to wait too long on a
            # single process: in case it terminates too late other
            # processes may disappear in the meantime and their PID
            # reused.
            max_timeout = 1.0 / len(alive)
            if timeout is not None:
                # Cap the per-process wait by the remaining global
                # deadline.
                timeout = min((deadline - _timer()), max_timeout)
                if timeout <= 0:
                    break
                check_gone(proc, timeout)
            else:
                check_gone(proc, max_timeout)
        alive = alive - gone  # noqa: PLR6104

    if alive:
        # Last attempt over processes survived so far.
        # timeout == 0 won't make this function wait any further.
        for proc in alive:
            check_gone(proc, 0)
        alive = alive - gone  # noqa: PLR6104

    return (list(gone), list(alive))
1659
1660
1661# =====================================================================
1662# --- CPU related functions
1663# =====================================================================
1664
1665
def cpu_count(logical=True):
    """Return the number of logical CPUs in the system (same as
    os.cpu_count()).

    If *logical* is False return the number of physical cores only
    (e.g. hyper thread CPUs are excluded).

    Return None if undetermined.

    The return value is cached after first call.
    If desired cache can be cleared like this:

    >>> psutil.cpu_count.cache_clear()
    """
    counter = (
        _psplatform.cpu_count_logical
        if logical
        else _psplatform.cpu_count_cores
    )
    count = counter()
    # Normalize bogus values (zero or negative) to None ("undetermined").
    if count is not None and count < 1:
        count = None
    return count
1687
1688
def cpu_times(percpu=False):
    """Return system-wide CPU times as a namedtuple.
    Every CPU time represents the seconds the CPU has spent in the
    given mode. The namedtuple's fields availability varies depending on the
    platform:

     - user
     - system
     - idle
     - nice (UNIX)
     - iowait (Linux)
     - irq (Linux, FreeBSD)
     - softirq (Linux)
     - steal (Linux >= 2.6.11)
     - guest (Linux >= 2.6.24)
     - guest_nice (Linux >= 3.2.0)

    When *percpu* is True return a list of namedtuples for each CPU.
    First element of the list refers to first CPU, second element
    to second CPU and so on.
    The order of the list is consistent across calls.
    """
    if percpu:
        return _psplatform.per_cpu_times()
    return _psplatform.cpu_times()
1715
1716
# Sample CPU times once at import so that the first non-blocking
# cpu_percent() call has a baseline to compare against, keyed by the
# importing thread's id.
try:
    _last_cpu_times = {threading.current_thread().ident: cpu_times()}
except Exception:  # noqa: BLE001
    # Don't want to crash at import time.
    _last_cpu_times = {}

try:
    _last_per_cpu_times = {
        threading.current_thread().ident: cpu_times(percpu=True)
    }
except Exception:  # noqa: BLE001
    # Don't want to crash at import time.
    _last_per_cpu_times = {}
1730
1731
def _cpu_tot_time(times):
    """Given a cpu_time() ntuple calculates the total CPU time
    (including idle time).
    """
    total = sum(times)
    if LINUX:
        # On Linux guest times are already accounted in "user" or
        # "nice" times, so we subtract them from total.
        # Htop does the same. References:
        # https://github.com/giampaolo/psutil/pull/940
        # http://unix.stackexchange.com/questions/178045
        # https://github.com/torvalds/linux/blob/
        #     447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/
        #     cputime.c#L158
        total -= getattr(times, "guest", 0)  # Linux 2.6.24+
        total -= getattr(times, "guest_nice", 0)  # Linux 3.2.0+
    return total
1749
1750
def _cpu_busy_time(times):
    """Given a cpu_time() ntuple calculates the busy CPU time.
    We do so by subtracting all idle CPU times.
    """
    # Start from the total and strip the idle component.
    busy = _cpu_tot_time(times) - times.idle
    # Linux: "iowait" is time during which the CPU does not do anything
    # (waits for IO to complete). On Linux IO wait is *not* accounted
    # in "idle" time so we subtract it. Htop does the same.
    # References:
    # https://github.com/torvalds/linux/blob/
    #     447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/cputime.c#L244
    return busy - getattr(times, "iowait", 0)
1765
1766
def _cpu_times_deltas(t1, t2):
    """Return an scputimes ntuple with the per-field differences
    between two cpu_times() samples (t2 - t1).
    """
    assert t1._fields == t2._fields, (t1, t2)
    # CPU times are always supposed to increase over time
    # or at least remain the same and that's because time
    # cannot go backwards.
    # Surprisingly sometimes this might not be the case (at
    # least on Windows and Linux), see:
    # https://github.com/giampaolo/psutil/issues/392
    # https://github.com/giampaolo/psutil/issues/645
    # https://github.com/giampaolo/psutil/issues/1210
    # Trim negative deltas to zero to ignore decreasing fields.
    # top does the same. Reference:
    # https://gitlab.com/procps-ng/procps/blob/v3.3.12/top/top.c#L5063
    deltas = [
        max(0, getattr(t2, field) - getattr(t1, field))
        for field in _ntp.scputimes._fields
    ]
    return _ntp.scputimes(*deltas)
1786
1787
def cpu_percent(interval=None, percpu=False):
    """Return a float representing the current system-wide CPU
    utilization as a percentage.

    When *interval* is > 0.0 compares system CPU times elapsed before
    and after the interval (blocking).

    When *interval* is 0.0 or None compares system CPU times elapsed
    since last call or module import, returning immediately (non
    blocking). That means the first time this is called it will
    return a meaningless 0.0 value which you should ignore.
    In this case is recommended for accuracy that this function be
    called with at least 0.1 seconds between calls.

    When *percpu* is True returns a list of floats representing the
    utilization as a percentage for each CPU.
    First element of the list refers to first CPU, second element
    to second CPU and so on.
    The order of the list is consistent across calls.

    Examples:

      >>> # blocking, system-wide
      >>> psutil.cpu_percent(interval=1)
      2.0
      >>>
      >>> # blocking, per-cpu
      >>> psutil.cpu_percent(interval=1, percpu=True)
      [2.0, 1.0]
      >>>
      >>> # non-blocking (percentage since last call)
      >>> psutil.cpu_percent(interval=None)
      2.9
      >>>
    """
    # Baseline samples are cached per calling thread (keyed by thread
    # id) so concurrent callers don't interfere with each other.
    tid = threading.current_thread().ident
    blocking = interval is not None and interval > 0.0
    if interval is not None and interval < 0:
        msg = f"interval is not positive (got {interval})"
        raise ValueError(msg)

    def calculate(t1, t2):
        # Turn two cpu_times() samples into a busy percentage.
        times_delta = _cpu_times_deltas(t1, t2)
        all_delta = _cpu_tot_time(times_delta)
        busy_delta = _cpu_busy_time(times_delta)

        try:
            busy_perc = (busy_delta / all_delta) * 100
        except ZeroDivisionError:
            # interval was too low
            return 0.0
        else:
            return round(busy_perc, 1)

    # system-wide usage
    if not percpu:
        if blocking:
            t1 = cpu_times()
            time.sleep(interval)
        else:
            # Fall back on a fresh sample if this thread has none yet.
            t1 = _last_cpu_times.get(tid) or cpu_times()
        _last_cpu_times[tid] = cpu_times()
        return calculate(t1, _last_cpu_times[tid])
    # per-cpu usage
    else:
        ret = []
        if blocking:
            tot1 = cpu_times(percpu=True)
            time.sleep(interval)
        else:
            # Fall back on a fresh sample if this thread has none yet.
            tot1 = _last_per_cpu_times.get(tid) or cpu_times(percpu=True)
        _last_per_cpu_times[tid] = cpu_times(percpu=True)
        for t1, t2 in zip(tot1, _last_per_cpu_times[tid]):
            ret.append(calculate(t1, t2))
        return ret
1862
1863
# Use a separate dict for cpu_times_percent(), so it's independent from
# cpu_percent() and they can both be used within the same program.
# Both start from the samples taken at import time.
_last_cpu_times_2 = _last_cpu_times.copy()
_last_per_cpu_times_2 = _last_per_cpu_times.copy()
1868
1869
def cpu_times_percent(interval=None, percpu=False):
    """Same as cpu_percent() but provides utilization percentages
    for each specific CPU time as is returned by cpu_times().
    For instance, on Linux we'll get:

      >>> cpu_times_percent()
      cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
                 irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
      >>>

    *interval* and *percpu* arguments have the same meaning as in
    cpu_percent().
    """
    # Baseline samples are cached per calling thread (keyed by thread
    # id) so concurrent callers don't interfere with each other.
    tid = threading.current_thread().ident
    blocking = interval is not None and interval > 0.0
    if interval is not None and interval < 0:
        msg = f"interval is not positive (got {interval!r})"
        raise ValueError(msg)

    def calculate(t1, t2):
        # Turn two cpu_times() samples into per-field percentages.
        nums = []
        times_delta = _cpu_times_deltas(t1, t2)
        all_delta = _cpu_tot_time(times_delta)
        # "scale" is the value to multiply each delta with to get percentages.
        # We use "max" to avoid division by zero (if all_delta is 0, then all
        # fields are 0 so percentages will be 0 too. all_delta cannot be a
        # fraction because cpu times are integers)
        scale = 100.0 / max(1, all_delta)
        for field_delta in times_delta:
            field_perc = field_delta * scale
            field_perc = round(field_perc, 1)
            # make sure we don't return negative values or values over 100%
            field_perc = min(max(0.0, field_perc), 100.0)
            nums.append(field_perc)
        return _ntp.scputimes(*nums)

    # system-wide usage
    if not percpu:
        if blocking:
            t1 = cpu_times()
            time.sleep(interval)
        else:
            # Fall back on a fresh sample if this thread has none yet.
            t1 = _last_cpu_times_2.get(tid) or cpu_times()
        _last_cpu_times_2[tid] = cpu_times()
        return calculate(t1, _last_cpu_times_2[tid])
    # per-cpu usage
    else:
        ret = []
        if blocking:
            tot1 = cpu_times(percpu=True)
            time.sleep(interval)
        else:
            # Fall back on a fresh sample if this thread has none yet.
            tot1 = _last_per_cpu_times_2.get(tid) or cpu_times(percpu=True)
        _last_per_cpu_times_2[tid] = cpu_times(percpu=True)
        for t1, t2 in zip(tot1, _last_per_cpu_times_2[tid]):
            ret.append(calculate(t1, t2))
        return ret
1927
1928
def cpu_stats():
    """Return CPU statistics as reported by the platform
    implementation.
    """
    stats = _psplatform.cpu_stats()
    return stats
1932
1933
# Only defined (and exported via __all__) when the platform
# implementation provides cpu_freq().
if hasattr(_psplatform, "cpu_freq"):

    def cpu_freq(percpu=False):
        """Return CPU frequency as a namedtuple including current,
        min and max frequency expressed in Mhz.

        If *percpu* is True and the system supports per-cpu frequency
        retrieval (Linux only) a list of frequencies is returned for
        each CPU. If not a list with one element is returned.
        """
        ret = _psplatform.cpu_freq()
        if percpu:
            return ret
        else:
            # Collapse the per-CPU entries into a single namedtuple by
            # averaging across CPUs.
            num_cpus = float(len(ret))
            if num_cpus == 0:
                return None
            elif num_cpus == 1:
                return ret[0]
            else:
                currs, mins, maxs = 0.0, 0.0, 0.0
                set_none = False
                for cpu in ret:
                    currs += cpu.current
                    # On Linux if /proc/cpuinfo is used min/max are set
                    # to None.
                    if LINUX and cpu.min is None:
                        set_none = True
                        continue
                    mins += cpu.min
                    maxs += cpu.max

                current = currs / num_cpus

                if set_none:
                    min_ = max_ = None
                else:
                    min_ = mins / num_cpus
                    max_ = maxs / num_cpus

                return _ntp.scpufreq(current, min_, max_)

    __all__.append("cpu_freq")
1977
1978
if hasattr(os, "getloadavg") or hasattr(_psplatform, "getloadavg"):
    # Resolved once at import time: proxy the os module's
    # implementation when the stdlib provides it, otherwise fall back
    # to the platform-specific one.
    getloadavg = (
        os.getloadavg if hasattr(os, "getloadavg") else _psplatform.getloadavg
    )

    __all__.append("getloadavg")
1988
1989
1990# =====================================================================
1991# --- system memory related functions
1992# =====================================================================
1993
1994
def virtual_memory():
    """Return statistics about system memory usage as a namedtuple.
    All fields are expressed in bytes:

     - total:
       total physical memory available.

     - available:
       memory that can be handed to processes instantly, without the
       system going into swap.
       It is computed by summing different memory values depending on
       the platform, and is the figure to monitor for actual memory
       usage in a cross-platform fashion.

     - percent:
       the percentage usage calculated as (total - available) / total * 100

     - used:
       memory in use, computed differently depending on the platform
       and meant for informational purposes only:
       macOS: active + wired
       BSD: active + wired + cached
       Linux: total - free

     - free:
       memory not being used at all (zeroed) that is readily available;
       note that this doesn't reflect the actual memory available
       (use 'available' instead)

    Platform-specific fields:

     - active (UNIX):
       memory currently in use or very recently used, and so it is in RAM.

     - inactive (UNIX):
       memory that is marked as not used.

     - buffers (BSD, Linux):
       cache for things like file system metadata.

     - cached (BSD, macOS):
       cache for various things.

     - wired (macOS, BSD):
       memory that is marked to always stay in RAM. It is never moved to disk.

     - shared (BSD):
       memory that may be simultaneously accessed by multiple processes.

    The sum of 'used' and 'available' does not necessarily equal total.
    On Windows 'available' and 'free' are the same.
    """
    global _TOTAL_PHYMEM
    mem = _psplatform.virtual_memory()
    # Cache total physical memory so Process.memory_percent() doesn't
    # have to re-query it on every call.
    _TOTAL_PHYMEM = mem.total
    return mem
2052
2053
def swap_memory():
    """Return system swap memory statistics as a namedtuple with the
    following fields:

     - total: total swap memory in bytes
     - used: used swap memory in bytes
     - free: free swap memory in bytes
     - percent: the percentage usage
     - sin: no. of bytes the system has swapped in from disk (cumulative)
     - sout: no. of bytes the system has swapped out from disk (cumulative)

    On Windows 'sin' and 'sout' are meaningless and always set to 0.
    """
    return _psplatform.swap_memory()
2068
2069
2070# =====================================================================
2071# --- disks/partitions related functions
2072# =====================================================================
2073
2074
def disk_usage(path):
    """Return disk usage statistics about the given *path* as a
    namedtuple with total, used and free space expressed in bytes,
    plus the percentage usage.
    """
    return _psplatform.disk_usage(path)
2081
2082
def disk_partitions(all=False):
    """Return mounted partitions as a list of
    (device, mountpoint, fstype, opts) namedtuples.
    'opts' is a raw comma-separated string of mount options whose
    contents may vary depending on the platform.

    If the *all* parameter is False, return physical devices only and
    skip everything else.
    """
    # NOTE: `all` shadows the builtin, but it is part of the public
    # API and cannot be renamed.
    return _psplatform.disk_partitions(all)
2093
2094
def disk_io_counters(perdisk=False, nowrap=True):
    """Return system disk I/O statistics as a namedtuple including
    the following fields:

     - read_count: number of reads
     - write_count: number of writes
     - read_bytes: number of bytes read
     - write_bytes: number of bytes written
     - read_time: time spent reading from disk (in ms)
     - write_time: time spent writing to disk (in ms)

    Platform specific:

     - busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms)
     - read_merged_count (Linux): number of merged reads
     - write_merged_count (Linux): number of merged writes

    If *perdisk* is True return the same information for every
    physical disk installed on the system as a dictionary keyed by
    partition name, with the namedtuple described above as values.

    If *nowrap* is True numbers which overflow and wrap around
    (restart from 0) are detected and adjusted by adding the "old
    value" to the "new value", so the returned figures always increase
    or stay the same, but never decrease.
    "disk_io_counters.cache_clear()" can be used to invalidate the
    cache.

    On recent Windows versions 'diskperf -y' command may need to be
    executed first otherwise this function won't find any disk.
    """
    # Only the Linux implementation accepts the perdisk argument.
    if LINUX:
        rawdict = _psplatform.disk_io_counters(perdisk=perdisk)
    else:
        rawdict = _psplatform.disk_io_counters()
    if not rawdict:
        return {} if perdisk else None
    if nowrap:
        rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters')
    if perdisk:
        return {
            disk: _ntp.sdiskio(*fields) for disk, fields in rawdict.items()
        }
    # System-wide total: sum every field across all disks.
    return _ntp.sdiskio(*map(sum, zip(*rawdict.values())))


disk_io_counters.cache_clear = functools.partial(
    _wrap_numbers.cache_clear, 'psutil.disk_io_counters'
)
disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
2145
2146
2147# =====================================================================
2148# --- network related functions
2149# =====================================================================
2150
2151
def net_io_counters(pernic=False, nowrap=True):
    """Return network I/O statistics as a namedtuple including
    the following fields:

     - bytes_sent: number of bytes sent
     - bytes_recv: number of bytes received
     - packets_sent: number of packets sent
     - packets_recv: number of packets received
     - errin: total number of errors while receiving
     - errout: total number of errors while sending
     - dropin: total number of incoming packets which were dropped
     - dropout: total number of outgoing packets which were dropped
       (always 0 on macOS and BSD)

    If *pernic* is True return the same information for every
    network interface installed on the system as a dictionary keyed
    by interface name, with the namedtuple described above as values.

    If *nowrap* is True numbers which overflow and wrap around
    (restart from 0) are detected and adjusted by adding the "old
    value" to the "new value", so the returned figures always increase
    or stay the same, but never decrease.
    "net_io_counters.cache_clear()" can be used to invalidate the
    cache.
    """
    rawdict = _psplatform.net_io_counters()
    if not rawdict:
        return {} if pernic else None
    if nowrap:
        rawdict = _wrap_numbers(rawdict, 'psutil.net_io_counters')
    if pernic:
        return {nic: _ntp.snetio(*fields) for nic, fields in rawdict.items()}
    # System-wide total: sum every field across all NICs.
    return _ntp.snetio(*map(sum, zip(*rawdict.values())))


net_io_counters.cache_clear = functools.partial(
    _wrap_numbers.cache_clear, 'psutil.net_io_counters'
)
net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
2195
2196
def net_connections(kind='inet'):
    """Return system-wide socket connections as a list of
    (fd, family, type, laddr, raddr, status, pid) namedtuples.
    When privileges are limited, 'fd' may be set to -1 and 'pid' to
    None.
    The *kind* parameter filters for connections matching the
    following criteria:

    +------------+----------------------------------------------------+
    | Kind Value | Connections using                                  |
    +------------+----------------------------------------------------+
    | inet       | IPv4 and IPv6                                      |
    | inet4      | IPv4                                               |
    | inet6      | IPv6                                               |
    | tcp        | TCP                                                |
    | tcp4       | TCP over IPv4                                      |
    | tcp6       | TCP over IPv6                                      |
    | udp        | UDP                                                |
    | udp4       | UDP over IPv4                                      |
    | udp6       | UDP over IPv6                                      |
    | unix       | UNIX socket (both UDP and TCP protocols)           |
    | all        | the sum of all the possible families and protocols |
    +------------+----------------------------------------------------+

    On macOS this function requires root privileges.
    """
    # Validate *kind* up front so a bogus value fails with a clear
    # error before reaching the platform implementation.
    _check_conn_kind(kind)
    return _psplatform.net_connections(kind)
2225
2226
def net_if_addrs():
    """Return the addresses associated to each NIC (network interface
    card) installed on the system as a dictionary whose keys are the
    NIC names and value is a list of namedtuples for each address
    assigned to the NIC. Each namedtuple includes 5 fields:

     - family: can be either socket.AF_INET, socket.AF_INET6 or
               psutil.AF_LINK, which refers to a MAC address.
     - address: is the primary address and it is always set.
     - netmask: and 'broadcast' and 'ptp' may be None.
     - ptp: stands for "point to point" and references the
            destination address on a point to point interface
            (typically a VPN).
     - broadcast: and *ptp* are mutually exclusive.

    Note: you can have more than one address of the same family
    associated with each interface.
    """
    rawlist = _psplatform.net_if_addrs()
    rawlist.sort(key=lambda x: x[1])  # sort by family
    ret = collections.defaultdict(list)
    for name, fam, addr, mask, broadcast, ptp in rawlist:
        try:
            fam = socket.AddressFamily(fam)
        except ValueError:
            if WINDOWS and fam == -1:
                fam = _psplatform.AF_LINK
            elif (
                hasattr(_psplatform, "AF_LINK") and fam == _psplatform.AF_LINK
            ):
                # Linux defines AF_LINK as an alias for AF_PACKET.
                # We re-set the family here so that repr(family)
                # will show AF_LINK rather than AF_PACKET
                fam = _psplatform.AF_LINK

        if fam == _psplatform.AF_LINK:
            # The underlying C function may return an incomplete MAC
            # address in which case we fill it with null bytes, see:
            # https://github.com/giampaolo/psutil/issues/786
            separator = ":" if POSIX else "-"
            while addr.count(separator) < 5:
                addr += f"{separator}00"

        nt = _ntp.snicaddr(fam, addr, mask, broadcast, ptp)

        # On Windows broadcast is None, so we determine it via
        # ipaddress module.
        if WINDOWS and fam in {socket.AF_INET, socket.AF_INET6}:
            try:
                broadcast = _common.broadcast_addr(nt)
            except Exception as err:  # noqa: BLE001
                debug(err)
            else:
                if broadcast is not None:
                    # BUGFIX: namedtuple._replace() returns a *new*
                    # tuple; the result must be re-assigned, otherwise
                    # the computed broadcast address is silently
                    # discarded.
                    nt = nt._replace(broadcast=broadcast)

        ret[name].append(nt)

    return dict(ret)
2286
2287
def net_if_stats():
    """Return information about each NIC (network interface card)
    installed on the system as a dictionary whose keys are the NIC
    names and whose values are namedtuples with the following fields:

     - isup: whether the interface is up (bool)
     - duplex: one of NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or
       NIC_DUPLEX_UNKNOWN
     - speed: the NIC speed expressed in mega bits (MB); set to 0 when
       it can't be determined (e.g. 'localhost').
     - mtu: the maximum transmission unit expressed in bytes.
    """
    return _psplatform.net_if_stats()
2301
2302
2303# =====================================================================
2304# --- sensors
2305# =====================================================================
2306
2307
2308# Linux, macOS
if hasattr(_psplatform, "sensors_temperatures"):

    def sensors_temperatures(fahrenheit=False):
        """Return hardware temperatures. Each entry is a namedtuple
        representing a certain hardware sensor (it may be a CPU, an
        hard disk or something else, depending on the OS and its
        configuration).
        All temperatures are expressed in celsius unless *fahrenheit*
        is set to True.
        """

        def convert(n):
            # Celsius -> Fahrenheit; None values are passed through
            # (returns None implicitly).
            if n is not None:
                return (float(n) * 9 / 5) + 32 if fahrenheit else n

        ret = collections.defaultdict(list)
        rawdict = _psplatform.sensors_temperatures()

        for name, values in rawdict.items():
            # Iterate directly instead of destructively popping from
            # the front: list.pop(0) is O(n) per call (O(n^2) overall)
            # and mutating the platform data is unnecessary.
            for label, current, high, critical in values:
                current = convert(current)
                high = convert(high)
                critical = convert(critical)

                # If only one of high/critical is available, use it
                # for both fields.
                if high and not critical:
                    critical = high
                elif critical and not high:
                    high = critical

                ret[name].append(
                    _ntp.shwtemp(label, current, high, critical)
                )

        return dict(ret)

    __all__.append("sensors_temperatures")
2344
2345
2346# Linux
if hasattr(_psplatform, "sensors_fans"):

    def sensors_fans():
        """Return fan speeds. Each entry is a namedtuple representing
        a certain hardware sensor. All speeds are expressed in RPM
        (rounds per minute).
        """
        return _psplatform.sensors_fans()

    __all__.append("sensors_fans")
2357
2358
2359# Linux, Windows, FreeBSD, macOS
if hasattr(_psplatform, "sensors_battery"):

    def sensors_battery():
        """Return battery information. If no battery is installed
        returns None.

        - percent: battery power left as a percentage.
        - secsleft: a rough approximation of how many seconds are left
                    before the battery runs out of power. May be
                    POWER_TIME_UNKNOWN or POWER_TIME_UNLIMITED.
        - power_plugged: True if the AC power cable is connected.
        """
        return _psplatform.sensors_battery()

    __all__.append("sensors_battery")
2375
2376
2377# =====================================================================
2378# --- other system related functions
2379# =====================================================================
2380
2381
def boot_time():
    """Return the system boot time expressed in seconds since the
    epoch (seconds since January 1, 1970, at midnight UTC). The value
    is based on the system clock, so it may be affected by changes
    such as manual adjustments or time synchronization (e.g. NTP).
    """
    return _psplatform.boot_time()
2390
2391
def users():
    """Return users currently connected on the system as a list of
    namedtuples with the following fields:

     - user: the name of the user
     - terminal: the tty or pseudo-tty associated with the user, if any.
     - host: the host name associated with the entry, if any.
     - started: the creation time as a floating point number expressed
       in seconds since the epoch.
    """
    return _psplatform.users()
2403
2404
2405# =====================================================================
2406# --- Windows services
2407# =====================================================================
2408
2409
if WINDOWS:

    def win_service_iter():
        """Return a generator yielding one WindowsService instance for
        every Windows service installed on the system.
        """
        return _psplatform.win_service_iter()

    def win_service_get(name):
        """Look up a Windows service by *name*.
        Raise NoSuchProcess if no service with that name exists.
        """
        return _psplatform.win_service_get(name)
2423
2424
2425# =====================================================================
2426# --- malloc / heap
2427# =====================================================================
2428
2429
2430# Linux + glibc, Windows, macOS, FreeBSD, NetBSD
if hasattr(_psplatform, "heap_info"):

    def heap_info():
        """Return low-level heap statistics from the C heap allocator
        (glibc).

        - `heap_used`: the total number of bytes allocated via
          malloc/free. These are typically allocations smaller than
          MMAP_THRESHOLD.

        - `mmap_used`: the total number of bytes allocated via `mmap()`
          or via large ``malloc()`` allocations.

        - `heap_count` (Windows only): number of private heaps created
          via `HeapCreate()`.
        """
        fields = _psplatform.heap_info()
        return _ntp.pheap(*fields)

    def heap_trim():
        """Ask the underlying allocator to release any unused memory
        it is holding in the heap (typically small `malloc()`
        allocations).

        In practice, modern allocators rarely comply, so this is not a
        general-purpose memory-reduction tool and won't meaningfully
        shrink RSS in real programs. Its primary value is in **leak
        detection tools**: calling `heap_trim()` before taking
        measurements helps reduce allocator noise, giving a cleaner
        baseline so that changes in `heap_used` come from the code
        under test, not from internal allocator caching or
        fragmentation. Its effectiveness depends on allocator behavior
        and fragmentation patterns.
        """
        _psplatform.heap_trim()

    __all__.extend(["heap_info", "heap_trim"])
2469
2470
2471# =====================================================================
2472
2473
def _set_debug(value):
    """Enable or disable the PSUTIL_DEBUG option, which makes psutil
    print debugging messages to stderr.
    """
    import psutil._common

    enabled = bool(value)
    psutil._common.PSUTIL_DEBUG = enabled
    _psplatform.cext.set_debug(enabled)
2482
2483
# Remove this internal helper from the module namespace so it is not
# exposed as a `psutil.*` attribute (presumably it was only needed as
# a decorator earlier in this file — NOTE(review): confirm).
del memoize_when_activated