1# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""psutil is a cross-platform library for retrieving information on
6running processes and system utilization (CPU, memory, disks, network,
7sensors) in Python. Supported platforms:
8
9 - Linux
10 - Windows
11 - macOS
12 - FreeBSD
13 - OpenBSD
14 - NetBSD
15 - Sun Solaris
16 - AIX
17
Supported Python versions are CPython 3.6+ and PyPy.
19"""
20
21import collections
22import contextlib
23import datetime
24import functools
25import os
26import signal
27import socket
28import subprocess
29import sys
30import threading
31import time
32
33try:
34 import pwd
35except ImportError:
36 pwd = None
37
38from . import _common
39from . import _ntuples as _ntp
40from ._common import AIX
41from ._common import BSD
42from ._common import CONN_CLOSE
43from ._common import CONN_CLOSE_WAIT
44from ._common import CONN_CLOSING
45from ._common import CONN_ESTABLISHED
46from ._common import CONN_FIN_WAIT1
47from ._common import CONN_FIN_WAIT2
48from ._common import CONN_LAST_ACK
49from ._common import CONN_LISTEN
50from ._common import CONN_NONE
51from ._common import CONN_SYN_RECV
52from ._common import CONN_SYN_SENT
53from ._common import CONN_TIME_WAIT
54from ._common import FREEBSD
55from ._common import LINUX
56from ._common import MACOS
57from ._common import NETBSD
58from ._common import NIC_DUPLEX_FULL
59from ._common import NIC_DUPLEX_HALF
60from ._common import NIC_DUPLEX_UNKNOWN
61from ._common import OPENBSD
62from ._common import OSX # deprecated alias
63from ._common import POSIX
64from ._common import POWER_TIME_UNKNOWN
65from ._common import POWER_TIME_UNLIMITED
66from ._common import STATUS_DEAD
67from ._common import STATUS_DISK_SLEEP
68from ._common import STATUS_IDLE
69from ._common import STATUS_LOCKED
70from ._common import STATUS_PARKED
71from ._common import STATUS_RUNNING
72from ._common import STATUS_SLEEPING
73from ._common import STATUS_STOPPED
74from ._common import STATUS_TRACING_STOP
75from ._common import STATUS_WAITING
76from ._common import STATUS_WAKING
77from ._common import STATUS_ZOMBIE
78from ._common import SUNOS
79from ._common import WINDOWS
80from ._common import AccessDenied
81from ._common import Error
82from ._common import NoSuchProcess
83from ._common import TimeoutExpired
84from ._common import ZombieProcess
85from ._common import debug
86from ._common import memoize_when_activated
87from ._common import wrap_numbers as _wrap_numbers
88
89if LINUX:
90 # This is public API and it will be retrieved from _pslinux.py
91 # via sys.modules.
92 PROCFS_PATH = "/proc"
93
94 from . import _pslinux as _psplatform
95 from ._pslinux import IOPRIO_CLASS_BE # noqa: F401
96 from ._pslinux import IOPRIO_CLASS_IDLE # noqa: F401
97 from ._pslinux import IOPRIO_CLASS_NONE # noqa: F401
98 from ._pslinux import IOPRIO_CLASS_RT # noqa: F401
99
100elif WINDOWS:
101 from . import _pswindows as _psplatform
102 from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS # noqa: F401
103 from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS # noqa: F401
104 from ._psutil_windows import HIGH_PRIORITY_CLASS # noqa: F401
105 from ._psutil_windows import IDLE_PRIORITY_CLASS # noqa: F401
106 from ._psutil_windows import NORMAL_PRIORITY_CLASS # noqa: F401
107 from ._psutil_windows import REALTIME_PRIORITY_CLASS # noqa: F401
108 from ._pswindows import CONN_DELETE_TCB # noqa: F401
109 from ._pswindows import IOPRIO_HIGH # noqa: F401
110 from ._pswindows import IOPRIO_LOW # noqa: F401
111 from ._pswindows import IOPRIO_NORMAL # noqa: F401
112 from ._pswindows import IOPRIO_VERYLOW # noqa: F401
113
114elif MACOS:
115 from . import _psosx as _psplatform
116
117elif BSD:
118 from . import _psbsd as _psplatform
119
120elif SUNOS:
121 from . import _pssunos as _psplatform
122 from ._pssunos import CONN_BOUND # noqa: F401
123 from ._pssunos import CONN_IDLE # noqa: F401
124
125 # This is public writable API which is read from _pslinux.py and
126 # _pssunos.py via sys.modules.
127 PROCFS_PATH = "/proc"
128
129elif AIX:
130 from . import _psaix as _psplatform
131
    # This is public API and it will be retrieved from _psaix.py
    # via sys.modules.
134 PROCFS_PATH = "/proc"
135
136else: # pragma: no cover
137 msg = f"platform {sys.platform} is not supported"
138 raise NotImplementedError(msg)
139
140
141# fmt: off
142__all__ = [
143 # exceptions
144 "Error", "NoSuchProcess", "ZombieProcess", "AccessDenied",
145 "TimeoutExpired",
146
147 # constants
148 "version_info", "__version__",
149
150 "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
151 "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
152 "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_PARKED",
153
154 "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
155 "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
156 "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
157 # "CONN_IDLE", "CONN_BOUND",
158
159 "AF_LINK",
160
161 "NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN",
162
163 "POWER_TIME_UNKNOWN", "POWER_TIME_UNLIMITED",
164
165 "BSD", "FREEBSD", "LINUX", "NETBSD", "OPENBSD", "MACOS", "OSX", "POSIX",
166 "SUNOS", "WINDOWS", "AIX",
167
168 # "RLIM_INFINITY", "RLIMIT_AS", "RLIMIT_CORE", "RLIMIT_CPU", "RLIMIT_DATA",
169 # "RLIMIT_FSIZE", "RLIMIT_LOCKS", "RLIMIT_MEMLOCK", "RLIMIT_NOFILE",
170 # "RLIMIT_NPROC", "RLIMIT_RSS", "RLIMIT_STACK", "RLIMIT_MSGQUEUE",
171 # "RLIMIT_NICE", "RLIMIT_RTPRIO", "RLIMIT_RTTIME", "RLIMIT_SIGPENDING",
172
173 # classes
174 "Process", "Popen",
175
176 # functions
177 "pid_exists", "pids", "process_iter", "wait_procs", # proc
178 "virtual_memory", "swap_memory", # memory
179 "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu
180 "cpu_stats", # "cpu_freq", "getloadavg"
181 "net_io_counters", "net_connections", "net_if_addrs", # network
182 "net_if_stats",
183 "disk_io_counters", "disk_partitions", "disk_usage", # disk
184 # "sensors_temperatures", "sensors_battery", "sensors_fans" # sensors
185 "users", "boot_time", # others
186]
187# fmt: on
188
189
190__all__.extend(_psplatform.__extra__all__)
191
192# Linux, FreeBSD
193if hasattr(_psplatform.Process, "rlimit"):
194 # Populate global namespace with RLIM* constants.
195 _globals = globals()
196 _name = None
197 for _name in dir(_psplatform.cext):
198 if _name.startswith('RLIM') and _name.isupper():
199 _globals[_name] = getattr(_psplatform.cext, _name)
200 __all__.append(_name)
201 del _globals, _name
202
203AF_LINK = _psplatform.AF_LINK
204
205__author__ = "Giampaolo Rodola'"
206__version__ = "7.2.3"
207version_info = tuple(int(num) for num in __version__.split('.'))
208
209_timer = getattr(time, 'monotonic', time.time)
210_TOTAL_PHYMEM = None
211_LOWEST_PID = None
212_SENTINEL = object()
213
214# Sanity check in case the user messed up with psutil installation
215# or did something weird with sys.path. In this case we might end
216# up importing a python module using a C extension module which
217# was compiled for a different version of psutil.
218# We want to prevent that by failing sooner rather than later.
219# See: https://github.com/giampaolo/psutil/issues/564
220if int(__version__.replace('.', '')) != getattr(
221 _psplatform.cext, 'version', None
222):
223 msg = f"version conflict: {_psplatform.cext.__file__!r} C extension "
224 msg += "module was built for another version of psutil"
225 if hasattr(_psplatform.cext, 'version'):
226 v = ".".join(list(str(_psplatform.cext.version)))
227 msg += f" ({v} instead of {__version__})"
228 else:
229 msg += f" (different than {__version__})"
230 what = getattr(
231 _psplatform.cext,
232 "__file__",
233 "the existing psutil install directory",
234 )
235 msg += f"; you may try to 'pip uninstall psutil', manually remove {what}"
236 msg += " or clean the virtual env somehow, then reinstall"
237 raise ImportError(msg)
238
239
240# =====================================================================
241# --- Utils
242# =====================================================================
243
244
245if hasattr(_psplatform, 'ppid_map'):
246 # Faster version (Windows and Linux).
247 _ppid_map = _psplatform.ppid_map
248else: # pragma: no cover
249
250 def _ppid_map():
251 """Return a {pid: ppid, ...} dict for all running processes in
252 one shot. Used to speed up Process.children().
253 """
254 ret = {}
255 for pid in pids():
256 try:
257 ret[pid] = _psplatform.Process(pid).ppid()
258 except (NoSuchProcess, ZombieProcess):
259 pass
260 return ret
261
262
263def _pprint_secs(secs):
264 """Format seconds in a human readable form."""
265 now = time.time()
266 secs_ago = int(now - secs)
267 fmt = "%H:%M:%S" if secs_ago < 60 * 60 * 24 else "%Y-%m-%d %H:%M:%S"
268 return datetime.datetime.fromtimestamp(secs).strftime(fmt)
269
270
271def _check_conn_kind(kind):
272 """Check net_connections()'s `kind` parameter."""
273 kinds = tuple(_common.conn_tmap)
274 if kind not in kinds:
275 msg = f"invalid kind argument {kind!r}; valid ones are: {kinds}"
276 raise ValueError(msg)
277
278
279# =====================================================================
280# --- Process class
281# =====================================================================
282
283
284class Process:
285 """Represents an OS process with the given PID.
    If PID is omitted, the current process PID (os.getpid()) is used.
287 Raise NoSuchProcess if PID does not exist.
288
    Note that most of the methods of this class do not check whether
    the PID of the process being queried has been reused. That means
    you may end up retrieving information for another process.
292
293 The only exceptions for which process identity is pre-emptively
294 checked and guaranteed are:
295
296 - parent()
297 - children()
298 - nice() (set)
299 - ionice() (set)
300 - rlimit() (set)
     - cpu_affinity() (set)
302 - suspend()
303 - resume()
304 - send_signal()
305 - terminate()
306 - kill()
307
308 To prevent this problem for all other methods you can use
309 is_running() before querying the process.
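
    Example (a minimal sketch; the printed name is illustrative):

    >>> import psutil
    >>> p = psutil.Process()  # the current process
    >>> if p.is_running():
    ...     p.name()
    ...
    'python'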
310 """
311
312 def __init__(self, pid=None):
313 self._init(pid)
314
315 def _init(self, pid, _ignore_nsp=False):
316 if pid is None:
317 pid = os.getpid()
318 else:
319 if pid < 0:
320 msg = f"pid must be a positive integer (got {pid})"
321 raise ValueError(msg)
322 try:
323 _psplatform.cext.check_pid_range(pid)
324 except OverflowError as err:
325 msg = "process PID out of range"
326 raise NoSuchProcess(pid, msg=msg) from err
327
328 self._pid = pid
329 self._name = None
330 self._exe = None
331 self._create_time = None
332 self._gone = False
333 self._pid_reused = False
334 self._hash = None
335 self._lock = threading.RLock()
336 # used for caching on Windows only (on POSIX ppid may change)
337 self._ppid = None
338 # platform-specific modules define an _psplatform.Process
339 # implementation class
340 self._proc = _psplatform.Process(pid)
341 self._last_sys_cpu_times = None
342 self._last_proc_cpu_times = None
343 self._exitcode = _SENTINEL
344 self._ident = (self.pid, None)
345 try:
346 self._ident = self._get_ident()
347 except AccessDenied:
348 # This should happen on Windows only, since we use the fast
349 # create time method. AFAIK, on all other platforms we are
350 # able to get create time for all PIDs.
351 pass
352 except ZombieProcess:
353 # Zombies can still be queried by this class (although
354 # not always) and pids() return them so just go on.
355 pass
356 except NoSuchProcess:
357 if not _ignore_nsp:
358 msg = "process PID not found"
359 raise NoSuchProcess(pid, msg=msg) from None
360 self._gone = True
361
362 def _get_ident(self):
363 """Return a (pid, uid) tuple which is supposed to identify a
364 Process instance univocally over time. The PID alone is not
365 enough, as it can be assigned to a new process after this one
366 terminates, so we add process creation time to the mix. We need
367 this in order to prevent killing the wrong process later on.
368 This is also known as PID reuse or PID recycling problem.
369
370 The reliability of this strategy mostly depends on
371 create_time() precision, which is 0.01 secs on Linux. The
372 assumption is that, after a process terminates, the kernel
373 won't reuse the same PID after such a short period of time
374 (0.01 secs). Technically this is inherently racy, but
375 practically it should be good enough.
376
377 NOTE: unreliable on FreeBSD and OpenBSD as ctime is subject to
378 system clock updates.
379 """
380
381 if WINDOWS:
382 # Use create_time() fast method in order to speedup
383 # `process_iter()`. This means we'll get AccessDenied for
384 # most ADMIN processes, but that's fine since it means
385 # we'll also get AccessDenied on kill().
386 # https://github.com/giampaolo/psutil/issues/2366#issuecomment-2381646555
387 self._create_time = self._proc.create_time(fast_only=True)
388 return (self.pid, self._create_time)
389 elif LINUX or NETBSD or OSX:
390 # Use 'monotonic' process starttime since boot to form unique
391 # process identity, since it is stable over changes to system
392 # time.
393 return (self.pid, self._proc.create_time(monotonic=True))
394 else:
395 return (self.pid, self.create_time())
396
397 def __str__(self):
398 info = collections.OrderedDict()
399 info["pid"] = self.pid
400 if self._name:
401 info['name'] = self._name
402 with self.oneshot():
403 if self._pid_reused:
404 info["status"] = "terminated + PID reused"
405 else:
406 try:
407 info["name"] = self.name()
408 info["status"] = self.status()
409 except ZombieProcess:
410 info["status"] = "zombie"
411 except NoSuchProcess:
412 info["status"] = "terminated"
413 except AccessDenied:
414 pass
415
416 if self._exitcode not in {_SENTINEL, None}:
417 info["exitcode"] = self._exitcode
418 if self._create_time is not None:
419 info['started'] = _pprint_secs(self._create_time)
420
421 return "{}.{}({})".format(
422 self.__class__.__module__,
423 self.__class__.__name__,
424 ", ".join([f"{k}={v!r}" for k, v in info.items()]),
425 )
426
427 __repr__ = __str__
428
429 def __eq__(self, other):
430 # Test for equality with another Process object based
431 # on PID and creation time.
432 if not isinstance(other, Process):
433 return NotImplemented
434 if OPENBSD or NETBSD or SUNOS: # pragma: no cover
435 # Zombie processes on Open/NetBSD/illumos/Solaris have a
436 # creation time of 0.0. This covers the case when a process
437 # started normally (so it has a ctime), then it turned into a
438 # zombie. It's important to do this because is_running()
439 # depends on __eq__.
440 pid1, ident1 = self._ident
441 pid2, ident2 = other._ident
442 if pid1 == pid2:
443 if ident1 and not ident2:
444 try:
445 return self.status() == STATUS_ZOMBIE
446 except Error:
447 pass
448 return self._ident == other._ident
449
450 def __ne__(self, other):
451 return not self == other
452
453 def __hash__(self):
454 if self._hash is None:
455 self._hash = hash(self._ident)
456 return self._hash
457
458 def _raise_if_pid_reused(self):
459 """Raises NoSuchProcess in case process PID has been reused."""
        if not self.is_running() and self._pid_reused:
461 # We may directly raise NSP in here already if PID is just
462 # not running, but I prefer NSP to be raised naturally by
463 # the actual Process API call. This way unit tests will tell
464 # us if the API is broken (aka don't raise NSP when it
465 # should). We also remain consistent with all other "get"
466 # APIs which don't use _raise_if_pid_reused().
467 msg = "process no longer exists and its PID has been reused"
468 raise NoSuchProcess(self.pid, self._name, msg=msg)
469
470 @property
471 def pid(self):
472 """The process PID."""
473 return self._pid
474
475 # --- utility methods
476
477 @contextlib.contextmanager
478 def oneshot(self):
479 """Utility context manager which considerably speeds up the
480 retrieval of multiple process information at the same time.
481
482 Internally different process info (e.g. name, ppid, uids,
        gids, ...) may be fetched by using the same routine, but
        only one piece of information is returned and the others
        are discarded.
485 When using this context manager the internal routine is
486 executed once (in the example below on name()) and the
487 other info are cached.
488
489 The cache is cleared when exiting the context manager block.
        The advice is to use this every time you retrieve more than
        one piece of information about the process. If you're lucky,
        you'll get a hell of a speedup.
493
494 >>> import psutil
495 >>> p = psutil.Process()
496 >>> with p.oneshot():
497 ... p.name() # collect multiple info
498 ... p.cpu_times() # return cached value
499 ... p.cpu_percent() # return cached value
500 ... p.create_time() # return cached value
501 ...
502 >>>
503 """
504 with self._lock:
505 if hasattr(self, "_cache"):
506 # NOOP: this covers the use case where the user enters the
507 # context twice:
508 #
509 # >>> with p.oneshot():
510 # ... with p.oneshot():
511 # ...
512 #
513 # Also, since as_dict() internally uses oneshot()
514 # I expect that the code below will be a pretty common
515 # "mistake" that the user will make, so let's guard
516 # against that:
517 #
518 # >>> with p.oneshot():
519 # ... p.as_dict()
520 # ...
521 yield
522 else:
523 try:
524 # cached in case cpu_percent() is used
525 self.cpu_times.cache_activate(self)
526 # cached in case memory_percent() is used
527 self.memory_info.cache_activate(self)
528 # cached in case parent() is used
529 self.ppid.cache_activate(self)
530 # cached in case username() is used
531 if POSIX:
532 self.uids.cache_activate(self)
533 # specific implementation cache
534 self._proc.oneshot_enter()
535 yield
536 finally:
537 self.cpu_times.cache_deactivate(self)
538 self.memory_info.cache_deactivate(self)
539 self.ppid.cache_deactivate(self)
540 if POSIX:
541 self.uids.cache_deactivate(self)
542 self._proc.oneshot_exit()
543
544 def as_dict(self, attrs=None, ad_value=None):
545 """Utility method returning process information as a
546 hashable dictionary.
547 If *attrs* is specified it must be a list of strings
548 reflecting available Process class' attribute names
549 (e.g. ['cpu_times', 'name']) else all public (read
550 only) attributes are assumed.
551 *ad_value* is the value which gets assigned in case
552 AccessDenied or ZombieProcess exception is raised when
553 retrieving that particular process information.
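
        Example (illustrative output; values vary per process):

        >>> import psutil
        >>> psutil.Process().as_dict(attrs=['pid', 'name', 'username'])
        {'pid': 1394, 'name': 'python', 'username': 'giampaolo'}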
554 """
555 valid_names = _as_dict_attrnames
556 if attrs is not None:
557 if not isinstance(attrs, (list, tuple, set, frozenset)):
558 msg = f"invalid attrs type {type(attrs)}"
559 raise TypeError(msg)
560 attrs = set(attrs)
561 invalid_names = attrs - valid_names
562 if invalid_names:
563 msg = "invalid attr name{} {}".format(
564 "s" if len(invalid_names) > 1 else "",
565 ", ".join(map(repr, invalid_names)),
566 )
567 raise ValueError(msg)
568
569 retdict = {}
570 ls = attrs or valid_names
571 with self.oneshot():
572 for name in ls:
573 try:
574 if name == 'pid':
575 ret = self.pid
576 else:
577 meth = getattr(self, name)
578 ret = meth()
579 except (AccessDenied, ZombieProcess):
580 ret = ad_value
581 except NotImplementedError:
582 # in case of not implemented functionality (may happen
583 # on old or exotic systems) we want to crash only if
584 # the user explicitly asked for that particular attr
585 if attrs:
586 raise
587 continue
588 retdict[name] = ret
589 return retdict
590
591 def parent(self):
592 """Return the parent process as a Process object pre-emptively
593 checking whether PID has been reused.
594 If no parent is known return None.
595 """
596 lowest_pid = _LOWEST_PID if _LOWEST_PID is not None else pids()[0]
597 if self.pid == lowest_pid:
598 return None
599 ppid = self.ppid()
600 if ppid is not None:
601 # Get a fresh (non-cached) ctime in case the system clock
602 # was updated. TODO: use a monotonic ctime on platforms
603 # where it's supported.
604 proc_ctime = Process(self.pid).create_time()
605 try:
606 parent = Process(ppid)
607 if parent.create_time() <= proc_ctime:
608 return parent
609 # ...else ppid has been reused by another process
610 except NoSuchProcess:
611 pass
612
613 def parents(self):
614 """Return the parents of this process as a list of Process
615 instances. If no parents are known return an empty list.
616 """
617 parents = []
618 proc = self.parent()
619 while proc is not None:
620 parents.append(proc)
621 proc = proc.parent()
622 return parents
623
624 def is_running(self):
625 """Return whether this process is running.
626
627 It also checks if PID has been reused by another process, in
628 which case it will remove the process from `process_iter()`
629 internal cache and return False.
630 """
631 if self._gone or self._pid_reused:
632 return False
633 try:
634 # Checking if PID is alive is not enough as the PID might
635 # have been reused by another process. Process identity /
636 # uniqueness over time is guaranteed by (PID + creation
637 # time) and that is verified in __eq__.
638 self._pid_reused = self != Process(self.pid)
639 if self._pid_reused:
640 _pids_reused.add(self.pid)
641 raise NoSuchProcess(self.pid)
642 return True
643 except ZombieProcess:
644 # We should never get here as it's already handled in
645 # Process.__init__; here just for extra safety.
646 return True
647 except NoSuchProcess:
648 self._gone = True
649 return False
650
651 # --- actual API
652
653 @memoize_when_activated
654 def ppid(self):
655 """The process parent PID.
656 On Windows the return value is cached after first call.
657 """
658 # On POSIX we don't want to cache the ppid as it may unexpectedly
659 # change to 1 (init) in case this process turns into a zombie:
660 # https://github.com/giampaolo/psutil/issues/321
661 # http://stackoverflow.com/questions/356722/
662
663 # XXX should we check creation time here rather than in
664 # Process.parent()?
665 self._raise_if_pid_reused()
666 if POSIX:
667 return self._proc.ppid()
668 else: # pragma: no cover
669 self._ppid = self._ppid or self._proc.ppid()
670 return self._ppid
671
672 def name(self):
673 """The process name. The return value is cached after first call."""
674 # Process name is only cached on Windows as on POSIX it may
675 # change, see:
676 # https://github.com/giampaolo/psutil/issues/692
677 if WINDOWS and self._name is not None:
678 return self._name
679 name = self._proc.name()
680 if POSIX and len(name) >= 15:
681 # On UNIX the name gets truncated to the first 15 characters.
682 # If it matches the first part of the cmdline we return that
683 # one instead because it's usually more explicative.
684 # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
685 try:
686 cmdline = self.cmdline()
687 except (AccessDenied, ZombieProcess):
688 # Just pass and return the truncated name: it's better
689 # than nothing. Note: there are actual cases where a
690 # zombie process can return a name() but not a
691 # cmdline(), see:
692 # https://github.com/giampaolo/psutil/issues/2239
693 pass
694 else:
695 if cmdline:
696 extended_name = os.path.basename(cmdline[0])
697 if extended_name.startswith(name):
698 name = extended_name
699 self._name = name
700 self._proc._name = name
701 return name
702
703 def exe(self):
704 """The process executable as an absolute path.
705 May also be an empty string.
706 The return value is cached after first call.
707 """
708
709 def guess_it(fallback):
710 # try to guess exe from cmdline[0] in absence of a native
711 # exe representation
712 cmdline = self.cmdline()
713 if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
714 exe = cmdline[0] # the possible exe
715 # Attempt to guess only in case of an absolute path.
716 # It is not safe otherwise as the process might have
717 # changed cwd.
718 if (
719 os.path.isabs(exe)
720 and os.path.isfile(exe)
721 and os.access(exe, os.X_OK)
722 ):
723 return exe
724 if isinstance(fallback, AccessDenied):
725 raise fallback
726 return fallback
727
728 if self._exe is None:
729 try:
730 exe = self._proc.exe()
731 except AccessDenied as err:
732 return guess_it(fallback=err)
733 else:
734 if not exe:
735 # underlying implementation can legitimately return an
736 # empty string; if that's the case we don't want to
737 # raise AD while guessing from the cmdline
738 try:
739 exe = guess_it(fallback=exe)
740 except AccessDenied:
741 pass
742 self._exe = exe
743 return self._exe
744
745 def cmdline(self):
746 """The command line this process has been called with."""
747 return self._proc.cmdline()
748
749 def status(self):
750 """The process current status as a STATUS_* constant."""
751 try:
752 return self._proc.status()
753 except ZombieProcess:
754 return STATUS_ZOMBIE
755
756 def username(self):
757 """The name of the user that owns the process.
758 On UNIX this is calculated by using *real* process uid.
759 """
760 if POSIX:
761 if pwd is None:
762 # might happen if python was installed from sources
763 msg = "requires pwd module shipped with standard python"
764 raise ImportError(msg)
765 real_uid = self.uids().real
766 try:
767 return pwd.getpwuid(real_uid).pw_name
768 except KeyError:
769 # the uid can't be resolved by the system
770 return str(real_uid)
771 else:
772 return self._proc.username()
773
774 def create_time(self):
775 """The process creation time as a floating point number
776 expressed in seconds since the epoch (seconds since January 1,
777 1970, at midnight UTC). The return value, which is cached after
778 first call, is based on the system clock, which means it may be
779 affected by changes such as manual adjustments or time
780 synchronization (e.g. NTP).
781 """
782 if self._create_time is None:
783 self._create_time = self._proc.create_time()
784 return self._create_time
785
786 def cwd(self):
787 """Process current working directory as an absolute path."""
788 return self._proc.cwd()
789
790 def nice(self, value=None):
791 """Get or set process niceness (priority)."""
792 if value is None:
793 return self._proc.nice_get()
794 else:
795 self._raise_if_pid_reused()
796 self._proc.nice_set(value)
797
798 if POSIX:
799
800 @memoize_when_activated
801 def uids(self):
802 """Return process UIDs as a (real, effective, saved)
803 namedtuple.
804 """
805 return self._proc.uids()
806
807 def gids(self):
808 """Return process GIDs as a (real, effective, saved)
809 namedtuple.
810 """
811 return self._proc.gids()
812
813 def terminal(self):
814 """The terminal associated with this process, if any,
815 else None.
816 """
817 return self._proc.terminal()
818
819 def num_fds(self):
820 """Return the number of file descriptors opened by this
821 process (POSIX only).
822 """
823 return self._proc.num_fds()
824
825 # Linux, BSD, AIX and Windows only
826 if hasattr(_psplatform.Process, "io_counters"):
827
828 def io_counters(self):
829 """Return process I/O statistics as a
830 (read_count, write_count, read_bytes, write_bytes)
831 namedtuple.
832 Those are the number of read/write calls performed and the
833 amount of bytes read and written by the process.
834 """
835 return self._proc.io_counters()
836
837 # Linux and Windows
838 if hasattr(_psplatform.Process, "ionice_get"):
839
840 def ionice(self, ioclass=None, value=None):
841 """Get or set process I/O niceness (priority).
842
843 On Linux *ioclass* is one of the IOPRIO_CLASS_* constants.
844 *value* is a number which goes from 0 to 7. The higher the
845 value, the lower the I/O priority of the process.
846
847 On Windows only *ioclass* is used and it can be set to 2
848 (normal), 1 (low) or 0 (very low).
849
850 Available on Linux and Windows > Vista only.
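
            Example (Linux; a minimal sketch, the returned values are
            illustrative):

            >>> import psutil
            >>> p = psutil.Process()
            >>> p.ionice(psutil.IOPRIO_CLASS_IDLE)  # set I/O priority to idle
            >>> p.ionice()                          # get
            pionice(ioclass=<IOPRIO_CLASS_IDLE: 3>, value=0)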
851 """
852 if ioclass is None:
853 if value is not None:
854 msg = "'ioclass' argument must be specified"
855 raise ValueError(msg)
856 return self._proc.ionice_get()
857 else:
858 self._raise_if_pid_reused()
859 return self._proc.ionice_set(ioclass, value)
860
861 # Linux / FreeBSD only
862 if hasattr(_psplatform.Process, "rlimit"):
863
864 def rlimit(self, resource, limits=None):
865 """Get or set process resource limits as a (soft, hard)
866 tuple.
867
868 *resource* is one of the RLIMIT_* constants.
869 *limits* is supposed to be a (soft, hard) tuple.
870
871 See "man prlimit" for further info.
872 Available on Linux and FreeBSD only.
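
            Example (Linux; a minimal sketch, the limits shown are
            illustrative):

            >>> import psutil
            >>> p = psutil.Process()
            >>> p.rlimit(psutil.RLIMIT_NOFILE)                  # get
            (1024, 1048576)
            >>> p.rlimit(psutil.RLIMIT_NOFILE, (128, 1048576))  # set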
873 """
874 if limits is not None:
875 self._raise_if_pid_reused()
876 return self._proc.rlimit(resource, limits)
877
878 # Windows, Linux and FreeBSD only
879 if hasattr(_psplatform.Process, "cpu_affinity_get"):
880
881 def cpu_affinity(self, cpus=None):
882 """Get or set process CPU affinity.
883 If specified, *cpus* must be a list of CPUs for which you
884 want to set the affinity (e.g. [0, 1]).
            If an empty list is passed, all eligible CPUs are assumed
886 (and set).
887 (Windows, Linux and BSD only).
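
            Example (illustrative; the available CPUs depend on the
            machine):

            >>> import psutil
            >>> p = psutil.Process()
            >>> p.cpu_affinity()
            [0, 1, 2, 3]
            >>> p.cpu_affinity([0, 1])  # restrict to CPUs 0 and 1
            >>> p.cpu_affinity([])      # reset to all eligible CPUs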
888 """
889 if cpus is None:
890 return sorted(set(self._proc.cpu_affinity_get()))
891 else:
892 self._raise_if_pid_reused()
893 if not cpus:
894 if hasattr(self._proc, "_get_eligible_cpus"):
895 cpus = self._proc._get_eligible_cpus()
896 else:
897 cpus = tuple(range(len(cpu_times(percpu=True))))
898 self._proc.cpu_affinity_set(list(set(cpus)))
899
900 # Linux, FreeBSD, SunOS
901 if hasattr(_psplatform.Process, "cpu_num"):
902
903 def cpu_num(self):
904 """Return what CPU this process is currently running on.
905 The returned number should be <= psutil.cpu_count()
906 and <= len(psutil.cpu_percent(percpu=True)).
907 It may be used in conjunction with
908 psutil.cpu_percent(percpu=True) to observe the system
909 workload distributed across CPUs.
910 """
911 return self._proc.cpu_num()
912
    # All platforms have it, but maybe not in the future.
914 if hasattr(_psplatform.Process, "environ"):
915
916 def environ(self):
917 """The environment variables of the process as a dict. Note: this
918 might not reflect changes made after the process started.
919 """
920 return self._proc.environ()
921
922 if WINDOWS:
923
924 def num_handles(self):
925 """Return the number of handles opened by this process
926 (Windows only).
927 """
928 return self._proc.num_handles()
929
930 def num_ctx_switches(self):
931 """Return the number of voluntary and involuntary context
932 switches performed by this process.
933 """
934 return self._proc.num_ctx_switches()
935
936 def num_threads(self):
937 """Return the number of threads used by this process."""
938 return self._proc.num_threads()
939
940 if hasattr(_psplatform.Process, "threads"):
941
942 def threads(self):
943 """Return threads opened by process as a list of
944 (id, user_time, system_time) namedtuples representing
945 thread id and thread CPU times (user/system).
946 On OpenBSD this method requires root access.
947 """
948 return self._proc.threads()
949
950 def children(self, recursive=False):
951 """Return the children of this process as a list of Process
952 instances, pre-emptively checking whether PID has been reused.
        If *recursive* is True also return all the descendants
        (grandchildren and so on).
954
955 Example (A == this process):
956
957 A ─┐
958 │
959 ├─ B (child) ─┐
960 │ └─ X (grandchild) ─┐
961 │ └─ Y (great grandchild)
962 ├─ C (child)
963 └─ D (child)
964
965 >>> import psutil
966 >>> p = psutil.Process()
967 >>> p.children()
968 B, C, D
969 >>> p.children(recursive=True)
970 B, X, Y, C, D
971
972 Note that in the example above if process X disappears
973 process Y won't be listed as the reference to process A
974 is lost.
975 """
976 self._raise_if_pid_reused()
977 ppid_map = _ppid_map()
978 # Get a fresh (non-cached) ctime in case the system clock was
979 # updated. TODO: use a monotonic ctime on platforms where it's
980 # supported.
981 proc_ctime = Process(self.pid).create_time()
982 ret = []
983 if not recursive:
984 for pid, ppid in ppid_map.items():
985 if ppid == self.pid:
986 try:
987 child = Process(pid)
988 # if child happens to be older than its parent
989 # (self) it means child's PID has been reused
990 if proc_ctime <= child.create_time():
991 ret.append(child)
992 except (NoSuchProcess, ZombieProcess):
993 pass
994 else:
995 # Construct a {pid: [child pids]} dict
996 reverse_ppid_map = collections.defaultdict(list)
997 for pid, ppid in ppid_map.items():
998 reverse_ppid_map[ppid].append(pid)
999 # Recursively traverse that dict, starting from self.pid,
1000 # such that we only call Process() on actual children
1001 seen = set()
1002 stack = [self.pid]
1003 while stack:
1004 pid = stack.pop()
1005 if pid in seen:
1006 # Since pids can be reused while the ppid_map is
1007 # constructed, there may be rare instances where
1008 # there's a cycle in the recorded process "tree".
1009 continue
1010 seen.add(pid)
1011 for child_pid in reverse_ppid_map[pid]:
1012 try:
1013 child = Process(child_pid)
1014 # if child happens to be older than its parent
1015 # (self) it means child's PID has been reused
1016 intime = proc_ctime <= child.create_time()
1017 if intime:
1018 ret.append(child)
1019 stack.append(child_pid)
1020 except (NoSuchProcess, ZombieProcess):
1021 pass
1022 return ret
1023
1024 def cpu_percent(self, interval=None):
1025 """Return a float representing the current process CPU
1026 utilization as a percentage.
1027
1028 When *interval* is 0.0 or None (default) compares process times
1029 to system CPU times elapsed since last call, returning
        immediately (non-blocking). That means that the first time
        this is called it will return a meaningless 0.0 value which
        you should ignore.
1032
1033 When *interval* is > 0.0 compares process times to system CPU
1034 times elapsed before and after the interval (blocking).
1035
        In this case it is recommended for accuracy that this function
1037 be called with at least 0.1 seconds between calls.
1038
1039 A value > 100.0 can be returned in case of processes running
1040 multiple threads on different CPU cores.
1041
1042 The returned value is explicitly NOT split evenly between
1043 all available logical CPUs. This means that a busy loop process
1044 running on a system with 2 logical CPUs will be reported as
1045 having 100% CPU utilization instead of 50%.
1046
1047 Examples:
1048
1049 >>> import psutil
1050 >>> p = psutil.Process(os.getpid())
1051 >>> # blocking
1052 >>> p.cpu_percent(interval=1)
1053 2.0
1054 >>> # non-blocking (percentage since last call)
1055 >>> p.cpu_percent(interval=None)
1056 2.9
1057 >>>
1058 """
1059 blocking = interval is not None and interval > 0.0
1060 if interval is not None and interval < 0:
1061 msg = f"interval is not positive (got {interval!r})"
1062 raise ValueError(msg)
1063 num_cpus = cpu_count() or 1
1064
1065 def timer():
1066 return _timer() * num_cpus
1067
1068 if blocking:
1069 st1 = timer()
1070 pt1 = self._proc.cpu_times()
1071 time.sleep(interval)
1072 st2 = timer()
1073 pt2 = self._proc.cpu_times()
1074 else:
1075 st1 = self._last_sys_cpu_times
1076 pt1 = self._last_proc_cpu_times
1077 st2 = timer()
1078 pt2 = self._proc.cpu_times()
1079 if st1 is None or pt1 is None:
1080 self._last_sys_cpu_times = st2
1081 self._last_proc_cpu_times = pt2
1082 return 0.0
1083
1084 delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
1085 delta_time = st2 - st1
1086 # reset values for next call in case of interval == None
1087 self._last_sys_cpu_times = st2
1088 self._last_proc_cpu_times = pt2
1089
1090 try:
1091 # This is the utilization split evenly between all CPUs.
1092 # E.g. a busy loop process on a 2-CPU-cores system at this
1093 # point is reported as 50% instead of 100%.
1094 overall_cpus_percent = (delta_proc / delta_time) * 100
1095 except ZeroDivisionError:
1096 # interval was too low
1097 return 0.0
1098 else:
1099 # Note 1:
1100 # in order to emulate "top" we multiply the value for the num
1101 # of CPU cores. This way the busy process will be reported as
1102 # having 100% (or more) usage.
1103 #
1104 # Note 2:
1105 # taskmgr.exe on Windows differs in that it will show 50%
1106 # instead.
1107 #
1108 # Note 3:
1109 # a percentage > 100 is legitimate as it can result from a
1110 # process with multiple threads running on different CPU
1111 # cores (top does the same), see:
1112 # http://stackoverflow.com/questions/1032357
1113 # https://github.com/giampaolo/psutil/issues/474
1114 single_cpu_percent = overall_cpus_percent * num_cpus
1115 return round(single_cpu_percent, 1)
1116
1117 @memoize_when_activated
1118 def cpu_times(self):
1119 """Return a (user, system, children_user, children_system)
1120 namedtuple representing the accumulated process time, in
1121 seconds.
1122 This is similar to os.times() but per-process.
1123 On macOS and Windows children_user and children_system are
1124 always set to 0.
1125 """
1126 return self._proc.cpu_times()
1127
1128 @memoize_when_activated
1129 def memory_info(self):
1130 """Return a namedtuple with variable fields depending on the
1131 platform, representing memory information about the process.
1132
1133 The "portable" fields available on all platforms are `rss` and `vms`.
1134
1135 All numbers are expressed in bytes.
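
        Example (Linux; field names and values are illustrative):

        >>> import psutil
        >>> psutil.Process().memory_info()
        pmem(rss=15491072, vms=84025344, shared=5206016, ...)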
1136 """
1137 return self._proc.memory_info()
1138
1139 def memory_full_info(self):
1140 """This method returns the same information as memory_info(),
        plus, on some platforms (Linux, macOS, Windows), also provides
1142 additional metrics (USS, PSS and swap).
1143 The additional metrics provide a better representation of actual
1144 process memory usage.
1145
1146 Namely USS is the memory which is unique to a process and which
1147 would be freed if the process was terminated right now.
1148
        It does so by passing through the whole process address space.
1150 As such it usually requires higher user privileges than
1151 memory_info() and is considerably slower.
1152 """
1153 return self._proc.memory_full_info()
1154
1155 def memory_percent(self, memtype="rss"):
1156 """Compare process memory to total physical system memory and
1157 calculate process memory utilization as a percentage.
1158 *memtype* argument is a string that dictates what type of
1159 process memory you want to compare against (defaults to "rss").
1160 The list of available strings can be obtained like this:
1161
1162 >>> psutil.Process().memory_info()._fields
1163 ('rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty', 'uss', 'pss')
1164 """
1165 valid_types = list(_ntp.pfullmem._fields)
1166 if memtype not in valid_types:
1167 msg = (
1168 f"invalid memtype {memtype!r}; valid types are"
1169 f" {tuple(valid_types)!r}"
1170 )
1171 raise ValueError(msg)
1172 fun = (
1173 self.memory_info
1174 if memtype in _ntp.pmem._fields
1175 else self.memory_full_info
1176 )
1177 metrics = fun()
1178 value = getattr(metrics, memtype)
1179
1180 # use cached value if available
1181 total_phymem = _TOTAL_PHYMEM or virtual_memory().total
1182 if not total_phymem > 0:
1183 # we should never get here
1184 msg = (
1185 "can't calculate process memory percent because total physical"
1186 f" system memory is not positive ({total_phymem!r})"
1187 )
1188 raise ValueError(msg)
1189 return (value / float(total_phymem)) * 100
1190
1191 if hasattr(_psplatform.Process, "memory_maps"):
1192
1193 def memory_maps(self, grouped=True):
1194 """Return process' mapped memory regions as a list of namedtuples
1195 whose fields are variable depending on the platform.
1196
1197 If *grouped* is True the mapped regions with the same 'path'
1198 are grouped together and the different memory fields are summed.
1199
1200 If *grouped* is False every mapped region is shown as a single
1201 entity and the namedtuple will also include the mapped region's
1202 address space ('addr') and permission set ('perms').
1203 """
1204 it = self._proc.memory_maps()
1205 if grouped:
1206 d = {}
1207 for tupl in it:
1208 path = tupl[2]
1209 nums = tupl[3:]
1210 try:
1211 d[path] = list(map(lambda x, y: x + y, d[path], nums))
1212 except KeyError:
1213 d[path] = nums
1214 return [_ntp.pmmap_grouped(path, *d[path]) for path in d]
1215 else:
1216 return [_ntp.pmmap_ext(*x) for x in it]
1217
1218 def open_files(self):
1219 """Return files opened by process as a list of
1220 (path, fd) namedtuples including the absolute file name
1221 and file descriptor number.
1222 """
1223 return self._proc.open_files()
1224
1225 def net_connections(self, kind='inet'):
1226 """Return socket connections opened by process as a list of
1227 (fd, family, type, laddr, raddr, status) namedtuples.
1228 The *kind* parameter filters for connections that match the
1229 following criteria:
1230
1231 +------------+----------------------------------------------------+
1232 | Kind Value | Connections using |
1233 +------------+----------------------------------------------------+
1234 | inet | IPv4 and IPv6 |
1235 | inet4 | IPv4 |
1236 | inet6 | IPv6 |
1237 | tcp | TCP |
1238 | tcp4 | TCP over IPv4 |
1239 | tcp6 | TCP over IPv6 |
1240 | udp | UDP |
1241 | udp4 | UDP over IPv4 |
1242 | udp6 | UDP over IPv6 |
1243 | unix | UNIX socket (both UDP and TCP protocols) |
1244 | all | the sum of all the possible families and protocols |
1245 +------------+----------------------------------------------------+
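
        Example (illustrative output; file descriptors and addresses
        will differ):

        >>> import psutil
        >>> p = psutil.Process()
        >>> p.net_connections(kind='tcp')
        [pconn(fd=115, family=<AddressFamily.AF_INET: 2>,
               type=<SocketKind.SOCK_STREAM: 1>,
               laddr=addr(ip='10.0.0.1', port=48776),
               raddr=addr(ip='93.186.135.91', port=80),
               status='ESTABLISHED')]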
1246 """
1247 _check_conn_kind(kind)
1248 return self._proc.net_connections(kind)
1249
1250 @_common.deprecated_method(replacement="net_connections")
1251 def connections(self, kind="inet"):
1252 return self.net_connections(kind=kind)
1253
1254 # --- signals
1255
1256 if POSIX:
1257
1258 def _send_signal(self, sig):
1259 assert not self.pid < 0, self.pid
1260 self._raise_if_pid_reused()
1261
1262 pid, ppid, name = self.pid, self._ppid, self._name
1263 if pid == 0:
1264 # see "man 2 kill"
1265 msg = (
1266 "preventing sending signal to process with PID 0 as it "
1267 "would affect every process in the process group of the "
1268 "calling process (os.getpid()) instead of PID 0"
1269 )
1270 raise ValueError(msg)
1271 try:
1272 os.kill(pid, sig)
1273 except ProcessLookupError as err:
1274 if OPENBSD and pid_exists(pid):
1275 # We do this because os.kill() lies in case of
1276 # zombie processes.
1277 raise ZombieProcess(pid, name, ppid) from err
1278 self._gone = True
1279 raise NoSuchProcess(pid, name) from err
1280 except PermissionError as err:
1281 raise AccessDenied(pid, name) from err
1282
1283 def send_signal(self, sig):
1284 """Send a signal *sig* to process pre-emptively checking
        whether PID has been reused (see signal module constants).
1286 On Windows only SIGTERM is valid and is treated as an alias
1287 for kill().
1288 """
1289 if POSIX:
1290 self._send_signal(sig)
1291 else: # pragma: no cover
1292 self._raise_if_pid_reused()
1293 if sig != signal.SIGTERM and not self.is_running():
1294 msg = "process no longer exists"
1295 raise NoSuchProcess(self.pid, self._name, msg=msg)
1296 self._proc.send_signal(sig)
1297
1298 def suspend(self):
1299 """Suspend process execution with SIGSTOP pre-emptively checking
1300 whether PID has been reused.
1301 On Windows this has the effect of suspending all process threads.
1302 """
1303 if POSIX:
1304 self._send_signal(signal.SIGSTOP)
1305 else: # pragma: no cover
1306 self._raise_if_pid_reused()
1307 self._proc.suspend()
1308
1309 def resume(self):
1310 """Resume process execution with SIGCONT pre-emptively checking
1311 whether PID has been reused.
1312 On Windows this has the effect of resuming all process threads.
1313 """
1314 if POSIX:
1315 self._send_signal(signal.SIGCONT)
1316 else: # pragma: no cover
1317 self._raise_if_pid_reused()
1318 self._proc.resume()
1319
1320 def terminate(self):
1321 """Terminate the process with SIGTERM pre-emptively checking
1322 whether PID has been reused.
1323 On Windows this is an alias for kill().
1324 """
1325 if POSIX:
1326 self._send_signal(signal.SIGTERM)
1327 else: # pragma: no cover
1328 self._raise_if_pid_reused()
1329 self._proc.kill()
1330
1331 def kill(self):
1332 """Kill the current process with SIGKILL pre-emptively checking
1333 whether PID has been reused.
1334 """
1335 if POSIX:
1336 self._send_signal(signal.SIGKILL)
1337 else: # pragma: no cover
1338 self._raise_if_pid_reused()
1339 self._proc.kill()
1340
1341 def wait(self, timeout=None):
1342 """Wait for process to terminate, and if process is a children
1343 of os.getpid(), also return its exit code, else None.
1344 On Windows there's no such limitation (exit code is always
1345 returned).
1346
1347 If the process is already terminated, immediately return None
1348 instead of raising NoSuchProcess.
1349
1350 If *timeout* (in seconds) is specified and process is still
1351 alive, raise TimeoutExpired.
1352
1353 If *timeout=0* either return immediately or raise
1354 TimeoutExpired (non-blocking).
1355
1356 To wait for multiple Process objects use psutil.wait_procs().
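
        Example (POSIX; a minimal sketch which spawns a child with the
        "sleep" command, assumed to be available):

        >>> import subprocess
        >>> import psutil
        >>> sub = subprocess.Popen(["sleep", "1"])
        >>> p = psutil.Process(sub.pid)
        >>> p.wait(timeout=5)
        0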
1357 """
1358 if self.pid == 0:
1359 msg = "can't wait for PID 0"
1360 raise ValueError(msg)
1361 if timeout is not None:
1362 if not isinstance(timeout, (int, float)):
1363 msg = f"timeout must be an int or float (got {type(timeout)})"
1364 raise TypeError(msg)
1365 if timeout < 0:
1366 msg = f"timeout must be positive or zero (got {timeout})"
1367 raise ValueError(msg)
1368
1369 if self._exitcode is not _SENTINEL:
1370 return self._exitcode
1371
1372 try:
1373 self._exitcode = self._proc.wait(timeout)
1374 except TimeoutExpired as err:
1375 exc = TimeoutExpired(timeout, pid=self.pid, name=self._name)
1376 raise exc from err
1377
1378 return self._exitcode
1379
1380
1381# The valid attr names which can be processed by Process.as_dict().
1382# fmt: off
1383_as_dict_attrnames = {
1384 x for x in dir(Process) if not x.startswith("_") and x not in
1385 {'send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
1386 'is_running', 'as_dict', 'parent', 'parents', 'children', 'rlimit',
1387 'connections', 'oneshot'}
1388}
1389# fmt: on
1390
1391
1392# =====================================================================
1393# --- Popen class
1394# =====================================================================
1395
1396
1397class Popen(Process):
1398 """Same as subprocess.Popen, but in addition it provides all
1399 psutil.Process methods in a single class.
1400 For the following methods which are common to both classes, psutil
1401 implementation takes precedence:
1402
1403 * send_signal()
1404 * terminate()
1405 * kill()
1406
1407 This is done in order to avoid killing another process in case its
1408 PID has been reused, fixing BPO-6973.
1409
1410 >>> import psutil
1411 >>> from subprocess import PIPE
1412 >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
1413 >>> p.name()
1414 'python'
1415 >>> p.uids()
1416 user(real=1000, effective=1000, saved=1000)
1417 >>> p.username()
1418 'giampaolo'
1419 >>> p.communicate()
1420 ('hi', None)
1421 >>> p.terminate()
1422 >>> p.wait(timeout=2)
1423 0
1424 >>>
1425 """
1426
1427 def __init__(self, *args, **kwargs):
1428 # Explicitly avoid to raise NoSuchProcess in case the process
1429 # spawned by subprocess.Popen terminates too quickly, see:
1430 # https://github.com/giampaolo/psutil/issues/193
1431 self.__subproc = subprocess.Popen(*args, **kwargs)
1432 self._init(self.__subproc.pid, _ignore_nsp=True)
1433
1434 def __dir__(self):
1435 return sorted(set(dir(Popen) + dir(subprocess.Popen)))
1436
1437 def __enter__(self):
1438 if hasattr(self.__subproc, '__enter__'):
1439 self.__subproc.__enter__()
1440 return self
1441
1442 def __exit__(self, *args, **kwargs):
1443 if hasattr(self.__subproc, '__exit__'):
1444 return self.__subproc.__exit__(*args, **kwargs)
1445 else:
1446 if self.stdout:
1447 self.stdout.close()
1448 if self.stderr:
1449 self.stderr.close()
1450 try:
1451 # Flushing a BufferedWriter may raise an error.
1452 if self.stdin:
1453 self.stdin.close()
1454 finally:
1455 # Wait for the process to terminate, to avoid zombies.
1456 self.wait()
1457
1458 def __getattribute__(self, name):
1459 try:
1460 return object.__getattribute__(self, name)
1461 except AttributeError:
1462 try:
1463 return object.__getattribute__(self.__subproc, name)
1464 except AttributeError:
1465 msg = f"{self.__class__!r} has no attribute {name!r}"
1466 raise AttributeError(msg) from None
1467
1468 def wait(self, timeout=None):
1469 if self.__subproc.returncode is not None:
1470 return self.__subproc.returncode
1471 ret = super().wait(timeout)
1472 self.__subproc.returncode = ret
1473 return ret
1474
1475
1476# =====================================================================
1477# --- system processes related functions
1478# =====================================================================
1479
1480
1481def pids():
1482 """Return a list of current running PIDs."""
1483 global _LOWEST_PID
1484 ret = sorted(_psplatform.pids())
1485 _LOWEST_PID = ret[0]
1486 return ret
1487
1488
1489def pid_exists(pid):
1490 """Return True if given PID exists in the current process list.
1491 This is faster than doing "pid in psutil.pids()" and
1492 should be preferred.
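
    Example (the current process is always expected to exist):

    >>> import os
    >>> import psutil
    >>> psutil.pid_exists(os.getpid())
    True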
1493 """
1494 if pid < 0:
1495 return False
1496 elif pid == 0 and POSIX:
1497 # On POSIX we use os.kill() to determine PID existence.
1498 # According to "man 2 kill" PID 0 has a special meaning
1499 # though: it refers to <<every process in the process
        # group of the calling process>> and that is not what we want
        # to do here.
1502 return pid in pids()
1503 else:
1504 return _psplatform.pid_exists(pid)
1505
1506
1507_pmap = {}
1508_pids_reused = set()
1509
1510
1511def process_iter(attrs=None, ad_value=None):
1512 """Return a generator yielding a Process instance for all
1513 running processes.
1514
1515 Every new Process instance is only created once and then cached
1516 into an internal table which is updated every time this is used.
1517 Cache can optionally be cleared via `process_iter.cache_clear()`.
1518
1519 The sorting order in which processes are yielded is based on
1520 their PIDs.
1521
1522 *attrs* and *ad_value* have the same meaning as in
1523 Process.as_dict(). If *attrs* is specified as_dict() is called
    and the resulting dict is stored as an 'info' attribute attached
    to the returned Process instance.
1526 If *attrs* is an empty list it will retrieve all process info
1527 (slow).
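
    Example (a minimal sketch; the printed entries are illustrative):

    >>> import psutil
    >>> for proc in psutil.process_iter(attrs=['pid', 'name']):
    ...     print(proc.info)
    ...
    {'pid': 1, 'name': 'systemd'}
    {'pid': 2, 'name': 'kthreadd'}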
1528 """
1529 global _pmap
1530
1531 def add(pid):
1532 proc = Process(pid)
1533 pmap[proc.pid] = proc
1534 return proc
1535
1536 def remove(pid):
1537 pmap.pop(pid, None)
1538
1539 pmap = _pmap.copy()
1540 a = set(pids())
1541 b = set(pmap.keys())
1542 new_pids = a - b
1543 gone_pids = b - a
1544 for pid in gone_pids:
1545 remove(pid)
1546 while _pids_reused:
1547 pid = _pids_reused.pop()
1548 debug(f"refreshing Process instance for reused PID {pid}")
1549 remove(pid)
1550 try:
1551 ls = sorted(list(pmap.items()) + list(dict.fromkeys(new_pids).items()))
1552 for pid, proc in ls:
1553 try:
1554 if proc is None: # new process
1555 proc = add(pid)
1556 if attrs is not None:
1557 proc.info = proc.as_dict(attrs=attrs, ad_value=ad_value)
1558 yield proc
1559 except NoSuchProcess:
1560 remove(pid)
1561 finally:
1562 _pmap = pmap
1563
1564
1565process_iter.cache_clear = lambda: _pmap.clear() # noqa: PLW0108
1566process_iter.cache_clear.__doc__ = "Clear process_iter() internal cache."
1567
1568
1569def wait_procs(procs, timeout=None, callback=None):
1570 """Convenience function which waits for a list of processes to
1571 terminate.
1572
1573 Return a (gone, alive) tuple indicating which processes
1574 are gone and which ones are still alive.
1575
1576 The gone ones will have a new *returncode* attribute indicating
1577 process exit status (may be None).
1578
1579 *callback* is a function which gets called every time a process
1580 terminates (a Process instance is passed as callback argument).
1581
1582 Function will return as soon as all processes terminate or when
1583 *timeout* occurs.
1584 Differently from Process.wait() it will not raise TimeoutExpired if
1585 *timeout* occurs.
1586
1587 Typical use case is:
1588
1589 - send SIGTERM to a list of processes
1590 - give them some time to terminate
1591 - send SIGKILL to those ones which are still alive
1592
1593 Example:
1594
1595 >>> def on_terminate(proc):
1596 ... print("process {} terminated".format(proc))
1597 ...
1598 >>> for p in procs:
1599 ... p.terminate()
1600 ...
1601 >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
1602 >>> for p in alive:
1603 ... p.kill()
1604 """
1605
1606 def check_gone(proc, timeout):
1607 try:
1608 returncode = proc.wait(timeout=timeout)
1609 except (TimeoutExpired, subprocess.TimeoutExpired):
1610 pass
1611 else:
1612 if returncode is not None or not proc.is_running():
1613 # Set new Process instance attribute.
1614 proc.returncode = returncode
1615 gone.add(proc)
1616 if callback is not None:
1617 callback(proc)
1618
1619 if timeout is not None and not timeout >= 0:
1620 msg = f"timeout must be a positive integer, got {timeout}"
1621 raise ValueError(msg)
1622 if callback is not None and not callable(callback):
1623 msg = f"callback {callback!r} is not a callable"
1624 raise TypeError(msg)
1625
1626 gone = set()
1627 alive = set(procs)
1628 if timeout is not None:
1629 deadline = _timer() + timeout
1630
1631 while alive:
1632 if timeout is not None and timeout <= 0:
1633 break
1634 for proc in alive:
1635 # Make sure that every complete iteration (all processes)
1636 # will last max 1 sec.
1637 # We do this because we don't want to wait too long on a
1638 # single process: in case it terminates too late other
1639 # processes may disappear in the meantime and their PID
1640 # reused.
1641 max_timeout = 1.0 / len(alive)
1642 if timeout is not None:
1643 timeout = min((deadline - _timer()), max_timeout)
1644 if timeout <= 0:
1645 break
1646 check_gone(proc, timeout)
1647 else:
1648 check_gone(proc, max_timeout)
1649 alive = alive - gone # noqa: PLR6104
1650
1651 if alive:
1652 # Last attempt over processes survived so far.
1653 # timeout == 0 won't make this function wait any further.
1654 for proc in alive:
1655 check_gone(proc, 0)
1656 alive = alive - gone # noqa: PLR6104
1657
1658 return (list(gone), list(alive))
1659
1660
1661# =====================================================================
1662# --- CPU related functions
1663# =====================================================================
1664
1665
1666def cpu_count(logical=True):
1667 """Return the number of logical CPUs in the system (same as
1668 os.cpu_count()).
1669
1670 If *logical* is False return the number of physical cores only
1671 (e.g. hyper thread CPUs are excluded).
1672
1673 Return None if undetermined.
1674
1675 The return value is cached after first call.
1676 If desired cache can be cleared like this:
1677
1678 >>> psutil.cpu_count.cache_clear()
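
    Example (illustrative; the counts depend on the machine):

    >>> psutil.cpu_count()
    8
    >>> psutil.cpu_count(logical=False)
    4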
1679 """
1680 if logical:
1681 ret = _psplatform.cpu_count_logical()
1682 else:
1683 ret = _psplatform.cpu_count_cores()
1684 if ret is not None and ret < 1:
1685 ret = None
1686 return ret
1687
1688
1689def cpu_times(percpu=False):
1690 """Return system-wide CPU times as a namedtuple.
1691 Every CPU time represents the seconds the CPU has spent in the
    given mode. The availability of the namedtuple's fields varies
    depending on the platform:
1694
1695 - user
1696 - system
1697 - idle
1698 - nice (UNIX)
1699 - iowait (Linux)
1700 - irq (Linux, FreeBSD)
1701 - softirq (Linux)
1702 - steal (Linux >= 2.6.11)
1703 - guest (Linux >= 2.6.24)
1704 - guest_nice (Linux >= 3.2.0)
1705
1706 When *percpu* is True return a list of namedtuples for each CPU.
1707 First element of the list refers to first CPU, second element
1708 to second CPU and so on.
1709 The order of the list is consistent across calls.
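
    Example (Linux; fields and values are illustrative):

    >>> import psutil
    >>> psutil.cpu_times()
    scputimes(user=17451.8, nice=2.1, system=4507.1, idle=296683.5, ...)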
1710 """
1711 if not percpu:
1712 return _psplatform.cpu_times()
1713 else:
1714 return _psplatform.per_cpu_times()
1715
1716
1717try:
1718 _last_cpu_times = {threading.current_thread().ident: cpu_times()}
1719except Exception: # noqa: BLE001
1720 # Don't want to crash at import time.
1721 _last_cpu_times = {}
1722
1723try:
1724 _last_per_cpu_times = {
1725 threading.current_thread().ident: cpu_times(percpu=True)
1726 }
1727except Exception: # noqa: BLE001
1728 # Don't want to crash at import time.
1729 _last_per_cpu_times = {}
1730
1731
1732def _cpu_tot_time(times):
1733 """Given a cpu_time() ntuple calculates the total CPU time
1734 (including idle time).
1735 """
1736 tot = sum(times)
1737 if LINUX:
1738 # On Linux guest times are already accounted in "user" or
1739 # "nice" times, so we subtract them from total.
1740 # Htop does the same. References:
1741 # https://github.com/giampaolo/psutil/pull/940
1742 # http://unix.stackexchange.com/questions/178045
1743 # https://github.com/torvalds/linux/blob/
1744 # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/
1745 # cputime.c#L158
1746 tot -= getattr(times, "guest", 0) # Linux 2.6.24+
1747 tot -= getattr(times, "guest_nice", 0) # Linux 3.2.0+
1748 return tot
1749
1750
1751def _cpu_busy_time(times):
1752 """Given a cpu_time() ntuple calculates the busy CPU time.
1753 We do so by subtracting all idle CPU times.
1754 """
1755 busy = _cpu_tot_time(times)
1756 busy -= times.idle
1757 # Linux: "iowait" is time during which the CPU does not do anything
1758 # (waits for IO to complete). On Linux IO wait is *not* accounted
1759 # in "idle" time so we subtract it. Htop does the same.
1760 # References:
1761 # https://github.com/torvalds/linux/blob/
1762 # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/cputime.c#L244
1763 busy -= getattr(times, "iowait", 0)
1764 return busy
1765
1766
1767def _cpu_times_deltas(t1, t2):
1768 assert t1._fields == t2._fields, (t1, t2)
1769 field_deltas = []
1770 for field in _ntp.scputimes._fields:
1771 field_delta = getattr(t2, field) - getattr(t1, field)
1772 # CPU times are always supposed to increase over time
1773 # or at least remain the same and that's because time
1774 # cannot go backwards.
1775 # Surprisingly sometimes this might not be the case (at
1776 # least on Windows and Linux), see:
1777 # https://github.com/giampaolo/psutil/issues/392
1778 # https://github.com/giampaolo/psutil/issues/645
1779 # https://github.com/giampaolo/psutil/issues/1210
1780 # Trim negative deltas to zero to ignore decreasing fields.
1781 # top does the same. Reference:
1782 # https://gitlab.com/procps-ng/procps/blob/v3.3.12/top/top.c#L5063
1783 field_delta = max(0, field_delta)
1784 field_deltas.append(field_delta)
1785 return _ntp.scputimes(*field_deltas)
1786
1787
1788def cpu_percent(interval=None, percpu=False):
1789 """Return a float representing the current system-wide CPU
1790 utilization as a percentage.
1791
1792 When *interval* is > 0.0 compares system CPU times elapsed before
1793 and after the interval (blocking).
1794
    When *interval* is 0.0 or None compares system CPU times elapsed
    since the last call or module import, returning immediately
    (non-blocking). That means the first time this is called it will
    return a meaningless 0.0 value which you should ignore.
    In this case, for accuracy, it is recommended to call this
    function with at least 0.1 seconds between calls.
1801
1802 When *percpu* is True returns a list of floats representing the
1803 utilization as a percentage for each CPU.
1804 First element of the list refers to first CPU, second element
1805 to second CPU and so on.
1806 The order of the list is consistent across calls.
1807
1808 Examples:
1809
1810 >>> # blocking, system-wide
1811 >>> psutil.cpu_percent(interval=1)
1812 2.0
1813 >>>
1814 >>> # blocking, per-cpu
1815 >>> psutil.cpu_percent(interval=1, percpu=True)
1816 [2.0, 1.0]
1817 >>>
1818 >>> # non-blocking (percentage since last call)
1819 >>> psutil.cpu_percent(interval=None)
1820 2.9
1821 >>>
1822 """
1823 tid = threading.current_thread().ident
1824 blocking = interval is not None and interval > 0.0
1825 if interval is not None and interval < 0:
1826 msg = f"interval is not positive (got {interval})"
1827 raise ValueError(msg)
1828
1829 def calculate(t1, t2):
1830 times_delta = _cpu_times_deltas(t1, t2)
1831 all_delta = _cpu_tot_time(times_delta)
1832 busy_delta = _cpu_busy_time(times_delta)
1833
1834 try:
1835 busy_perc = (busy_delta / all_delta) * 100
1836 except ZeroDivisionError:
1837 return 0.0
1838 else:
1839 return round(busy_perc, 1)
1840
1841 # system-wide usage
1842 if not percpu:
1843 if blocking:
1844 t1 = cpu_times()
1845 time.sleep(interval)
1846 else:
1847 t1 = _last_cpu_times.get(tid) or cpu_times()
1848 _last_cpu_times[tid] = cpu_times()
1849 return calculate(t1, _last_cpu_times[tid])
1850 # per-cpu usage
1851 else:
1852 ret = []
1853 if blocking:
1854 tot1 = cpu_times(percpu=True)
1855 time.sleep(interval)
1856 else:
1857 tot1 = _last_per_cpu_times.get(tid) or cpu_times(percpu=True)
1858 _last_per_cpu_times[tid] = cpu_times(percpu=True)
1859 for t1, t2 in zip(tot1, _last_per_cpu_times[tid]):
1860 ret.append(calculate(t1, t2))
1861 return ret
1862
1863
1864# Use a separate dict for cpu_times_percent(), so it's independent from
1865# cpu_percent() and they can both be used within the same program.
1866_last_cpu_times_2 = _last_cpu_times.copy()
1867_last_per_cpu_times_2 = _last_per_cpu_times.copy()
1868
1869
1870def cpu_times_percent(interval=None, percpu=False):
1871 """Same as cpu_percent() but provides utilization percentages
1872 for each specific CPU time as is returned by cpu_times().
1873 For instance, on Linux we'll get:
1874
    >>> cpu_times_percent()
    scputimes(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
              irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
1878 >>>
1879
1880 *interval* and *percpu* arguments have the same meaning as in
1881 cpu_percent().
1882 """
1883 tid = threading.current_thread().ident
1884 blocking = interval is not None and interval > 0.0
1885 if interval is not None and interval < 0:
1886 msg = f"interval is not positive (got {interval!r})"
1887 raise ValueError(msg)
1888
1889 def calculate(t1, t2):
1890 nums = []
1891 times_delta = _cpu_times_deltas(t1, t2)
1892 all_delta = _cpu_tot_time(times_delta)
1893 # "scale" is the value to multiply each delta with to get percentages.
1894 # We use "max" to avoid division by zero (if all_delta is 0, then all
1895 # fields are 0 so percentages will be 0 too. all_delta cannot be a
1896 # fraction because cpu times are integers)
1897 scale = 100.0 / max(1, all_delta)
1898 for field_delta in times_delta:
1899 field_perc = field_delta * scale
1900 field_perc = round(field_perc, 1)
1901 # make sure we don't return negative values or values over 100%
1902 field_perc = min(max(0.0, field_perc), 100.0)
1903 nums.append(field_perc)
1904 return _ntp.scputimes(*nums)
1905
1906 # system-wide usage
1907 if not percpu:
1908 if blocking:
1909 t1 = cpu_times()
1910 time.sleep(interval)
1911 else:
1912 t1 = _last_cpu_times_2.get(tid) or cpu_times()
1913 _last_cpu_times_2[tid] = cpu_times()
1914 return calculate(t1, _last_cpu_times_2[tid])
1915 # per-cpu usage
1916 else:
1917 ret = []
1918 if blocking:
1919 tot1 = cpu_times(percpu=True)
1920 time.sleep(interval)
1921 else:
1922 tot1 = _last_per_cpu_times_2.get(tid) or cpu_times(percpu=True)
1923 _last_per_cpu_times_2[tid] = cpu_times(percpu=True)
1924 for t1, t2 in zip(tot1, _last_per_cpu_times_2[tid]):
1925 ret.append(calculate(t1, t2))
1926 return ret
1927
1928
1929def cpu_stats():
1930 """Return CPU statistics."""
1931 return _psplatform.cpu_stats()
1932
1933
1934if hasattr(_psplatform, "cpu_freq"):
1935
1936 def cpu_freq(percpu=False):
1937 """Return CPU frequency as a namedtuple including current,
        min and max frequency expressed in MHz.
1939
        If *percpu* is True and the system supports per-cpu frequency
        retrieval (Linux only), a list of frequencies is returned for
        each CPU. If not, a list with one element is returned.
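
        Example (values are illustrative and depend on the CPU):

        >>> psutil.cpu_freq()
        scpufreq(current=931.42, min=800.0, max=3500.0)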
1943 """
1944 ret = _psplatform.cpu_freq()
1945 if percpu:
1946 return ret
1947 else:
1948 num_cpus = float(len(ret))
1949 if num_cpus == 0:
1950 return None
1951 elif num_cpus == 1:
1952 return ret[0]
1953 else:
1954 currs, mins, maxs = 0.0, 0.0, 0.0
1955 set_none = False
1956 for cpu in ret:
1957 currs += cpu.current
1958 # On Linux if /proc/cpuinfo is used min/max are set
1959 # to None.
1960 if LINUX and cpu.min is None:
1961 set_none = True
1962 continue
1963 mins += cpu.min
1964 maxs += cpu.max
1965
1966 current = currs / num_cpus
1967
1968 if set_none:
1969 min_ = max_ = None
1970 else:
1971 min_ = mins / num_cpus
1972 max_ = maxs / num_cpus
1973
1974 return _ntp.scpufreq(current, min_, max_)
1975
1976 __all__.append("cpu_freq")
1977
1978
1979if hasattr(os, "getloadavg") or hasattr(_psplatform, "getloadavg"):
    # Perform this hasattr check once at import time to either use the
    # platform-based code or proxy straight from the os module.
1982 if hasattr(os, "getloadavg"):
1983 getloadavg = os.getloadavg
1984 else:
1985 getloadavg = _psplatform.getloadavg
1986
1987 __all__.append("getloadavg")
1988
1989
1990# =====================================================================
1991# --- system memory related functions
1992# =====================================================================
1993
1994
1995def virtual_memory():
1996 """Return statistics about system memory usage as a namedtuple
1997 including the following fields, expressed in bytes:
1998
1999 - total:
2000 total physical memory available.
2001
2002 - available:
2003 the memory that can be given instantly to processes without the
2004 system going into swap.
2005 This is calculated by summing different memory values depending
2006 on the platform and it is supposed to be used to monitor actual
     memory usage in a cross-platform fashion.
2008
2009 - percent:
2010 the percentage usage calculated as (total - available) / total * 100
2011
2012 - used:
2013 memory used, calculated differently depending on the platform and
2014 designed for informational purposes only:
2015 macOS: active + wired
2016 BSD: active + wired + cached
2017 Linux: total - free
2018
2019 - free:
2020 memory not being used at all (zeroed) that is readily available;
2021 note that this doesn't reflect the actual memory available
2022 (use 'available' instead)
2023
2024 Platform-specific fields:
2025
2026 - active (UNIX):
2027 memory currently in use or very recently used, and so it is in RAM.
2028
2029 - inactive (UNIX):
2030 memory that is marked as not used.
2031
2032 - buffers (BSD, Linux):
2033 cache for things like file system metadata.
2034
2035 - cached (BSD, macOS):
2036 cache for various things.
2037
2038 - wired (macOS, BSD):
2039 memory that is marked to always stay in RAM. It is never moved to disk.
2040
2041 - shared (BSD):
2042 memory that may be simultaneously accessed by multiple processes.
2043
2044 The sum of 'used' and 'available' does not necessarily equal total.
2045 On Windows 'available' and 'free' are the same.
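
    Example (Linux output shown; the namedtuple name and values are
    illustrative, and the platform-specific fields vary):

    >>> psutil.virtual_memory()
    svmem(total=10367352832, available=6472179712, percent=37.6,
          used=8186245120, free=2181107712, active=4748992512,
          inactive=2758115328, buffers=790724608, cached=3500347392, ...)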
2046 """
2047 global _TOTAL_PHYMEM
2048 ret = _psplatform.virtual_memory()
2049 # cached for later use in Process.memory_percent()
2050 _TOTAL_PHYMEM = ret.total
2051 return ret
2052
2053
2054def swap_memory():
2055 """Return system swap memory statistics as a namedtuple including
2056 the following fields:
2057
2058 - total: total swap memory in bytes
2059 - used: used swap memory in bytes
2060 - free: free swap memory in bytes
2061 - percent: the percentage usage
2062 - sin: no. of bytes the system has swapped in from disk (cumulative)
2063 - sout: no. of bytes the system has swapped out from disk (cumulative)
2064
2065 'sin' and 'sout' on Windows are meaningless and always set to 0.
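
    Example (the namedtuple name and values shown are illustrative):

    >>> psutil.swap_memory()
    sswap(total=2097147904, used=296128512, free=1801019392, percent=14.1,
          sin=304193536, sout=677842944)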
2066 """
2067 return _psplatform.swap_memory()
2068
2069
2070# =====================================================================
2071# --- disks/partitions related functions
2072# =====================================================================
2073
2074
2075def disk_usage(path):
2076 """Return disk usage statistics about the given *path* as a
2077 namedtuple including total, used and free space expressed in bytes
2078 plus the percentage usage.
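
    Example (illustrative, using the root directory as *path*; the
    namedtuple name and values shown are examples):

    >>> psutil.disk_usage('/')
    sdiskusage(total=21378641920, used=4809781248, free=15452987392,
               percent=22.5)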
2079 """
2080 return _psplatform.disk_usage(path)
2081
2082
2083def disk_partitions(all=False):
2084 """Return mounted partitions as a list of
2085 (device, mountpoint, fstype, opts) namedtuple.
    The 'opts' field is a raw comma-separated string indicating mount
    options, which may vary depending on the platform.

    If the *all* parameter is False, return physical devices only and
    ignore all others.
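
    Example (illustrative output for a single mounted partition; the
    namedtuple name and values shown are examples):

    >>> psutil.disk_partitions()
    [sdiskpart(device='/dev/sda1', mountpoint='/', fstype='ext4',
               opts='rw,relatime')]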
2091 """
2092 return _psplatform.disk_partitions(all)
2093
2094
2095def disk_io_counters(perdisk=False, nowrap=True):
2096 """Return system disk I/O statistics as a namedtuple including
2097 the following fields:
2098
2099 - read_count: number of reads
2100 - write_count: number of writes
2101 - read_bytes: number of bytes read
2102 - write_bytes: number of bytes written
2103 - read_time: time spent reading from disk (in ms)
2104 - write_time: time spent writing to disk (in ms)
2105
2106 Platform specific:
2107
2108 - busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms)
2109 - read_merged_count (Linux): number of merged reads
2110 - write_merged_count (Linux): number of merged writes
2111
2112 If *perdisk* is True return the same information for every
2113 physical disk installed on the system as a dictionary
2114 with partition names as the keys and the namedtuple
2115 described above as the values.
2116
    If *nowrap* is True it detects and adjusts the numbers which overflow
    and wrap (restart from 0), adding "old value" to "new value" so that
    the returned numbers will always be increasing or remain the same,
    but never decrease.
    "disk_io_counters.cache_clear()" can be used to invalidate the
    cache.

    On recent Windows versions the 'diskperf -y' command may need to be
    executed first, otherwise this function won't find any disk.
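
    Example (values are illustrative; platform-specific fields are
    omitted):

    >>> psutil.disk_io_counters()
    sdiskio(read_count=8141, write_count=2431, read_bytes=290203,
            write_bytes=537676, read_time=5868, write_time=94922)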
2126 """
2127 kwargs = dict(perdisk=perdisk) if LINUX else {}
2128 rawdict = _psplatform.disk_io_counters(**kwargs)
2129 if not rawdict:
2130 return {} if perdisk else None
2131 if nowrap:
2132 rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters')
2133 if perdisk:
2134 for disk, fields in rawdict.items():
2135 rawdict[disk] = _ntp.sdiskio(*fields)
2136 return rawdict
2137 else:
2138 return _ntp.sdiskio(*(sum(x) for x in zip(*rawdict.values())))
2139
2140
2141disk_io_counters.cache_clear = functools.partial(
2142 _wrap_numbers.cache_clear, 'psutil.disk_io_counters'
2143)
2144disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
2145
2146
2147# =====================================================================
2148# --- network related functions
2149# =====================================================================
2150
2151
2152def net_io_counters(pernic=False, nowrap=True):
2153 """Return network I/O statistics as a namedtuple including
2154 the following fields:
2155
2156 - bytes_sent: number of bytes sent
2157 - bytes_recv: number of bytes received
2158 - packets_sent: number of packets sent
2159 - packets_recv: number of packets received
2160 - errin: total number of errors while receiving
2161 - errout: total number of errors while sending
2162 - dropin: total number of incoming packets which were dropped
2163 - dropout: total number of outgoing packets which were dropped
2164 (always 0 on macOS and BSD)
2165
2166 If *pernic* is True return the same information for every
2167 network interface installed on the system as a dictionary
2168 with network interface names as the keys and the namedtuple
2169 described above as the values.
2170
    If *nowrap* is True it detects and adjusts the numbers which overflow
    and wrap (restart from 0), adding "old value" to "new value" so that
    the returned numbers will always be increasing or remain the same,
    but never decrease.
2175 "net_io_counters.cache_clear()" can be used to invalidate the
2176 cache.
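
    Example (values are illustrative):

    >>> psutil.net_io_counters()
    snetio(bytes_sent=14508483, bytes_recv=62749361, packets_sent=84311,
           packets_recv=94888, errin=0, errout=0, dropin=0, dropout=0)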
2177 """
2178 rawdict = _psplatform.net_io_counters()
2179 if not rawdict:
2180 return {} if pernic else None
2181 if nowrap:
2182 rawdict = _wrap_numbers(rawdict, 'psutil.net_io_counters')
2183 if pernic:
2184 for nic, fields in rawdict.items():
2185 rawdict[nic] = _ntp.snetio(*fields)
2186 return rawdict
2187 else:
2188 return _ntp.snetio(*[sum(x) for x in zip(*rawdict.values())])
2189
2190
2191net_io_counters.cache_clear = functools.partial(
2192 _wrap_numbers.cache_clear, 'psutil.net_io_counters'
2193)
2194net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
2195
2196
2197def net_connections(kind='inet'):
2198 """Return system-wide socket connections as a list of
2199 (fd, family, type, laddr, raddr, status, pid) namedtuples.
2200 In case of limited privileges 'fd' and 'pid' may be set to -1
2201 and None respectively.
2202 The *kind* parameter filters for connections that fit the
2203 following criteria:
2204
2205 +------------+----------------------------------------------------+
2206 | Kind Value | Connections using |
2207 +------------+----------------------------------------------------+
2208 | inet | IPv4 and IPv6 |
2209 | inet4 | IPv4 |
2210 | inet6 | IPv6 |
2211 | tcp | TCP |
2212 | tcp4 | TCP over IPv4 |
2213 | tcp6 | TCP over IPv6 |
2214 | udp | UDP |
2215 | udp4 | UDP over IPv4 |
2216 | udp6 | UDP over IPv6 |
2217 | unix | UNIX socket (both UDP and TCP protocols) |
2218 | all | the sum of all the possible families and protocols |
2219 +------------+----------------------------------------------------+
2220
2221 On macOS this function requires root privileges.
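
    Example (illustrative, truncated output; the namedtuple names and
    values shown are examples and the exact repr varies by platform):

    >>> psutil.net_connections()
    [sconn(fd=115, family=<AddressFamily.AF_INET: 2>,
           type=<SocketKind.SOCK_STREAM: 1>,
           laddr=addr(ip='10.0.0.1', port=48776),
           raddr=addr(ip='93.186.135.91', port=80),
           status='ESTABLISHED', pid=1254),
     ...]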
2222 """
2223 _check_conn_kind(kind)
2224 return _psplatform.net_connections(kind)
2225
2226
2227def net_if_addrs():
2228 """Return the addresses associated to each NIC (network interface
2229 card) installed on the system as a dictionary whose keys are the
    NIC names and whose values are lists of namedtuples, one for each
    address assigned to the NIC. Each namedtuple includes 5 fields:
2232
    - family: can be either socket.AF_INET, socket.AF_INET6 or
      psutil.AF_LINK, which refers to a MAC address.
    - address: the primary address; it is always set.
    - netmask: the netmask address; may be None.
    - broadcast: the broadcast address; may be None. It is mutually
      exclusive with 'ptp'.
    - ptp: stands for "point to point"; it is the destination address
      on a point-to-point interface (typically a VPN). May be None.
2241
2242 Note: you can have more than one address of the same family
2243 associated with each interface.
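
    Example (illustrative output for the loopback interface):

    >>> psutil.net_if_addrs()
    {'lo': [snicaddr(family=<AddressFamily.AF_INET: 2>, address='127.0.0.1',
                     netmask='255.0.0.0', broadcast=None, ptp=None)]}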
2244 """
2245 rawlist = _psplatform.net_if_addrs()
2246 rawlist.sort(key=lambda x: x[1]) # sort by family
2247 ret = collections.defaultdict(list)
2248 for name, fam, addr, mask, broadcast, ptp in rawlist:
2249 try:
2250 fam = socket.AddressFamily(fam)
2251 except ValueError:
2252 if WINDOWS and fam == -1:
2253 fam = _psplatform.AF_LINK
2254 elif (
2255 hasattr(_psplatform, "AF_LINK") and fam == _psplatform.AF_LINK
2256 ):
2257 # Linux defines AF_LINK as an alias for AF_PACKET.
2258 # We re-set the family here so that repr(family)
2259 # will show AF_LINK rather than AF_PACKET
2260 fam = _psplatform.AF_LINK
2261
2262 if fam == _psplatform.AF_LINK:
2263 # The underlying C function may return an incomplete MAC
2264 # address in which case we fill it with null bytes, see:
2265 # https://github.com/giampaolo/psutil/issues/786
2266 separator = ":" if POSIX else "-"
2267 while addr.count(separator) < 5:
2268 addr += f"{separator}00"
2269
2270 nt = _ntp.snicaddr(fam, addr, mask, broadcast, ptp)
2271
2272 # On Windows broadcast is None, so we determine it via
2273 # ipaddress module.
2274 if WINDOWS and fam in {socket.AF_INET, socket.AF_INET6}:
2275 try:
2276 broadcast = _common.broadcast_addr(nt)
2277 except Exception as err: # noqa: BLE001
2278 debug(err)
2279 else:
2280 if broadcast is not None:
                    # _replace() returns a new namedtuple; re-assign it
                    nt = nt._replace(broadcast=broadcast)
2282
2283 ret[name].append(nt)
2284
2285 return dict(ret)
2286
2287
2288def net_if_stats():
2289 """Return information about each NIC (network interface card)
    installed on the system as a dictionary whose keys are the
    NIC names and whose values are namedtuples with the following fields:
2292
2293 - isup: whether the interface is up (bool)
2294 - duplex: can be either NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or
2295 NIC_DUPLEX_UNKNOWN
    - speed: the NIC speed expressed in megabits per second (Mbps); if
      it can't be determined (e.g. 'localhost') it will be set to 0.
2298 - mtu: the maximum transmission unit expressed in bytes.
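
    Example (illustrative; the namedtuple name and values shown are
    examples):

    >>> psutil.net_if_stats()
    {'eth0': snicstats(isup=True, duplex=<NicDuplex.NIC_DUPLEX_FULL: 2>,
                       speed=1000, mtu=1500)}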
2299 """
2300 return _psplatform.net_if_stats()
2301
2302
2303# =====================================================================
2304# --- sensors
2305# =====================================================================
2306
2307
2308# Linux, macOS
2309if hasattr(_psplatform, "sensors_temperatures"):
2310
2311 def sensors_temperatures(fahrenheit=False):
2312 """Return hardware temperatures. Each entry is a namedtuple
        representing a certain hardware sensor (it may be a CPU, a
        hard disk or something else, depending on the OS and its
        configuration).
        All temperatures are expressed in Celsius unless *fahrenheit*
2317 is set to True.
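
        Example (illustrative; sensor names depend on the hardware):

        >>> psutil.sensors_temperatures()
        {'coretemp': [shwtemp(label='Core 0', current=45.0, high=100.0,
                              critical=100.0)]}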
2318 """
2319
2320 def convert(n):
2321 if n is not None:
2322 return (float(n) * 9 / 5) + 32 if fahrenheit else n
2323
2324 ret = collections.defaultdict(list)
2325 rawdict = _psplatform.sensors_temperatures()
2326
2327 for name, values in rawdict.items():
2328 while values:
2329 label, current, high, critical = values.pop(0)
2330 current = convert(current)
2331 high = convert(high)
2332 critical = convert(critical)
2333
2334 if high and not critical:
2335 critical = high
2336 elif critical and not high:
2337 high = critical
2338
2339 ret[name].append(_ntp.shwtemp(label, current, high, critical))
2340
2341 return dict(ret)
2342
2343 __all__.append("sensors_temperatures")
2344
2345
2346# Linux
2347if hasattr(_psplatform, "sensors_fans"):
2348
2349 def sensors_fans():
2350 """Return fans speed. Each entry is a namedtuple
2351 representing a certain hardware sensor.
        All speeds are expressed in RPM (revolutions per minute).
2353 """
2354 return _psplatform.sensors_fans()
2355
2356 __all__.append("sensors_fans")
2357
2358
2359# Linux, Windows, FreeBSD, macOS
2360if hasattr(_psplatform, "sensors_battery"):
2361
2362 def sensors_battery():
2363 """Return battery information. If no battery is installed
2364 returns None.
2365
2366 - percent: battery power left as a percentage.
2367 - secsleft: a rough approximation of how many seconds are left
2368 before the battery runs out of power. May be
          POWER_TIME_UNKNOWN or POWER_TIME_UNLIMITED.
2370 - power_plugged: True if the AC power cable is connected.
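
        Example (illustrative; the namedtuple name and values shown are
        examples):

        >>> psutil.sensors_battery()
        sbattery(percent=93, secsleft=16628, power_plugged=False)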
2371 """
2372 return _psplatform.sensors_battery()
2373
2374 __all__.append("sensors_battery")
2375
2376
2377# =====================================================================
2378# --- other system related functions
2379# =====================================================================
2380
2381
2382def boot_time():
2383 """Return the system boot time expressed in seconds since the epoch
2384 (seconds since January 1, 1970, at midnight UTC). The returned
2385 value is based on the system clock, which means it may be affected
2386 by changes such as manual adjustments or time synchronization (e.g.
2387 NTP).
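
    Example (the timestamp shown is illustrative; it can be converted
    to a human-readable form with the datetime module):

    >>> import datetime
    >>> psutil.boot_time()
    1365519115.0
    >>> datetime.datetime.fromtimestamp(psutil.boot_time()).strftime(
    ...     "%Y-%m-%d %H:%M:%S")
    '2013-04-09 17:31:55'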
2388 """
2389 return _psplatform.boot_time()
2390
2391
2392def users():
2393 """Return users currently connected on the system as a list of
2394 namedtuples including the following fields.
2395
2396 - user: the name of the user
2397 - terminal: the tty or pseudo-tty associated with the user, if any.
2398 - host: the host name associated with the entry, if any.
2399 - started: the creation time as a floating point number expressed in
2400 seconds since the epoch.
2401 """
2402 return _psplatform.users()
2403
2404
2405# =====================================================================
2406# --- Windows services
2407# =====================================================================
2408
2409
2410if WINDOWS:
2411
2412 def win_service_iter():
2413 """Return a generator yielding a WindowsService instance for all
2414 Windows services installed.
2415 """
2416 return _psplatform.win_service_iter()
2417
2418 def win_service_get(name):
2419 """Get a Windows service by *name*.
        Raise NoSuchProcess if no service with that name exists.
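
        Example (illustrative; assumes a service named 'alg' exists and
        the repr format shown is approximate):

        >>> psutil.win_service_get('alg')
        <WindowsService(name='alg', display_name='Application Layer Gateway Service') at 140125250261040>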
2421 """
2422 return _psplatform.win_service_get(name)
2423
2424
2425# =====================================================================
2426# --- malloc / heap
2427# =====================================================================
2428
2429
2430# Linux + glibc, Windows, macOS, FreeBSD, NetBSD
2431if hasattr(_psplatform, "heap_info"):
2432
2433 def heap_info():
2434 """Return low-level heap statistics from the C heap allocator
2435 (glibc).
2436
2437 - `heap_used`: the total number of bytes allocated via
2438 malloc/free. These are typically allocations smaller than
2439 MMAP_THRESHOLD.
2440
2441 - `mmap_used`: the total number of bytes allocated via `mmap()`
2442 or via large ``malloc()`` allocations.
2443
2444 - `heap_count` (Windows only): number of private heaps created
2445 via `HeapCreate()`.
2446 """
2447 return _ntp.pheap(*_psplatform.heap_info())
2448
2449 def heap_trim():
2450 """Request that the underlying allocator free any unused memory
2451 it's holding in the heap (typically small `malloc()`
2452 allocations).
2453
2454 In practice, modern allocators rarely comply, so this is not a
2455 general-purpose memory-reduction tool and won't meaningfully
2456 shrink RSS in real programs. Its primary value is in **leak
2457 detection tools**.
2458
2459 Calling `heap_trim()` before taking measurements helps reduce
2460 allocator noise, giving you a cleaner baseline so that changes
2461 in `heap_used` come from the code you're testing, not from
2462 internal allocator caching or fragmentation. Its effectiveness
2463 depends on allocator behavior and fragmentation patterns.
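
        Example (a minimal sketch of the leak-detection pattern;
        `allocate_something()` is a hypothetical function under test):

        >>> psutil.heap_trim()
        >>> before = psutil.heap_info().heap_used
        >>> allocate_something()
        >>> psutil.heap_trim()
        >>> psutil.heap_info().heap_used - before  # bytes still held
        0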
2464 """
2465 _psplatform.heap_trim()
2466
2467 __all__.append("heap_info")
2468 __all__.append("heap_trim")
2469
2470
2471# =====================================================================
2472
2473
2474def _set_debug(value):
2475 """Enable or disable PSUTIL_DEBUG option, which prints debugging
2476 messages to stderr.
2477 """
2478 import psutil._common
2479
2480 psutil._common.PSUTIL_DEBUG = bool(value)
2481 _psplatform.cext.set_debug(bool(value))
2482
2483
2484del memoize_when_activated