1# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""psutil is a cross-platform library for retrieving information on
6running processes and system utilization (CPU, memory, disks, network,
7sensors) in Python. Supported platforms:
8
9 - Linux
10 - Windows
11 - macOS
12 - FreeBSD
13 - OpenBSD
14 - NetBSD
15 - Sun Solaris
16 - AIX
17
18Supported Python versions are CPython 3.6+ and PyPy.
19"""
20
21import collections
22import contextlib
23import datetime
24import functools
25import os
26import signal
27import socket
28import subprocess
29import sys
30import threading
31import time
32
33try:
34 import pwd
35except ImportError:
36 pwd = None
37
38from . import _common
39from ._common import AIX
40from ._common import BSD
41from ._common import CONN_CLOSE
42from ._common import CONN_CLOSE_WAIT
43from ._common import CONN_CLOSING
44from ._common import CONN_ESTABLISHED
45from ._common import CONN_FIN_WAIT1
46from ._common import CONN_FIN_WAIT2
47from ._common import CONN_LAST_ACK
48from ._common import CONN_LISTEN
49from ._common import CONN_NONE
50from ._common import CONN_SYN_RECV
51from ._common import CONN_SYN_SENT
52from ._common import CONN_TIME_WAIT
53from ._common import FREEBSD
54from ._common import LINUX
55from ._common import MACOS
56from ._common import NETBSD
57from ._common import NIC_DUPLEX_FULL
58from ._common import NIC_DUPLEX_HALF
59from ._common import NIC_DUPLEX_UNKNOWN
60from ._common import OPENBSD
61from ._common import OSX # deprecated alias
62from ._common import POSIX
63from ._common import POWER_TIME_UNKNOWN
64from ._common import POWER_TIME_UNLIMITED
65from ._common import STATUS_DEAD
66from ._common import STATUS_DISK_SLEEP
67from ._common import STATUS_IDLE
68from ._common import STATUS_LOCKED
69from ._common import STATUS_PARKED
70from ._common import STATUS_RUNNING
71from ._common import STATUS_SLEEPING
72from ._common import STATUS_STOPPED
73from ._common import STATUS_TRACING_STOP
74from ._common import STATUS_WAITING
75from ._common import STATUS_WAKING
76from ._common import STATUS_ZOMBIE
77from ._common import SUNOS
78from ._common import WINDOWS
79from ._common import AccessDenied
80from ._common import Error
81from ._common import NoSuchProcess
82from ._common import TimeoutExpired
83from ._common import ZombieProcess
84from ._common import debug
85from ._common import memoize_when_activated
86from ._common import wrap_numbers as _wrap_numbers
87
88if LINUX:
89 # This is public API and it will be retrieved from _pslinux.py
90 # via sys.modules.
91 PROCFS_PATH = "/proc"
92
93 from . import _pslinux as _psplatform
94 from ._pslinux import IOPRIO_CLASS_BE # noqa: F401
95 from ._pslinux import IOPRIO_CLASS_IDLE # noqa: F401
96 from ._pslinux import IOPRIO_CLASS_NONE # noqa: F401
97 from ._pslinux import IOPRIO_CLASS_RT # noqa: F401
98
99elif WINDOWS:
100 from . import _pswindows as _psplatform
101 from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS # noqa: F401
102 from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS # noqa: F401
103 from ._psutil_windows import HIGH_PRIORITY_CLASS # noqa: F401
104 from ._psutil_windows import IDLE_PRIORITY_CLASS # noqa: F401
105 from ._psutil_windows import NORMAL_PRIORITY_CLASS # noqa: F401
106 from ._psutil_windows import REALTIME_PRIORITY_CLASS # noqa: F401
107 from ._pswindows import CONN_DELETE_TCB # noqa: F401
108 from ._pswindows import IOPRIO_HIGH # noqa: F401
109 from ._pswindows import IOPRIO_LOW # noqa: F401
110 from ._pswindows import IOPRIO_NORMAL # noqa: F401
111 from ._pswindows import IOPRIO_VERYLOW # noqa: F401
112
113elif MACOS:
114 from . import _psosx as _psplatform
115
116elif BSD:
117 from . import _psbsd as _psplatform
118
119elif SUNOS:
120 from . import _pssunos as _psplatform
121 from ._pssunos import CONN_BOUND # noqa: F401
122 from ._pssunos import CONN_IDLE # noqa: F401
123
124 # This is public writable API which is read from _pslinux.py and
125 # _pssunos.py via sys.modules.
126 PROCFS_PATH = "/proc"
127
128elif AIX:
129 from . import _psaix as _psplatform
130
131 # This is public API and it will be retrieved from _psaix.py
132 # via sys.modules.
133 PROCFS_PATH = "/proc"
134
135else: # pragma: no cover
136 msg = f"platform {sys.platform} is not supported"
137 raise NotImplementedError(msg)
138
139
140# fmt: off
141__all__ = [
142 # exceptions
143 "Error", "NoSuchProcess", "ZombieProcess", "AccessDenied",
144 "TimeoutExpired",
145
146 # constants
147 "version_info", "__version__",
148
149 "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
150 "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
151 "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING",
152 "STATUS_PARKED",
153
154 "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
155 "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
156 "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
157 # "CONN_IDLE", "CONN_BOUND",
158
159 "AF_LINK",
160
161 "NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN",
162
163 "POWER_TIME_UNKNOWN", "POWER_TIME_UNLIMITED",
164
165 "BSD", "FREEBSD", "LINUX", "NETBSD", "OPENBSD", "MACOS", "OSX", "POSIX",
166 "SUNOS", "WINDOWS", "AIX",
167
168 # "RLIM_INFINITY", "RLIMIT_AS", "RLIMIT_CORE", "RLIMIT_CPU", "RLIMIT_DATA",
169 # "RLIMIT_FSIZE", "RLIMIT_LOCKS", "RLIMIT_MEMLOCK", "RLIMIT_NOFILE",
170 # "RLIMIT_NPROC", "RLIMIT_RSS", "RLIMIT_STACK", "RLIMIT_MSGQUEUE",
171 # "RLIMIT_NICE", "RLIMIT_RTPRIO", "RLIMIT_RTTIME", "RLIMIT_SIGPENDING",
172
173 # classes
174 "Process", "Popen",
175
176 # functions
177 "pid_exists", "pids", "process_iter", "wait_procs", # proc
178 "virtual_memory", "swap_memory", # memory
179 "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu
180 "cpu_stats", # "cpu_freq", "getloadavg"
181 "net_io_counters", "net_connections", "net_if_addrs", # network
182 "net_if_stats",
183 "disk_io_counters", "disk_partitions", "disk_usage", # disk
184 # "sensors_temperatures", "sensors_battery", "sensors_fans" # sensors
185 "users", "boot_time", # others
186]
187# fmt: on
188
189
190__all__.extend(_psplatform.__extra__all__)
191
192# Linux, FreeBSD
193if hasattr(_psplatform.Process, "rlimit"):
194 # Populate global namespace with RLIM* constants.
195 _globals = globals()
196 _name = None
197 for _name in dir(_psplatform.cext):
198 if _name.startswith('RLIM') and _name.isupper():
199 _globals[_name] = getattr(_psplatform.cext, _name)
200 __all__.append(_name)
201 del _globals, _name
202
203AF_LINK = _psplatform.AF_LINK
204
205__author__ = "Giampaolo Rodola'"
206__version__ = "7.1.3"
207version_info = tuple(int(num) for num in __version__.split('.'))
208
209_timer = getattr(time, 'monotonic', time.time)
210_TOTAL_PHYMEM = None
211_LOWEST_PID = None
212_SENTINEL = object()
213
214# Sanity check in case the user messed up with psutil installation
215# or did something weird with sys.path. In this case we might end
216# up importing a python module using a C extension module which
217# was compiled for a different version of psutil.
218# We want to prevent that by failing sooner rather than later.
219# See: https://github.com/giampaolo/psutil/issues/564
220if int(__version__.replace('.', '')) != getattr(
221 _psplatform.cext, 'version', None
222):
223 msg = f"version conflict: {_psplatform.cext.__file__!r} C extension "
224 msg += "module was built for another version of psutil"
225 if hasattr(_psplatform.cext, 'version'):
226 v = ".".join(list(str(_psplatform.cext.version)))
227 msg += f" ({v} instead of {__version__})"
228 else:
229 msg += f" (different than {__version__})"
230 what = getattr(
231 _psplatform.cext,
232 "__file__",
233 "the existing psutil install directory",
234 )
235 msg += f"; you may try to 'pip uninstall psutil', manually remove {what}"
236 msg += " or clean the virtual env somehow, then reinstall"
237 raise ImportError(msg)
238
239
240# =====================================================================
241# --- Utils
242# =====================================================================
243
244
245if hasattr(_psplatform, 'ppid_map'):
246 # Faster version (Windows and Linux).
247 _ppid_map = _psplatform.ppid_map
248else: # pragma: no cover
249
250 def _ppid_map():
251 """Return a {pid: ppid, ...} dict for all running processes in
252 one shot. Used to speed up Process.children().
253 """
254 ret = {}
255 for pid in pids():
256 try:
257 ret[pid] = _psplatform.Process(pid).ppid()
258 except (NoSuchProcess, ZombieProcess):
259 pass
260 return ret
261
262
263def _pprint_secs(secs):
264 """Format seconds in a human readable form."""
265 now = time.time()
266 secs_ago = int(now - secs)
267 fmt = "%H:%M:%S" if secs_ago < 60 * 60 * 24 else "%Y-%m-%d %H:%M:%S"
268 return datetime.datetime.fromtimestamp(secs).strftime(fmt)
269
270
271def _check_conn_kind(kind):
272 """Check net_connections()'s `kind` parameter."""
273 kinds = tuple(_common.conn_tmap)
274 if kind not in kinds:
275 msg = f"invalid kind argument {kind!r}; valid ones are: {kinds}"
276 raise ValueError(msg)
277
278
279# =====================================================================
280# --- Process class
281# =====================================================================
282
283
284class Process:
285 """Represents an OS process with the given PID.
286 If PID is omitted current process PID (os.getpid()) is used.
287 Raise NoSuchProcess if PID does not exist.
288
289 Note that most of the methods of this class do not check whether
290 the PID of the process being queried has been reused. That means
291 that you may end up retrieving information for another process.
292
293 The only exceptions for which process identity is pre-emptively
294 checked and guaranteed are:
295
296 - parent()
297 - children()
298 - nice() (set)
299 - ionice() (set)
300 - rlimit() (set)
301 - cpu_affinity (set)
302 - suspend()
303 - resume()
304 - send_signal()
305 - terminate()
306 - kill()
307
308 To prevent this problem for all other methods you can use
309 is_running() before querying the process.
310 """
311
312 def __init__(self, pid=None):
313 self._init(pid)
314
315 def _init(self, pid, _ignore_nsp=False):
316 if pid is None:
317 pid = os.getpid()
318 else:
319 if pid < 0:
320 msg = f"pid must be a positive integer (got {pid})"
321 raise ValueError(msg)
322 try:
323 _psplatform.cext.check_pid_range(pid)
324 except OverflowError as err:
325 msg = "process PID out of range"
326 raise NoSuchProcess(pid, msg=msg) from err
327
328 self._pid = pid
329 self._name = None
330 self._exe = None
331 self._create_time = None
332 self._gone = False
333 self._pid_reused = False
334 self._hash = None
335 self._lock = threading.RLock()
336 # used for caching on Windows only (on POSIX ppid may change)
337 self._ppid = None
338 # platform-specific modules define an _psplatform.Process
339 # implementation class
340 self._proc = _psplatform.Process(pid)
341 self._last_sys_cpu_times = None
342 self._last_proc_cpu_times = None
343 self._exitcode = _SENTINEL
344 self._ident = (self.pid, None)
345 try:
346 self._ident = self._get_ident()
347 except AccessDenied:
348 # This should happen on Windows only, since we use the fast
349 # create time method. AFAIK, on all other platforms we are
350 # able to get create time for all PIDs.
351 pass
352 except ZombieProcess:
353 # Zombies can still be queried by this class (although
354 # not always) and pids() return them so just go on.
355 pass
356 except NoSuchProcess:
357 if not _ignore_nsp:
358 msg = "process PID not found"
359 raise NoSuchProcess(pid, msg=msg) from None
360 self._gone = True
361
362 def _get_ident(self):
363 """Return a (pid, ctime) tuple which is meant to identify a
364 Process instance uniquely over time. The PID alone is not
365 enough, as it can be assigned to a new process after this one
366 terminates, so we add the process creation time to the mix. We
367 need this in order to avoid killing the wrong process later on.
368 This is also known as the PID reuse or PID recycling problem.
369
370 The reliability of this strategy mostly depends on
371 create_time() precision, which is 0.01 secs on Linux. The
372 assumption is that, after a process terminates, the kernel
373 won't reuse the same PID after such a short period of time
374 (0.01 secs). Technically this is inherently racy, but
375 practically it should be good enough.
376
377 NOTE: unreliable on FreeBSD and OpenBSD as ctime is subject to
378 system clock updates.
379 """
380
381 if WINDOWS:
382 # Use create_time() fast method in order to speedup
383 # `process_iter()`. This means we'll get AccessDenied for
384 # most ADMIN processes, but that's fine since it means
385 # we'll also get AccessDenied on kill().
386 # https://github.com/giampaolo/psutil/issues/2366#issuecomment-2381646555
387 self._create_time = self._proc.create_time(fast_only=True)
388 return (self.pid, self._create_time)
389 elif LINUX or NETBSD or OSX:
390 # Use 'monotonic' process starttime since boot to form unique
391 # process identity, since it is stable over changes to system
392 # time.
393 return (self.pid, self._proc.create_time(monotonic=True))
394 else:
395 return (self.pid, self.create_time())
396
397 def __str__(self):
398 info = collections.OrderedDict()
399 info["pid"] = self.pid
400 if self._name:
401 info['name'] = self._name
402 with self.oneshot():
403 if self._pid_reused:
404 info["status"] = "terminated + PID reused"
405 else:
406 try:
407 info["name"] = self.name()
408 info["status"] = self.status()
409 except ZombieProcess:
410 info["status"] = "zombie"
411 except NoSuchProcess:
412 info["status"] = "terminated"
413 except AccessDenied:
414 pass
415
416 if self._exitcode not in {_SENTINEL, None}:
417 info["exitcode"] = self._exitcode
418 if self._create_time is not None:
419 info['started'] = _pprint_secs(self._create_time)
420
421 return "{}.{}({})".format(
422 self.__class__.__module__,
423 self.__class__.__name__,
424 ", ".join([f"{k}={v!r}" for k, v in info.items()]),
425 )
426
427 __repr__ = __str__
428
429 def __eq__(self, other):
430 # Test for equality with another Process object based
431 # on PID and creation time.
432 if not isinstance(other, Process):
433 return NotImplemented
434 if OPENBSD or NETBSD or SUNOS: # pragma: no cover
435 # Zombie processes on Open/NetBSD/illumos/Solaris have a
436 # creation time of 0.0. This covers the case when a process
437 # started normally (so it has a ctime), then it turned into a
438 # zombie. It's important to do this because is_running()
439 # depends on __eq__.
440 pid1, ident1 = self._ident
441 pid2, ident2 = other._ident
442 if pid1 == pid2:
443 if ident1 and not ident2:
444 try:
445 return self.status() == STATUS_ZOMBIE
446 except Error:
447 pass
448 return self._ident == other._ident
449
450 def __ne__(self, other):
451 return not self == other
452
453 def __hash__(self):
454 if self._hash is None:
455 self._hash = hash(self._ident)
456 return self._hash
457
458 def _raise_if_pid_reused(self):
459 """Raises NoSuchProcess in case process PID has been reused."""
460 if not self.is_running() and self._pid_reused:
461 # We may directly raise NSP in here already if PID is just
462 # not running, but I prefer NSP to be raised naturally by
463 # the actual Process API call. This way unit tests will tell
464 # us if the API is broken (aka don't raise NSP when it
465 # should). We also remain consistent with all other "get"
466 # APIs which don't use _raise_if_pid_reused().
467 msg = "process no longer exists and its PID has been reused"
468 raise NoSuchProcess(self.pid, self._name, msg=msg)
469
470 @property
471 def pid(self):
472 """The process PID."""
473 return self._pid
474
475 # --- utility methods
476
477 @contextlib.contextmanager
478 def oneshot(self):
479 """Utility context manager which considerably speeds up the
480 retrieval of multiple pieces of process information at the same time.
481
482 Internally different process info (e.g. name, ppid, uids,
483 gids, ...) may be fetched by using the same routine, but
484 only one piece of information is returned and the others are discarded.
485 When using this context manager the internal routine is
486 executed once (in the example below on name()) and the
487 other values are cached.
488
489 The cache is cleared when exiting the context manager block.
490 The advice is to use this every time you retrieve more than
491 one piece of information about the process. If you're lucky, you'll
492 get a hell of a speedup.
493
494 >>> import psutil
495 >>> p = psutil.Process()
496 >>> with p.oneshot():
497 ... p.name() # collect multiple info
498 ... p.cpu_times() # return cached value
499 ... p.cpu_percent() # return cached value
500 ... p.create_time() # return cached value
501 ...
502 >>>
503 """
504 with self._lock:
505 if hasattr(self, "_cache"):
506 # NOOP: this covers the use case where the user enters the
507 # context twice:
508 #
509 # >>> with p.oneshot():
510 # ... with p.oneshot():
511 # ...
512 #
513 # Also, since as_dict() internally uses oneshot()
514 # I expect that the code below will be a pretty common
515 # "mistake" that the user will make, so let's guard
516 # against that:
517 #
518 # >>> with p.oneshot():
519 # ... p.as_dict()
520 # ...
521 yield
522 else:
523 try:
524 # cached in case cpu_percent() is used
525 self.cpu_times.cache_activate(self)
526 # cached in case memory_percent() is used
527 self.memory_info.cache_activate(self)
528 # cached in case parent() is used
529 self.ppid.cache_activate(self)
530 # cached in case username() is used
531 if POSIX:
532 self.uids.cache_activate(self)
533 # specific implementation cache
534 self._proc.oneshot_enter()
535 yield
536 finally:
537 self.cpu_times.cache_deactivate(self)
538 self.memory_info.cache_deactivate(self)
539 self.ppid.cache_deactivate(self)
540 if POSIX:
541 self.uids.cache_deactivate(self)
542 self._proc.oneshot_exit()
543
544 def as_dict(self, attrs=None, ad_value=None):
545 """Utility method returning process information as a
546 dictionary.
547 If *attrs* is specified it must be a list of strings
548 reflecting available Process class' attribute names
549 (e.g. ['cpu_times', 'name']) else all public (read
550 only) attributes are assumed.
551 *ad_value* is the value which gets assigned in case
552 AccessDenied or ZombieProcess exception is raised when
553 retrieving that particular process information.
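
Example (illustrative; the values shown here are made up):

>>> import psutil
>>> psutil.Process().as_dict(attrs=['pid', 'name', 'username'])
{'pid': 1234, 'name': 'python3', 'username': 'user'}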
554 """
555 valid_names = _as_dict_attrnames
556 if attrs is not None:
557 if not isinstance(attrs, (list, tuple, set, frozenset)):
558 msg = f"invalid attrs type {type(attrs)}"
559 raise TypeError(msg)
560 attrs = set(attrs)
561 invalid_names = attrs - valid_names
562 if invalid_names:
563 msg = "invalid attr name{} {}".format(
564 "s" if len(invalid_names) > 1 else "",
565 ", ".join(map(repr, invalid_names)),
566 )
567 raise ValueError(msg)
568
569 retdict = {}
570 ls = attrs or valid_names
571 with self.oneshot():
572 for name in ls:
573 try:
574 if name == 'pid':
575 ret = self.pid
576 else:
577 meth = getattr(self, name)
578 ret = meth()
579 except (AccessDenied, ZombieProcess):
580 ret = ad_value
581 except NotImplementedError:
582 # in case of not implemented functionality (may happen
583 # on old or exotic systems) we want to crash only if
584 # the user explicitly asked for that particular attr
585 if attrs:
586 raise
587 continue
588 retdict[name] = ret
589 return retdict
590
591 def parent(self):
592 """Return the parent process as a Process object pre-emptively
593 checking whether PID has been reused.
594 If no parent is known return None.
595 """
596 lowest_pid = _LOWEST_PID if _LOWEST_PID is not None else pids()[0]
597 if self.pid == lowest_pid:
598 return None
599 ppid = self.ppid()
600 if ppid is not None:
601 # Get a fresh (non-cached) ctime in case the system clock
602 # was updated. TODO: use a monotonic ctime on platforms
603 # where it's supported.
604 proc_ctime = Process(self.pid).create_time()
605 try:
606 parent = Process(ppid)
607 if parent.create_time() <= proc_ctime:
608 return parent
609 # ...else ppid has been reused by another process
610 except NoSuchProcess:
611 pass
612
613 def parents(self):
614 """Return the parents of this process as a list of Process
615 instances. If no parents are known return an empty list.
616 """
617 parents = []
618 proc = self.parent()
619 while proc is not None:
620 parents.append(proc)
621 proc = proc.parent()
622 return parents
623
624 def is_running(self):
625 """Return whether this process is running.
626
627 It also checks if PID has been reused by another process, in
628 which case it will remove the process from `process_iter()`
629 internal cache and return False.
630 """
631 if self._gone or self._pid_reused:
632 return False
633 try:
634 # Checking if PID is alive is not enough as the PID might
635 # have been reused by another process. Process identity /
636 # uniqueness over time is guaranteed by (PID + creation
637 # time) and that is verified in __eq__.
638 self._pid_reused = self != Process(self.pid)
639 if self._pid_reused:
640 _pids_reused.add(self.pid)
641 raise NoSuchProcess(self.pid)
642 return True
643 except ZombieProcess:
644 # We should never get here as it's already handled in
645 # Process.__init__; here just for extra safety.
646 return True
647 except NoSuchProcess:
648 self._gone = True
649 return False
650
651 # --- actual API
652
653 @memoize_when_activated
654 def ppid(self):
655 """The process parent PID.
656 On Windows the return value is cached after first call.
657 """
658 # On POSIX we don't want to cache the ppid as it may unexpectedly
659 # change to 1 (init) in case this process turns into a zombie:
660 # https://github.com/giampaolo/psutil/issues/321
661 # http://stackoverflow.com/questions/356722/
662
663 # XXX should we check creation time here rather than in
664 # Process.parent()?
665 self._raise_if_pid_reused()
666 if POSIX:
667 return self._proc.ppid()
668 else: # pragma: no cover
669 self._ppid = self._ppid or self._proc.ppid()
670 return self._ppid
671
672 def name(self):
673 """The process name. The return value is cached after first call."""
674 # Process name is only cached on Windows as on POSIX it may
675 # change, see:
676 # https://github.com/giampaolo/psutil/issues/692
677 if WINDOWS and self._name is not None:
678 return self._name
679 name = self._proc.name()
680 if POSIX and len(name) >= 15:
681 # On UNIX the name gets truncated to the first 15 characters.
682 # If it matches the first part of the cmdline we return that
683 # one instead because it's usually more explicative.
684 # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
685 try:
686 cmdline = self.cmdline()
687 except (AccessDenied, ZombieProcess):
688 # Just pass and return the truncated name: it's better
689 # than nothing. Note: there are actual cases where a
690 # zombie process can return a name() but not a
691 # cmdline(), see:
692 # https://github.com/giampaolo/psutil/issues/2239
693 pass
694 else:
695 if cmdline:
696 extended_name = os.path.basename(cmdline[0])
697 if extended_name.startswith(name):
698 name = extended_name
699 self._name = name
700 self._proc._name = name
701 return name
702
703 def exe(self):
704 """The process executable as an absolute path.
705 May also be an empty string.
706 The return value is cached after first call.
707 """
708
709 def guess_it(fallback):
710 # try to guess exe from cmdline[0] in absence of a native
711 # exe representation
712 cmdline = self.cmdline()
713 if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
714 exe = cmdline[0] # the possible exe
715 # Attempt to guess only in case of an absolute path.
716 # It is not safe otherwise as the process might have
717 # changed cwd.
718 if (
719 os.path.isabs(exe)
720 and os.path.isfile(exe)
721 and os.access(exe, os.X_OK)
722 ):
723 return exe
724 if isinstance(fallback, AccessDenied):
725 raise fallback
726 return fallback
727
728 if self._exe is None:
729 try:
730 exe = self._proc.exe()
731 except AccessDenied as err:
732 return guess_it(fallback=err)
733 else:
734 if not exe:
735 # underlying implementation can legitimately return an
736 # empty string; if that's the case we don't want to
737 # raise AD while guessing from the cmdline
738 try:
739 exe = guess_it(fallback=exe)
740 except AccessDenied:
741 pass
742 self._exe = exe
743 return self._exe
744
745 def cmdline(self):
746 """The command line this process has been called with."""
747 return self._proc.cmdline()
748
749 def status(self):
750 """The process current status as a STATUS_* constant."""
751 try:
752 return self._proc.status()
753 except ZombieProcess:
754 return STATUS_ZOMBIE
755
756 def username(self):
757 """The name of the user that owns the process.
758 On UNIX this is calculated by using the *real* process uid.
759 """
760 if POSIX:
761 if pwd is None:
762 # might happen if python was installed from sources
763 msg = "requires pwd module shipped with standard python"
764 raise ImportError(msg)
765 real_uid = self.uids().real
766 try:
767 return pwd.getpwuid(real_uid).pw_name
768 except KeyError:
769 # the uid can't be resolved by the system
770 return str(real_uid)
771 else:
772 return self._proc.username()
773
774 def create_time(self):
775 """The process creation time as a floating point number
776 expressed in seconds since the epoch (seconds since January 1,
777 1970, at midnight UTC). The return value, which is cached after
778 first call, is based on the system clock, which means it may be
779 affected by changes such as manual adjustments or time
780 synchronization (e.g. NTP).
781 """
782 if self._create_time is None:
783 self._create_time = self._proc.create_time()
784 return self._create_time
785
786 def cwd(self):
787 """Process current working directory as an absolute path."""
788 return self._proc.cwd()
789
790 def nice(self, value=None):
791 """Get or set process niceness (priority)."""
792 if value is None:
793 return self._proc.nice_get()
794 else:
795 self._raise_if_pid_reused()
796 self._proc.nice_set(value)
797
798 if POSIX:
799
800 @memoize_when_activated
801 def uids(self):
802 """Return process UIDs as a (real, effective, saved)
803 namedtuple.
804 """
805 return self._proc.uids()
806
807 def gids(self):
808 """Return process GIDs as a (real, effective, saved)
809 namedtuple.
810 """
811 return self._proc.gids()
812
813 def terminal(self):
814 """The terminal associated with this process, if any,
815 else None.
816 """
817 return self._proc.terminal()
818
819 def num_fds(self):
820 """Return the number of file descriptors opened by this
821 process (POSIX only).
822 """
823 return self._proc.num_fds()
824
825 # Linux, BSD, AIX and Windows only
826 if hasattr(_psplatform.Process, "io_counters"):
827
828 def io_counters(self):
829 """Return process I/O statistics as a
830 (read_count, write_count, read_bytes, write_bytes)
831 namedtuple.
832 Those are the number of read/write calls performed and the
833 amount of bytes read and written by the process.
834 """
835 return self._proc.io_counters()
836
837 # Linux and Windows
838 if hasattr(_psplatform.Process, "ionice_get"):
839
840 def ionice(self, ioclass=None, value=None):
841 """Get or set process I/O niceness (priority).
842
843 On Linux *ioclass* is one of the IOPRIO_CLASS_* constants.
844 *value* is a number which goes from 0 to 7. The higher the
845 value, the lower the I/O priority of the process.
846
847 On Windows only *ioclass* is used and it can be set to 2
848 (normal), 1 (low) or 0 (very low).
849
850 Available on Linux and Windows > Vista only.
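
Example (illustrative, Linux; the returned values are made up):

>>> import psutil
>>> p = psutil.Process()
>>> p.ionice(psutil.IOPRIO_CLASS_IDLE)  # set lowest I/O priority
>>> p.ionice()
pionice(ioclass=<IOPRIO_CLASS_IDLE: 3>, value=0)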
851 """
852 if ioclass is None:
853 if value is not None:
854 msg = "'ioclass' argument must be specified"
855 raise ValueError(msg)
856 return self._proc.ionice_get()
857 else:
858 self._raise_if_pid_reused()
859 return self._proc.ionice_set(ioclass, value)
860
861 # Linux / FreeBSD only
862 if hasattr(_psplatform.Process, "rlimit"):
863
864 def rlimit(self, resource, limits=None):
865 """Get or set process resource limits as a (soft, hard)
866 tuple.
867
868 *resource* is one of the RLIMIT_* constants.
869 *limits* is supposed to be a (soft, hard) tuple.
870
871 See "man prlimit" for further info.
872 Available on Linux and FreeBSD only.
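
Example (illustrative, Linux):

>>> import psutil
>>> p = psutil.Process()
>>> p.rlimit(psutil.RLIMIT_NOFILE, (128, 1024))  # set (soft, hard)
>>> p.rlimit(psutil.RLIMIT_NOFILE)
(128, 1024)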
873 """
874 if limits is not None:
875 self._raise_if_pid_reused()
876 return self._proc.rlimit(resource, limits)
877
878 # Windows, Linux and FreeBSD only
879 if hasattr(_psplatform.Process, "cpu_affinity_get"):
880
881 def cpu_affinity(self, cpus=None):
882 """Get or set process CPU affinity.
883 If specified, *cpus* must be a list of CPUs for which you
884 want to set the affinity (e.g. [0, 1]).
885 If an empty list is passed, all eligible CPUs are assumed
886 (and set).
887 (Windows, Linux and FreeBSD only).
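
Example (illustrative; values depend on the machine):

>>> import psutil
>>> p = psutil.Process()
>>> p.cpu_affinity()
[0, 1, 2, 3]
>>> p.cpu_affinity([0, 1])  # restrict to the first two CPUs
>>> p.cpu_affinity()
[0, 1]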
888 """
889 if cpus is None:
890 return sorted(set(self._proc.cpu_affinity_get()))
891 else:
892 self._raise_if_pid_reused()
893 if not cpus:
894 if hasattr(self._proc, "_get_eligible_cpus"):
895 cpus = self._proc._get_eligible_cpus()
896 else:
897 cpus = tuple(range(len(cpu_times(percpu=True))))
898 self._proc.cpu_affinity_set(list(set(cpus)))
899
900 # Linux, FreeBSD, SunOS
901 if hasattr(_psplatform.Process, "cpu_num"):
902
903 def cpu_num(self):
904 """Return what CPU this process is currently running on.
905 The returned number should be <= psutil.cpu_count()
906 and <= len(psutil.cpu_percent(percpu=True)).
907 It may be used in conjunction with
908 psutil.cpu_percent(percpu=True) to observe the system
909 workload distributed across CPUs.
910 """
911 return self._proc.cpu_num()
912
913 # All platforms have it, but maybe not in the future.
914 if hasattr(_psplatform.Process, "environ"):
915
916 def environ(self):
917 """The environment variables of the process as a dict. Note: this
918 might not reflect changes made after the process started.
919 """
920 return self._proc.environ()
921
922 if WINDOWS:
923
924 def num_handles(self):
925 """Return the number of handles opened by this process
926 (Windows only).
927 """
928 return self._proc.num_handles()
929
930 def num_ctx_switches(self):
931 """Return the number of voluntary and involuntary context
932 switches performed by this process.
933 """
934 return self._proc.num_ctx_switches()
935
936 def num_threads(self):
937 """Return the number of threads used by this process."""
938 return self._proc.num_threads()
939
940 if hasattr(_psplatform.Process, "threads"):
941
942 def threads(self):
943 """Return threads opened by process as a list of
944 (id, user_time, system_time) namedtuples representing
945 thread id and thread CPU times (user/system).
946 On OpenBSD this method requires root access.
947 """
948 return self._proc.threads()
949
950 def children(self, recursive=False):
951 """Return the children of this process as a list of Process
952 instances, pre-emptively checking whether PID has been reused.
953 If *recursive* is True return all the descendants (grandchildren, etc.).
954
955 Example (A == this process):
956
957 A ─┐
958 │
959 ├─ B (child) ─┐
960 │ └─ X (grandchild) ─┐
961 │ └─ Y (great grandchild)
962 ├─ C (child)
963 └─ D (child)
964
965 >>> import psutil
966 >>> p = psutil.Process()
967 >>> p.children()
968 B, C, D
969 >>> p.children(recursive=True)
970 B, X, Y, C, D
971
972 Note that in the example above if process X disappears
973 process Y won't be listed as the reference to process A
974 is lost.
975 """
976 self._raise_if_pid_reused()
977 ppid_map = _ppid_map()
978 # Get a fresh (non-cached) ctime in case the system clock was
979 # updated. TODO: use a monotonic ctime on platforms where it's
980 # supported.
981 proc_ctime = Process(self.pid).create_time()
982 ret = []
983 if not recursive:
984 for pid, ppid in ppid_map.items():
985 if ppid == self.pid:
986 try:
987 child = Process(pid)
988 # if child happens to be older than its parent
989 # (self) it means child's PID has been reused
990 if proc_ctime <= child.create_time():
991 ret.append(child)
992 except (NoSuchProcess, ZombieProcess):
993 pass
994 else:
995 # Construct a {pid: [child pids]} dict
996 reverse_ppid_map = collections.defaultdict(list)
997 for pid, ppid in ppid_map.items():
998 reverse_ppid_map[ppid].append(pid)
999 # Recursively traverse that dict, starting from self.pid,
1000 # such that we only call Process() on actual children
1001 seen = set()
1002 stack = [self.pid]
1003 while stack:
1004 pid = stack.pop()
1005 if pid in seen:
1006 # Since pids can be reused while the ppid_map is
1007 # constructed, there may be rare instances where
1008 # there's a cycle in the recorded process "tree".
1009 continue
1010 seen.add(pid)
1011 for child_pid in reverse_ppid_map[pid]:
1012 try:
1013 child = Process(child_pid)
1014 # if child happens to be older than its parent
1015 # (self) it means child's PID has been reused
1016 intime = proc_ctime <= child.create_time()
1017 if intime:
1018 ret.append(child)
1019 stack.append(child_pid)
1020 except (NoSuchProcess, ZombieProcess):
1021 pass
1022 return ret
1023
1024 def cpu_percent(self, interval=None):
1025 """Return a float representing the current process CPU
1026 utilization as a percentage.
1027
1028 When *interval* is 0.0 or None (default) compares process times
1029 to system CPU times elapsed since last call, returning
1030 immediately (non-blocking). That means that the first time
1031 this is called it will return a meaningless 0.0 value which you should ignore.
1032
1033 When *interval* is > 0.0 compares process times to system CPU
1034 times elapsed before and after the interval (blocking).
1035
1036 In this case it is recommended for accuracy that this function
1037 be called with at least 0.1 seconds between calls.
1038
1039 A value > 100.0 can be returned in case of processes running
1040 multiple threads on different CPU cores.
1041
1042 The returned value is explicitly NOT split evenly between
1043 all available logical CPUs. This means that a busy loop process
1044 running on a system with 2 logical CPUs will be reported as
1045 having 100% CPU utilization instead of 50%.
1046
1047 Examples:
1048
1049 >>> import psutil
1050 >>> p = psutil.Process(os.getpid())
1051 >>> # blocking
1052 >>> p.cpu_percent(interval=1)
1053 2.0
1054 >>> # non-blocking (percentage since last call)
1055 >>> p.cpu_percent(interval=None)
1056 2.9
1057 >>>
1058 """
1059 blocking = interval is not None and interval > 0.0
1060 if interval is not None and interval < 0:
1061 msg = f"interval is not positive (got {interval!r})"
1062 raise ValueError(msg)
1063 num_cpus = cpu_count() or 1
1064
1065 def timer():
1066 return _timer() * num_cpus
1067
1068 if blocking:
1069 st1 = timer()
1070 pt1 = self._proc.cpu_times()
1071 time.sleep(interval)
1072 st2 = timer()
1073 pt2 = self._proc.cpu_times()
1074 else:
1075 st1 = self._last_sys_cpu_times
1076 pt1 = self._last_proc_cpu_times
1077 st2 = timer()
1078 pt2 = self._proc.cpu_times()
1079 if st1 is None or pt1 is None:
1080 self._last_sys_cpu_times = st2
1081 self._last_proc_cpu_times = pt2
1082 return 0.0
1083
1084 delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
1085 delta_time = st2 - st1
1086 # reset values for next call in case of interval == None
1087 self._last_sys_cpu_times = st2
1088 self._last_proc_cpu_times = pt2
1089
1090 try:
1091 # This is the utilization split evenly between all CPUs.
1092 # E.g. a busy loop process on a 2-CPU-cores system at this
1093 # point is reported as 50% instead of 100%.
1094 overall_cpus_percent = (delta_proc / delta_time) * 100
1095 except ZeroDivisionError:
1096 # interval was too low
1097 return 0.0
1098 else:
1099 # Note 1:
1100 # in order to emulate "top" we multiply the value for the num
1101 # of CPU cores. This way the busy process will be reported as
1102 # having 100% (or more) usage.
1103 #
1104 # Note 2:
1105 # taskmgr.exe on Windows differs in that it will show 50%
1106 # instead.
1107 #
1108 # Note 3:
1109 # a percentage > 100 is legitimate as it can result from a
1110 # process with multiple threads running on different CPU
1111 # cores (top does the same), see:
1112 # http://stackoverflow.com/questions/1032357
1113 # https://github.com/giampaolo/psutil/issues/474
1114 single_cpu_percent = overall_cpus_percent * num_cpus
1115 return round(single_cpu_percent, 1)
1116
1117 @memoize_when_activated
1118 def cpu_times(self):
1119 """Return a (user, system, children_user, children_system)
1120 namedtuple representing the accumulated process time, in
1121 seconds.
1122 This is similar to os.times() but per-process.
1123 On macOS and Windows children_user and children_system are
1124 always set to 0.
1125 """
1126 return self._proc.cpu_times()
1127
1128 @memoize_when_activated
1129 def memory_info(self):
1130 """Return a namedtuple with variable fields depending on the
1131 platform, representing memory information about the process.
1132
1133 The "portable" fields available on all platforms are `rss` and `vms`.
1134
1135 All numbers are expressed in bytes.
1136 """
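
Example (illustrative, Linux; numbers are made up):

>>> import psutil
>>> psutil.Process().memory_info()
pmem(rss=15491072, vms=84025344, shared=8192000, text=2822144, lib=0, data=9891840, dirty=0)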
1137 return self._proc.memory_info()
1138
1139 def memory_full_info(self):
1140 """This method returns the same information as memory_info(),
1141 plus, on some platform (Linux, macOS, Windows), also provides
1142 additional metrics (USS, PSS and swap).
1143 The additional metrics provide a better representation of actual
1144 process memory usage.
1145
1146 Namely USS is the memory which is unique to a process and which
1147 would be freed if the process was terminated right now.
1148
1149 It does so by passing through the whole process address space.
1150 As such it usually requires higher user privileges than
1151 memory_info() and is considerably slower.
1152 """
1153 return self._proc.memory_full_info()
1154
1155 def memory_percent(self, memtype="rss"):
1156 """Compare process memory to total physical system memory and
1157 calculate process memory utilization as a percentage.
1158 *memtype* argument is a string that dictates what type of
1159 process memory you want to compare against (defaults to "rss").
1160 The list of available strings can be obtained like this:
1161
1162 >>> psutil.Process().memory_info()._fields
1163 ('rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty', 'uss', 'pss')
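
Example (illustrative; the returned value is made up):

>>> psutil.Process().memory_percent(memtype='rss')
0.74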
1164 """
1165 valid_types = list(_psplatform.pfullmem._fields)
1166 if memtype not in valid_types:
1167 msg = (
1168 f"invalid memtype {memtype!r}; valid types are"
1169 f" {tuple(valid_types)!r}"
1170 )
1171 raise ValueError(msg)
1172 fun = (
1173 self.memory_info
1174 if memtype in _psplatform.pmem._fields
1175 else self.memory_full_info
1176 )
1177 metrics = fun()
1178 value = getattr(metrics, memtype)
1179
1180 # use cached value if available
1181 total_phymem = _TOTAL_PHYMEM or virtual_memory().total
1182 if not total_phymem > 0:
1183 # we should never get here
1184 msg = (
1185 "can't calculate process memory percent because total physical"
1186 f" system memory is not positive ({total_phymem!r})"
1187 )
1188 raise ValueError(msg)
1189 return (value / float(total_phymem)) * 100
1190
1191 if hasattr(_psplatform.Process, "memory_maps"):
1192
1193 def memory_maps(self, grouped=True):
1194 """Return process' mapped memory regions as a list of namedtuples
1195 whose fields are variable depending on the platform.
1196
1197 If *grouped* is True the mapped regions with the same 'path'
1198 are grouped together and the different memory fields are summed.
1199
1200 If *grouped* is False every mapped region is shown as a single
1201 entity and the namedtuple will also include the mapped region's
1202 address space ('addr') and permission set ('perms').
1203 """
1204 it = self._proc.memory_maps()
1205 if grouped:
1206 d = {}
1207 for tupl in it:
1208 path = tupl[2]
1209 nums = tupl[3:]
1210 try:
1211 d[path] = list(map(lambda x, y: x + y, d[path], nums))
1212 except KeyError:
1213 d[path] = nums
1214 nt = _psplatform.pmmap_grouped
1215 return [nt(path, *d[path]) for path in d]
1216 else:
1217 nt = _psplatform.pmmap_ext
1218 return [nt(*x) for x in it]
1219
1220 def open_files(self):
1221 """Return files opened by process as a list of
1222 (path, fd) namedtuples including the absolute file name
1223 and file descriptor number.
1224 """
1225 return self._proc.open_files()
1226
1227 def net_connections(self, kind='inet'):
1228 """Return socket connections opened by process as a list of
1229 (fd, family, type, laddr, raddr, status) namedtuples.
1230 The *kind* parameter filters for connections that match the
1231 following criteria:
1232
1233 +------------+----------------------------------------------------+
1234 | Kind Value | Connections using |
1235 +------------+----------------------------------------------------+
1236 | inet | IPv4 and IPv6 |
1237 | inet4 | IPv4 |
1238 | inet6 | IPv6 |
1239 | tcp | TCP |
1240 | tcp4 | TCP over IPv4 |
1241 | tcp6 | TCP over IPv6 |
1242 | udp | UDP |
1243 | udp4 | UDP over IPv4 |
1244 | udp6 | UDP over IPv6 |
1245 | unix | UNIX socket (both UDP and TCP protocols) |
1246 | all | the sum of all the possible families and protocols |
1247 +------------+----------------------------------------------------+
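
Example (illustrative; addresses and values are made up):

>>> import psutil
>>> p = psutil.Process()
>>> p.net_connections(kind='tcp')
[pconn(fd=10, family=<AddressFamily.AF_INET: 2>, type=<SocketKind.SOCK_STREAM: 1>, laddr=addr(ip='127.0.0.1', port=8080), raddr=(), status='LISTEN')]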
1248 """
1249 _check_conn_kind(kind)
1250 return self._proc.net_connections(kind)
1251
1252 @_common.deprecated_method(replacement="net_connections")
1253 def connections(self, kind="inet"):
1254 return self.net_connections(kind=kind)
1255
1256 # --- signals
1257
1258 if POSIX:
1259
1260 def _send_signal(self, sig):
1261 assert not self.pid < 0, self.pid
1262 self._raise_if_pid_reused()
1263
1264 pid, ppid, name = self.pid, self._ppid, self._name
1265 if pid == 0:
1266 # see "man 2 kill"
1267 msg = (
1268 "preventing sending signal to process with PID 0 as it "
1269 "would affect every process in the process group of the "
1270 "calling process (os.getpid()) instead of PID 0"
1271 )
1272 raise ValueError(msg)
1273 try:
1274 os.kill(pid, sig)
1275 except ProcessLookupError as err:
1276 if OPENBSD and pid_exists(pid):
1277 # We do this because os.kill() lies in case of
1278 # zombie processes.
1279 raise ZombieProcess(pid, name, ppid) from err
1280 self._gone = True
1281 raise NoSuchProcess(pid, name) from err
1282 except PermissionError as err:
1283 raise AccessDenied(pid, name) from err
1284
1285 def send_signal(self, sig):
1286 """Send a signal *sig* to process pre-emptively checking
1287 whether PID has been reused (see signal module constants).
1288 On Windows only SIGTERM is valid and is treated as an alias
1289 for kill().
1290 """
1291 if POSIX:
1292 self._send_signal(sig)
1293 else: # pragma: no cover
1294 self._raise_if_pid_reused()
1295 if sig != signal.SIGTERM and not self.is_running():
1296 msg = "process no longer exists"
1297 raise NoSuchProcess(self.pid, self._name, msg=msg)
1298 self._proc.send_signal(sig)
1299
1300 def suspend(self):
1301 """Suspend process execution with SIGSTOP pre-emptively checking
1302 whether PID has been reused.
1303 On Windows this has the effect of suspending all process threads.
1304 """
1305 if POSIX:
1306 self._send_signal(signal.SIGSTOP)
1307 else: # pragma: no cover
1308 self._raise_if_pid_reused()
1309 self._proc.suspend()
1310
1311 def resume(self):
1312 """Resume process execution with SIGCONT pre-emptively checking
1313 whether PID has been reused.
1314 On Windows this has the effect of resuming all process threads.
1315 """
1316 if POSIX:
1317 self._send_signal(signal.SIGCONT)
1318 else: # pragma: no cover
1319 self._raise_if_pid_reused()
1320 self._proc.resume()
1321
1322 def terminate(self):
1323 """Terminate the process with SIGTERM pre-emptively checking
1324 whether PID has been reused.
1325 On Windows this is an alias for kill().
1326 """
1327 if POSIX:
1328 self._send_signal(signal.SIGTERM)
1329 else: # pragma: no cover
1330 self._raise_if_pid_reused()
1331 self._proc.kill()
1332
1333 def kill(self):
1334 """Kill the current process with SIGKILL pre-emptively checking
1335 whether PID has been reused.
1336 """
1337 if POSIX:
1338 self._send_signal(signal.SIGKILL)
1339 else: # pragma: no cover
1340 self._raise_if_pid_reused()
1341 self._proc.kill()
1342
1343 def wait(self, timeout=None):
1344 """Wait for the process to terminate and, if the process is a
1345 child of os.getpid(), also return its exit code, else None.
1346 On Windows there's no such limitation (exit code is always
1347 returned).
1348
1349 If the process is already terminated immediately return None
1350 instead of raising NoSuchProcess.
1351
1352 If *timeout* (in seconds) is specified and process is still
1353 alive raise TimeoutExpired.
1354
1355 To wait for multiple Process(es) use psutil.wait_procs().
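
Example (illustrative, POSIX):

>>> import subprocess
>>> import psutil
>>> sproc = subprocess.Popen(["sleep", "2"])
>>> p = psutil.Process(sproc.pid)
>>> p.wait(timeout=5)
0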
1356 """
1357 if timeout is not None and not timeout >= 0:
1358 msg = "timeout must be a non-negative number"
1359 raise ValueError(msg)
1360 if self._exitcode is not _SENTINEL:
1361 return self._exitcode
1362 self._exitcode = self._proc.wait(timeout)
1363 return self._exitcode
1364
1365
1366# The valid attr names which can be processed by Process.as_dict().
1367# fmt: off
1368_as_dict_attrnames = {
1369 x for x in dir(Process) if not x.startswith("_") and x not in
1370 {'send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
1371 'is_running', 'as_dict', 'parent', 'parents', 'children', 'rlimit',
1372 'connections', 'oneshot'}
1373}
1374# fmt: on
1375
1376
1377# =====================================================================
1378# --- Popen class
1379# =====================================================================
1380
1381
1382class Popen(Process):
1383 """Same as subprocess.Popen, but in addition it provides all
1384 psutil.Process methods in a single class.
1385 For the following methods which are common to both classes, psutil
1386 implementation takes precedence:
1387
1388 * send_signal()
1389 * terminate()
1390 * kill()
1391
1392 This is done in order to avoid killing another process in case its
1393 PID has been reused, fixing BPO-6973.
1394
1395 >>> import psutil
1396 >>> from subprocess import PIPE
1397 >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
1398 >>> p.name()
1399 'python'
1400 >>> p.uids()
1401 user(real=1000, effective=1000, saved=1000)
1402 >>> p.username()
1403 'giampaolo'
1404 >>> p.communicate()
1405 ('hi', None)
1406 >>> p.terminate()
1407 >>> p.wait(timeout=2)
1408 0
1409 >>>
1410 """
1411
1412 def __init__(self, *args, **kwargs):
1413 # Explicitly avoid raising NoSuchProcess in case the process
1414 # spawned by subprocess.Popen terminates too quickly, see:
1415 # https://github.com/giampaolo/psutil/issues/193
1416 self.__subproc = subprocess.Popen(*args, **kwargs)
1417 self._init(self.__subproc.pid, _ignore_nsp=True)
1418
1419 def __dir__(self):
1420 return sorted(set(dir(Popen) + dir(subprocess.Popen)))
1421
1422 def __enter__(self):
1423 if hasattr(self.__subproc, '__enter__'):
1424 self.__subproc.__enter__()
1425 return self
1426
1427 def __exit__(self, *args, **kwargs):
1428 if hasattr(self.__subproc, '__exit__'):
1429 return self.__subproc.__exit__(*args, **kwargs)
1430 else:
1431 if self.stdout:
1432 self.stdout.close()
1433 if self.stderr:
1434 self.stderr.close()
1435 try:
1436 # Flushing a BufferedWriter may raise an error.
1437 if self.stdin:
1438 self.stdin.close()
1439 finally:
1440 # Wait for the process to terminate, to avoid zombies.
1441 self.wait()
1442
1443 def __getattribute__(self, name):
1444 try:
1445 return object.__getattribute__(self, name)
1446 except AttributeError:
1447 try:
1448 return object.__getattribute__(self.__subproc, name)
1449 except AttributeError:
1450 msg = f"{self.__class__!r} has no attribute {name!r}"
1451 raise AttributeError(msg) from None
1452
1453 def wait(self, timeout=None):
1454 if self.__subproc.returncode is not None:
1455 return self.__subproc.returncode
1456 ret = super().wait(timeout)
1457 self.__subproc.returncode = ret
1458 return ret
1459
1460
1461# =====================================================================
1462# --- system processes related functions
1463# =====================================================================
1464
1465
1466def pids():
1467 """Return a list of currently running PIDs."""
1468 global _LOWEST_PID
1469 ret = sorted(_psplatform.pids())
1470 _LOWEST_PID = ret[0]
1471 return ret
1472
1473
1474def pid_exists(pid):
1475 """Return True if given PID exists in the current process list.
1476 This is faster than doing "pid in psutil.pids()" and
1477 should be preferred.
1478 """
1479 if pid < 0:
1480 return False
1481 elif pid == 0 and POSIX:
1482 # On POSIX we use os.kill() to determine PID existence.
1483 # According to "man 2 kill" PID 0 has a special meaning
1484 # though: it refers to <<every process in the process
1485 # group of the calling process>> and that is not what we want
1486 # to do here.
1487 return pid in pids()
1488 else:
1489 return _psplatform.pid_exists(pid)
1490
1491
1492_pmap = {}
1493_pids_reused = set()
1494
1495
1496def process_iter(attrs=None, ad_value=None):
1497 """Return a generator yielding a Process instance for all
1498 running processes.
1499
1500 Every new Process instance is only created once and then cached
1501 into an internal table which is updated every time this is used.
1502 Cache can optionally be cleared via `process_iter.cache_clear()`.
1503
1504 The sorting order in which processes are yielded is based on
1505 their PIDs.
1506
1507 *attrs* and *ad_value* have the same meaning as in
1508 Process.as_dict(). If *attrs* is specified as_dict() is called
1509 and the resulting dict is stored as an 'info' attribute attached
1510 to the returned Process instance.
1511 If *attrs* is an empty list it will retrieve all process info
1512 (slow).
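
Example (illustrative; PIDs and names are made up):

>>> import psutil
>>> for proc in psutil.process_iter(attrs=['pid', 'name']):
...     print(proc.info)
...
{'pid': 1, 'name': 'systemd'}
{'pid': 2, 'name': 'kthreadd'}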
1513 """
1514 global _pmap
1515
1516 def add(pid):
1517 proc = Process(pid)
1518 pmap[proc.pid] = proc
1519 return proc
1520
1521 def remove(pid):
1522 pmap.pop(pid, None)
1523
1524 pmap = _pmap.copy()
1525 a = set(pids())
1526 b = set(pmap.keys())
1527 new_pids = a - b
1528 gone_pids = b - a
1529 for pid in gone_pids:
1530 remove(pid)
1531 while _pids_reused:
1532 pid = _pids_reused.pop()
1533 debug(f"refreshing Process instance for reused PID {pid}")
1534 remove(pid)
1535 try:
1536 ls = sorted(list(pmap.items()) + list(dict.fromkeys(new_pids).items()))
1537 for pid, proc in ls:
1538 try:
1539 if proc is None: # new process
1540 proc = add(pid)
1541 if attrs is not None:
1542 proc.info = proc.as_dict(attrs=attrs, ad_value=ad_value)
1543 yield proc
1544 except NoSuchProcess:
1545 remove(pid)
1546 finally:
1547 _pmap = pmap
1548
1549
1550process_iter.cache_clear = lambda: _pmap.clear() # noqa: PLW0108
1551process_iter.cache_clear.__doc__ = "Clear process_iter() internal cache."
1552
1553
1554def wait_procs(procs, timeout=None, callback=None):
1555 """Convenience function which waits for a list of processes to
1556 terminate.
1557
1558 Return a (gone, alive) tuple indicating which processes
1559 are gone and which ones are still alive.
1560
1561 The gone ones will have a new *returncode* attribute indicating
1562 process exit status (may be None).
1563
1564 *callback* is a function which gets called every time a process
1565 terminates (a Process instance is passed as callback argument).
1566
1567 Function will return as soon as all processes terminate or when
1568 *timeout* occurs.
1569 Unlike Process.wait(), it will not raise TimeoutExpired if
1570 *timeout* occurs.
1571
1572 Typical use case is:
1573
1574 - send SIGTERM to a list of processes
1575 - give them some time to terminate
1576 - send SIGKILL to those which are still alive
1577
1578 Example:
1579
1580 >>> def on_terminate(proc):
1581 ... print("process {} terminated".format(proc))
1582 ...
1583 >>> for p in procs:
1584 ... p.terminate()
1585 ...
1586 >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
1587 >>> for p in alive:
1588 ... p.kill()
1589 """
1590
1591 def check_gone(proc, timeout):
1592 try:
1593 returncode = proc.wait(timeout=timeout)
1594 except (TimeoutExpired, subprocess.TimeoutExpired):
1595 pass
1596 else:
1597 if returncode is not None or not proc.is_running():
1598 # Set new Process instance attribute.
1599 proc.returncode = returncode
1600 gone.add(proc)
1601 if callback is not None:
1602 callback(proc)
1603
1604 if timeout is not None and not timeout >= 0:
1605 msg = f"timeout must be a non-negative number, got {timeout}"
1606 raise ValueError(msg)
1607 gone = set()
1608 alive = set(procs)
1609 if callback is not None and not callable(callback):
1610 msg = f"callback {callback!r} is not a callable"
1611 raise TypeError(msg)
1612 if timeout is not None:
1613 deadline = _timer() + timeout
1614
1615 while alive:
1616 if timeout is not None and timeout <= 0:
1617 break
1618 for proc in alive:
1619 # Make sure that every complete iteration (all processes)
1620 # will last max 1 sec.
1621 # We do this because we don't want to wait too long on a
1622 # single process: in case it terminates too late other
1623 # processes may disappear in the meantime and their PID
1624 # reused.
1625 max_timeout = 1.0 / len(alive)
1626 if timeout is not None:
1627 timeout = min((deadline - _timer()), max_timeout)
1628 if timeout <= 0:
1629 break
1630 check_gone(proc, timeout)
1631 else:
1632 check_gone(proc, max_timeout)
1633 alive = alive - gone # noqa: PLR6104
1634
1635 if alive:
1636 # Last attempt over processes survived so far.
1637 # timeout == 0 won't make this function wait any further.
1638 for proc in alive:
1639 check_gone(proc, 0)
1640 alive = alive - gone # noqa: PLR6104
1641
1642 return (list(gone), list(alive))
1643
1644
1645# =====================================================================
1646# --- CPU related functions
1647# =====================================================================
1648
1649
1650def cpu_count(logical=True):
1651 """Return the number of logical CPUs in the system (same as
1652 os.cpu_count()).
1653
1654 If *logical* is False return the number of physical cores only
1655 (e.g. hyper thread CPUs are excluded).
1656
1657 Return None if undetermined.
1658
1659 The return value is cached after first call.
1660 If desired cache can be cleared like this:
1661
1662 >>> psutil.cpu_count.cache_clear()
1663 """
1664 if logical:
1665 ret = _psplatform.cpu_count_logical()
1666 else:
1667 ret = _psplatform.cpu_count_cores()
1668 if ret is not None and ret < 1:
1669 ret = None
1670 return ret
1671
1672
1673def cpu_times(percpu=False):
1674 """Return system-wide CPU times as a namedtuple.
1675 Every CPU time represents the seconds the CPU has spent in the
1676 given mode. The availability of the namedtuple's fields varies
1677 depending on the platform:
1678
1679 - user
1680 - system
1681 - idle
1682 - nice (UNIX)
1683 - iowait (Linux)
1684 - irq (Linux, FreeBSD)
1685 - softirq (Linux)
1686 - steal (Linux >= 2.6.11)
1687 - guest (Linux >= 2.6.24)
1688 - guest_nice (Linux >= 3.2.0)
1689
1690 When *percpu* is True return a list of namedtuples for each CPU.
1691 First element of the list refers to first CPU, second element
1692 to second CPU and so on.
1693 The order of the list is consistent across calls.
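
Example (illustrative, Linux; numbers are made up):

>>> import psutil
>>> psutil.cpu_times()
scputimes(user=17411.7, nice=77.99, system=3797.02, idle=51266.57, iowait=732.58, irq=0.0, softirq=142.43, steal=0.0, guest=0.0, guest_nice=0.0)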
1694 """
1695 if not percpu:
1696 return _psplatform.cpu_times()
1697 else:
1698 return _psplatform.per_cpu_times()
1699
1700
1701try:
1702 _last_cpu_times = {threading.current_thread().ident: cpu_times()}
1703except Exception: # noqa: BLE001
1704 # Don't want to crash at import time.
1705 _last_cpu_times = {}
1706
1707try:
1708 _last_per_cpu_times = {
1709 threading.current_thread().ident: cpu_times(percpu=True)
1710 }
1711except Exception: # noqa: BLE001
1712 # Don't want to crash at import time.
1713 _last_per_cpu_times = {}
1714
1715
1716def _cpu_tot_time(times):
1717 """Given a cpu_times() namedtuple, calculate the total CPU time
1718 (including idle time).
1719 """
1720 tot = sum(times)
1721 if LINUX:
1722 # On Linux guest times are already accounted in "user" or
1723 # "nice" times, so we subtract them from total.
1724 # Htop does the same. References:
1725 # https://github.com/giampaolo/psutil/pull/940
1726 # http://unix.stackexchange.com/questions/178045
1727 # https://github.com/torvalds/linux/blob/
1728 # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/
1729 # cputime.c#L158
1730 tot -= getattr(times, "guest", 0) # Linux 2.6.24+
1731 tot -= getattr(times, "guest_nice", 0) # Linux 3.2.0+
1732 return tot
1733
1734
1735def _cpu_busy_time(times):
1736 """Given a cpu_times() namedtuple, calculate the busy CPU time.
1737 We do so by subtracting all idle CPU times.
1738 """
1739 busy = _cpu_tot_time(times)
1740 busy -= times.idle
1741 # Linux: "iowait" is time during which the CPU does not do anything
1742 # (waits for IO to complete). On Linux IO wait is *not* accounted
1743 # in "idle" time so we subtract it. Htop does the same.
1744 # References:
1745 # https://github.com/torvalds/linux/blob/
1746 # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/cputime.c#L244
1747 busy -= getattr(times, "iowait", 0)
1748 return busy
1749
1750
1751def _cpu_times_deltas(t1, t2):
1752 assert t1._fields == t2._fields, (t1, t2)
1753 field_deltas = []
1754 for field in _psplatform.scputimes._fields:
1755 field_delta = getattr(t2, field) - getattr(t1, field)
1756 # CPU times are always supposed to increase over time
1757 # or at least remain the same and that's because time
1758 # cannot go backwards.
1759 # Surprisingly sometimes this might not be the case (at
1760 # least on Windows and Linux), see:
1761 # https://github.com/giampaolo/psutil/issues/392
1762 # https://github.com/giampaolo/psutil/issues/645
1763 # https://github.com/giampaolo/psutil/issues/1210
1764 # Trim negative deltas to zero to ignore decreasing fields.
1765 # top does the same. Reference:
1766 # https://gitlab.com/procps-ng/procps/blob/v3.3.12/top/top.c#L5063
1767 field_delta = max(0, field_delta)
1768 field_deltas.append(field_delta)
1769 return _psplatform.scputimes(*field_deltas)
1770
1771
1772def cpu_percent(interval=None, percpu=False):
1773 """Return a float representing the current system-wide CPU
1774 utilization as a percentage.
1775
1776 When *interval* is > 0.0 compares system CPU times elapsed before
1777 and after the interval (blocking).
1778
    When *interval* is 0.0 or None compares system CPU times elapsed
    since the last call or module import, returning immediately
    (non-blocking). That means the first time this is called it will
    return a meaningless 0.0 value which you should ignore.
    In this case, for accuracy, it is recommended that this function
    be called with at least 0.1 seconds between calls.
1785
1786 When *percpu* is True returns a list of floats representing the
1787 utilization as a percentage for each CPU.
    The first element of the list refers to the first CPU, the second
    element to the second CPU and so on.
1790 The order of the list is consistent across calls.
1791
1792 Examples:
1793
1794 >>> # blocking, system-wide
1795 >>> psutil.cpu_percent(interval=1)
1796 2.0
1797 >>>
1798 >>> # blocking, per-cpu
1799 >>> psutil.cpu_percent(interval=1, percpu=True)
1800 [2.0, 1.0]
1801 >>>
1802 >>> # non-blocking (percentage since last call)
1803 >>> psutil.cpu_percent(interval=None)
1804 2.9
1805 >>>
1806 """
1807 tid = threading.current_thread().ident
1808 blocking = interval is not None and interval > 0.0
1809 if interval is not None and interval < 0:
1810 msg = f"interval is not positive (got {interval})"
1811 raise ValueError(msg)
1812
1813 def calculate(t1, t2):
1814 times_delta = _cpu_times_deltas(t1, t2)
1815 all_delta = _cpu_tot_time(times_delta)
1816 busy_delta = _cpu_busy_time(times_delta)
1817
1818 try:
1819 busy_perc = (busy_delta / all_delta) * 100
1820 except ZeroDivisionError:
1821 return 0.0
1822 else:
1823 return round(busy_perc, 1)
1824
1825 # system-wide usage
1826 if not percpu:
1827 if blocking:
1828 t1 = cpu_times()
1829 time.sleep(interval)
1830 else:
1831 t1 = _last_cpu_times.get(tid) or cpu_times()
1832 _last_cpu_times[tid] = cpu_times()
1833 return calculate(t1, _last_cpu_times[tid])
1834 # per-cpu usage
1835 else:
1836 ret = []
1837 if blocking:
1838 tot1 = cpu_times(percpu=True)
1839 time.sleep(interval)
1840 else:
1841 tot1 = _last_per_cpu_times.get(tid) or cpu_times(percpu=True)
1842 _last_per_cpu_times[tid] = cpu_times(percpu=True)
1843 for t1, t2 in zip(tot1, _last_per_cpu_times[tid]):
1844 ret.append(calculate(t1, t2))
1845 return ret
1846
1847
1848# Use a separate dict for cpu_times_percent(), so it's independent from
1849# cpu_percent() and they can both be used within the same program.
1850_last_cpu_times_2 = _last_cpu_times.copy()
1851_last_per_cpu_times_2 = _last_per_cpu_times.copy()
1852
1853
1854def cpu_times_percent(interval=None, percpu=False):
1855 """Same as cpu_percent() but provides utilization percentages
1856 for each specific CPU time as is returned by cpu_times().
1857 For instance, on Linux we'll get:
1858
1859 >>> cpu_times_percent()
1860 cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
1861 irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
1862 >>>
1863
1864 *interval* and *percpu* arguments have the same meaning as in
1865 cpu_percent().
1866 """
1867 tid = threading.current_thread().ident
1868 blocking = interval is not None and interval > 0.0
1869 if interval is not None and interval < 0:
1870 msg = f"interval is not positive (got {interval!r})"
1871 raise ValueError(msg)
1872
1873 def calculate(t1, t2):
1874 nums = []
1875 times_delta = _cpu_times_deltas(t1, t2)
1876 all_delta = _cpu_tot_time(times_delta)
1877 # "scale" is the value to multiply each delta with to get percentages.
1878 # We use "max" to avoid division by zero (if all_delta is 0, then all
1879 # fields are 0 so percentages will be 0 too. all_delta cannot be a
1880 # fraction because cpu times are integers)
1881 scale = 100.0 / max(1, all_delta)
1882 for field_delta in times_delta:
1883 field_perc = field_delta * scale
1884 field_perc = round(field_perc, 1)
1885 # make sure we don't return negative values or values over 100%
1886 field_perc = min(max(0.0, field_perc), 100.0)
1887 nums.append(field_perc)
1888 return _psplatform.scputimes(*nums)
1889
1890 # system-wide usage
1891 if not percpu:
1892 if blocking:
1893 t1 = cpu_times()
1894 time.sleep(interval)
1895 else:
1896 t1 = _last_cpu_times_2.get(tid) or cpu_times()
1897 _last_cpu_times_2[tid] = cpu_times()
1898 return calculate(t1, _last_cpu_times_2[tid])
1899 # per-cpu usage
1900 else:
1901 ret = []
1902 if blocking:
1903 tot1 = cpu_times(percpu=True)
1904 time.sleep(interval)
1905 else:
1906 tot1 = _last_per_cpu_times_2.get(tid) or cpu_times(percpu=True)
1907 _last_per_cpu_times_2[tid] = cpu_times(percpu=True)
1908 for t1, t2 in zip(tot1, _last_per_cpu_times_2[tid]):
1909 ret.append(calculate(t1, t2))
1910 return ret
1911
1912
1913def cpu_stats():
1914 """Return CPU statistics."""
1915 return _psplatform.cpu_stats()
1916
1917
1918if hasattr(_psplatform, "cpu_freq"):
1919
1920 def cpu_freq(percpu=False):
1921 """Return CPU frequency as a namedtuple including current,
        min and max frequency expressed in MHz.
1923
1924 If *percpu* is True and the system supports per-cpu frequency
1925 retrieval (Linux only) a list of frequencies is returned for
        each CPU. If not, a list with one element is returned.
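
        Example (values in MHz; illustrative):

        >>> psutil.cpu_freq()
        scpufreq(current=2190.5, min=800.0, max=3500.0)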
1927 """
1928 ret = _psplatform.cpu_freq()
1929 if percpu:
1930 return ret
1931 else:
1932 num_cpus = float(len(ret))
1933 if num_cpus == 0:
1934 return None
1935 elif num_cpus == 1:
1936 return ret[0]
1937 else:
1938 currs, mins, maxs = 0.0, 0.0, 0.0
1939 set_none = False
1940 for cpu in ret:
1941 currs += cpu.current
1942 # On Linux if /proc/cpuinfo is used min/max are set
1943 # to None.
1944 if LINUX and cpu.min is None:
1945 set_none = True
1946 continue
1947 mins += cpu.min
1948 maxs += cpu.max
1949
1950 current = currs / num_cpus
1951
1952 if set_none:
1953 min_ = max_ = None
1954 else:
1955 min_ = mins / num_cpus
1956 max_ = maxs / num_cpus
1957
1958 return _common.scpufreq(current, min_, max_)
1959
1960 __all__.append("cpu_freq")
1961
1962
1963if hasattr(os, "getloadavg") or hasattr(_psplatform, "getloadavg"):
    # Perform this hasattr check once at import time to either use the
    # platform-based code or proxy straight from the os module.
1966 if hasattr(os, "getloadavg"):
1967 getloadavg = os.getloadavg
1968 else:
1969 getloadavg = _psplatform.getloadavg
1970
1971 __all__.append("getloadavg")
1972
1973
1974# =====================================================================
1975# --- system memory related functions
1976# =====================================================================
1977
1978
1979def virtual_memory():
1980 """Return statistics about system memory usage as a namedtuple
1981 including the following fields, expressed in bytes:
1982
1983 - total:
1984 total physical memory available.
1985
1986 - available:
1987 the memory that can be given instantly to processes without the
1988 system going into swap.
    This is calculated by summing different memory values depending
    on the platform and it is supposed to be used to monitor actual
    memory usage in a cross-platform fashion.
1992
1993 - percent:
1994 the percentage usage calculated as (total - available) / total * 100
1995
1996 - used:
1997 memory used, calculated differently depending on the platform and
1998 designed for informational purposes only:
1999 macOS: active + wired
2000 BSD: active + wired + cached
2001 Linux: total - free
2002
2003 - free:
2004 memory not being used at all (zeroed) that is readily available;
2005 note that this doesn't reflect the actual memory available
2006 (use 'available' instead)
2007
2008 Platform-specific fields:
2009
2010 - active (UNIX):
2011 memory currently in use or very recently used, and so it is in RAM.
2012
2013 - inactive (UNIX):
2014 memory that is marked as not used.
2015
2016 - buffers (BSD, Linux):
2017 cache for things like file system metadata.
2018
2019 - cached (BSD, macOS):
2020 cache for various things.
2021
2022 - wired (macOS, BSD):
2023 memory that is marked to always stay in RAM. It is never moved to disk.
2024
2025 - shared (BSD):
2026 memory that may be simultaneously accessed by multiple processes.
2027
2028 The sum of 'used' and 'available' does not necessarily equal total.
2029 On Windows 'available' and 'free' are the same.
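
    Example (values are illustrative; only the portable fields are
    shown):

    >>> mem = psutil.virtual_memory()
    >>> mem.total, mem.available, mem.percent
    (8374149120, 2081050624, 75.1)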
2030 """
2031 global _TOTAL_PHYMEM
2032 ret = _psplatform.virtual_memory()
2033 # cached for later use in Process.memory_percent()
2034 _TOTAL_PHYMEM = ret.total
2035 return ret
2036
2037
2038def swap_memory():
2039 """Return system swap memory statistics as a namedtuple including
2040 the following fields:
2041
2042 - total: total swap memory in bytes
2043 - used: used swap memory in bytes
2044 - free: free swap memory in bytes
2045 - percent: the percentage usage
2046 - sin: no. of bytes the system has swapped in from disk (cumulative)
2047 - sout: no. of bytes the system has swapped out from disk (cumulative)
2048
2049 'sin' and 'sout' on Windows are meaningless and always set to 0.
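
    Example (values are illustrative):

    >>> swap = psutil.swap_memory()
    >>> swap.total, swap.used, swap.percent
    (2147483648, 536870912, 25.0)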
2050 """
2051 return _psplatform.swap_memory()
2052
2053
2054# =====================================================================
2055# --- disks/partitions related functions
2056# =====================================================================
2057
2058
2059def disk_usage(path):
2060 """Return disk usage statistics about the given *path* as a
2061 namedtuple including total, used and free space expressed in bytes
2062 plus the percentage usage.
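
    Example (values are illustrative):

    >>> usage = psutil.disk_usage('/')
    >>> usage.total, usage.used, usage.percent
    (21378641920, 4809781248, 23.7)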
2063 """
2064 return _psplatform.disk_usage(path)
2065
2066
2067def disk_partitions(all=False):
2068 """Return mounted partitions as a list of
2069 (device, mountpoint, fstype, opts) namedtuple.
2070 'opts' field is a raw string separated by commas indicating mount
2071 options which may vary depending on the platform.
2072
    If the *all* parameter is False return physical devices only and
    ignore all others.
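
    Example (a minimal sketch; device names are illustrative):

    >>> parts = psutil.disk_partitions()
    >>> [(p.device, p.mountpoint, p.fstype) for p in parts]
    [('/dev/sda1', '/', 'ext4'), ('/dev/sda2', '/home', 'ext4')]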
2075 """
2076 return _psplatform.disk_partitions(all)
2077
2078
2079def disk_io_counters(perdisk=False, nowrap=True):
2080 """Return system disk I/O statistics as a namedtuple including
2081 the following fields:
2082
2083 - read_count: number of reads
2084 - write_count: number of writes
2085 - read_bytes: number of bytes read
2086 - write_bytes: number of bytes written
2087 - read_time: time spent reading from disk (in ms)
2088 - write_time: time spent writing to disk (in ms)
2089
2090 Platform specific:
2091
2092 - busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms)
2093 - read_merged_count (Linux): number of merged reads
2094 - write_merged_count (Linux): number of merged writes
2095
2096 If *perdisk* is True return the same information for every
2097 physical disk installed on the system as a dictionary
2098 with partition names as the keys and the namedtuple
2099 described above as the values.
2100
    If *nowrap* is True it detects and adjusts the numbers which
    overflow and wrap (restart from 0), adding "old value" to
    "new value" so that the returned numbers will always increase or
    remain the same, but never decrease.
2105 "disk_io_counters.cache_clear()" can be used to invalidate the
2106 cache.
2107
    On recent Windows versions the 'diskperf -y' command may need to be
    executed first, otherwise this function won't find any disks.
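
    Example (system-wide; values are illustrative and platform-specific
    fields are omitted):

    >>> io = psutil.disk_io_counters()
    >>> io.read_count, io.write_count, io.read_bytes, io.write_bytes
    (8141, 2431, 290203392, 537720832)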
2110 """
2111 kwargs = dict(perdisk=perdisk) if LINUX else {}
2112 rawdict = _psplatform.disk_io_counters(**kwargs)
2113 if not rawdict:
2114 return {} if perdisk else None
2115 if nowrap:
2116 rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters')
2117 nt = getattr(_psplatform, "sdiskio", _common.sdiskio)
2118 if perdisk:
2119 for disk, fields in rawdict.items():
2120 rawdict[disk] = nt(*fields)
2121 return rawdict
2122 else:
2123 return nt(*(sum(x) for x in zip(*rawdict.values())))
2124
2125
2126disk_io_counters.cache_clear = functools.partial(
2127 _wrap_numbers.cache_clear, 'psutil.disk_io_counters'
2128)
2129disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
2130
2131
2132# =====================================================================
2133# --- network related functions
2134# =====================================================================
2135
2136
2137def net_io_counters(pernic=False, nowrap=True):
2138 """Return network I/O statistics as a namedtuple including
2139 the following fields:
2140
2141 - bytes_sent: number of bytes sent
2142 - bytes_recv: number of bytes received
2143 - packets_sent: number of packets sent
2144 - packets_recv: number of packets received
2145 - errin: total number of errors while receiving
2146 - errout: total number of errors while sending
2147 - dropin: total number of incoming packets which were dropped
2148 - dropout: total number of outgoing packets which were dropped
2149 (always 0 on macOS and BSD)
2150
2151 If *pernic* is True return the same information for every
2152 network interface installed on the system as a dictionary
2153 with network interface names as the keys and the namedtuple
2154 described above as the values.
2155
    If *nowrap* is True it detects and adjusts the numbers which
    overflow and wrap (restart from 0), adding "old value" to
    "new value" so that the returned numbers will always increase or
    remain the same, but never decrease.
2160 "net_io_counters.cache_clear()" can be used to invalidate the
2161 cache.
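
    Example (system-wide; values are illustrative):

    >>> net = psutil.net_io_counters()
    >>> net.bytes_sent, net.bytes_recv
    (14508483, 62749361)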
2162 """
2163 rawdict = _psplatform.net_io_counters()
2164 if not rawdict:
2165 return {} if pernic else None
2166 if nowrap:
2167 rawdict = _wrap_numbers(rawdict, 'psutil.net_io_counters')
2168 if pernic:
2169 for nic, fields in rawdict.items():
2170 rawdict[nic] = _common.snetio(*fields)
2171 return rawdict
2172 else:
2173 return _common.snetio(*[sum(x) for x in zip(*rawdict.values())])
2174
2175
2176net_io_counters.cache_clear = functools.partial(
2177 _wrap_numbers.cache_clear, 'psutil.net_io_counters'
2178)
2179net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
2180
2181
2182def net_connections(kind='inet'):
2183 """Return system-wide socket connections as a list of
2184 (fd, family, type, laddr, raddr, status, pid) namedtuples.
2185 In case of limited privileges 'fd' and 'pid' may be set to -1
2186 and None respectively.
2187 The *kind* parameter filters for connections that fit the
2188 following criteria:
2189
2190 +------------+----------------------------------------------------+
2191 | Kind Value | Connections using |
2192 +------------+----------------------------------------------------+
2193 | inet | IPv4 and IPv6 |
2194 | inet4 | IPv4 |
2195 | inet6 | IPv6 |
2196 | tcp | TCP |
2197 | tcp4 | TCP over IPv4 |
2198 | tcp6 | TCP over IPv6 |
2199 | udp | UDP |
2200 | udp4 | UDP over IPv4 |
2201 | udp6 | UDP over IPv6 |
2202 | unix | UNIX socket (both UDP and TCP protocols) |
2203 | all | the sum of all the possible families and protocols |
2204 +------------+----------------------------------------------------+
2205
2206 On macOS this function requires root privileges.
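
    Example (a minimal sketch; the result depends on the running
    system):

    >>> conns = psutil.net_connections(kind='tcp')
    >>> listening = [c for c in conns if c.status == psutil.CONN_LISTEN]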
2207 """
2208 _check_conn_kind(kind)
2209 return _psplatform.net_connections(kind)
2210
2211
2212def net_if_addrs():
    """Return the addresses associated with each NIC (network interface
    card) installed on the system as a dictionary whose keys are the
    NIC names and whose values are lists of namedtuples, one for each
    address assigned to the NIC. Each namedtuple includes 5 fields:
2217
2218 - family: can be either socket.AF_INET, socket.AF_INET6 or
2219 psutil.AF_LINK, which refers to a MAC address.
    - address: the primary address; it is always set.
    - netmask: the netmask address; like 'broadcast' and 'ptp' it may
      be None.
    - ptp: stands for "point to point" and references the
      destination address on a point to point interface
      (typically a VPN).
    - broadcast: the broadcast address; 'broadcast' and 'ptp' are
      mutually exclusive.
2226
2227 Note: you can have more than one address of the same family
2228 associated with each interface.
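
    Example (interface names and addresses are illustrative):

    >>> for nic, addrs in psutil.net_if_addrs().items():
    ...     for addr in addrs:
    ...         print(nic, addr.address)
    lo 127.0.0.1
    eth0 192.168.1.10
    eth0 00:11:22:33:44:55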
2229 """
2230 rawlist = _psplatform.net_if_addrs()
2231 rawlist.sort(key=lambda x: x[1]) # sort by family
2232 ret = collections.defaultdict(list)
2233 for name, fam, addr, mask, broadcast, ptp in rawlist:
2234 try:
2235 fam = socket.AddressFamily(fam)
2236 except ValueError:
2237 if WINDOWS and fam == -1:
2238 fam = _psplatform.AF_LINK
2239 elif (
2240 hasattr(_psplatform, "AF_LINK") and fam == _psplatform.AF_LINK
2241 ):
2242 # Linux defines AF_LINK as an alias for AF_PACKET.
2243 # We re-set the family here so that repr(family)
2244 # will show AF_LINK rather than AF_PACKET
2245 fam = _psplatform.AF_LINK
2246
2247 if fam == _psplatform.AF_LINK:
2248 # The underlying C function may return an incomplete MAC
2249 # address in which case we fill it with null bytes, see:
2250 # https://github.com/giampaolo/psutil/issues/786
2251 separator = ":" if POSIX else "-"
2252 while addr.count(separator) < 5:
2253 addr += f"{separator}00"
2254
2255 nt = _common.snicaddr(fam, addr, mask, broadcast, ptp)
2256
2257 # On Windows broadcast is None, so we determine it via
2258 # ipaddress module.
2259 if WINDOWS and fam in {socket.AF_INET, socket.AF_INET6}:
2260 try:
2261 broadcast = _common.broadcast_addr(nt)
2262 except Exception as err: # noqa: BLE001
2263 debug(err)
2264 else:
2265 if broadcast is not None:
                    # _replace() returns a new namedtuple, so the
                    # result must be reassigned.
                    nt = nt._replace(broadcast=broadcast)
2267
2268 ret[name].append(nt)
2269
2270 return dict(ret)
2271
2272
2273def net_if_stats():
2274 """Return information about each NIC (network interface card)
2275 installed on the system as a dictionary whose keys are the
2276 NIC names and value is a namedtuple with the following fields:
2277
2278 - isup: whether the interface is up (bool)
2279 - duplex: can be either NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or
2280 NIC_DUPLEX_UNKNOWN
    - speed: the NIC speed expressed in megabits per second (Mbit/s);
      if it can't be determined (e.g. 'localhost') it will be set to 0.
2283 - mtu: the maximum transmission unit expressed in bytes.
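
    Example (assuming a NIC named 'eth0'; values are illustrative):

    >>> stats = psutil.net_if_stats()['eth0']
    >>> stats.isup, stats.speed, stats.mtu
    (True, 1000, 1500)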
2284 """
2285 return _psplatform.net_if_stats()
2286
2287
2288# =====================================================================
2289# --- sensors
2290# =====================================================================
2291
2292
2293# Linux, macOS
2294if hasattr(_psplatform, "sensors_temperatures"):
2295
2296 def sensors_temperatures(fahrenheit=False):
2297 """Return hardware temperatures. Each entry is a namedtuple
2298 representing a certain hardware sensor (it may be a CPU, an
2299 hard disk or something else, depending on the OS and its
2300 configuration).
        All temperatures are expressed in Celsius unless *fahrenheit*
        is set to True.
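
        Example (Linux; sensor names and values are illustrative):

        >>> temps = psutil.sensors_temperatures()
        >>> temps['coretemp'][0]
        shwtemp(label='Core 0', current=45.0, high=100.0, critical=100.0)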
2303 """
2304
2305 def convert(n):
2306 if n is not None:
2307 return (float(n) * 9 / 5) + 32 if fahrenheit else n
2308
2309 ret = collections.defaultdict(list)
2310 rawdict = _psplatform.sensors_temperatures()
2311
2312 for name, values in rawdict.items():
2313 while values:
2314 label, current, high, critical = values.pop(0)
2315 current = convert(current)
2316 high = convert(high)
2317 critical = convert(critical)
2318
2319 if high and not critical:
2320 critical = high
2321 elif critical and not high:
2322 high = critical
2323
2324 ret[name].append(
2325 _common.shwtemp(label, current, high, critical)
2326 )
2327
2328 return dict(ret)
2329
2330 __all__.append("sensors_temperatures")
2331
2332
2333# Linux
2334if hasattr(_psplatform, "sensors_fans"):
2335
2336 def sensors_fans():
        """Return fan speeds. Each entry is a namedtuple
        representing a certain hardware sensor.
        All speeds are expressed in RPM (revolutions per minute).
2340 """
2341 return _psplatform.sensors_fans()
2342
2343 __all__.append("sensors_fans")
2344
2345
2346# Linux, Windows, FreeBSD, macOS
2347if hasattr(_psplatform, "sensors_battery"):
2348
2349 def sensors_battery():
2350 """Return battery information. If no battery is installed
2351 returns None.
2352
2353 - percent: battery power left as a percentage.
        - secsleft: a rough approximation of how many seconds are left
          before the battery runs out of power. May be
          POWER_TIME_UNLIMITED or POWER_TIME_UNKNOWN.
2357 - power_plugged: True if the AC power cable is connected.
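
        Example (values are illustrative):

        >>> batt = psutil.sensors_battery()
        >>> batt.percent, batt.power_plugged
        (93.5, False)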
2358 """
2359 return _psplatform.sensors_battery()
2360
2361 __all__.append("sensors_battery")
2362
2363
2364# =====================================================================
2365# --- other system related functions
2366# =====================================================================
2367
2368
2369def boot_time():
2370 """Return the system boot time expressed in seconds since the epoch
2371 (seconds since January 1, 1970, at midnight UTC). The returned
2372 value is based on the system clock, which means it may be affected
2373 by changes such as manual adjustments or time synchronization (e.g.
2374 NTP).
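
    Example (converting to a human-readable date; the value is
    illustrative):

    >>> import datetime
    >>> datetime.datetime.fromtimestamp(psutil.boot_time())
    datetime.datetime(2024, 5, 4, 8, 3, 12)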
2375 """
2376 return _psplatform.boot_time()
2377
2378
2379def users():
2380 """Return users currently connected on the system as a list of
    namedtuples including the following fields:
2382
2383 - user: the name of the user
2384 - terminal: the tty or pseudo-tty associated with the user, if any.
2385 - host: the host name associated with the entry, if any.
2386 - started: the creation time as a floating point number expressed in
2387 seconds since the epoch.
2388 """
2389 return _psplatform.users()
2390
2391
2392# =====================================================================
2393# --- Windows services
2394# =====================================================================
2395
2396
2397if WINDOWS:
2398
2399 def win_service_iter():
2400 """Return a generator yielding a WindowsService instance for all
2401 Windows services installed.
2402 """
2403 return _psplatform.win_service_iter()
2404
2405 def win_service_get(name):
        """Get a Windows service by *name*.
        Raise NoSuchProcess if no service with such a name exists.
2408 """
2409 return _psplatform.win_service_get(name)
2410
2411
2412# =====================================================================
2413
2414
2415def _set_debug(value):
2416 """Enable or disable PSUTIL_DEBUG option, which prints debugging
2417 messages to stderr.
2418 """
2419 import psutil._common
2420
2421 psutil._common.PSUTIL_DEBUG = bool(value)
2422 _psplatform.cext.set_debug(bool(value))
2423
2424
2425del memoize_when_activated