1# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""psutil is a cross-platform library for retrieving information on
6running processes and system utilization (CPU, memory, disks, network,
7sensors) in Python. Supported platforms:
8
9 - Linux
10 - Windows
11 - macOS
12 - FreeBSD
13 - OpenBSD
14 - NetBSD
15 - Sun Solaris
16 - AIX
17
Supported Python versions are CPython 3.6+ and PyPy.
19"""
20
21import collections
22import contextlib
23import datetime
24import functools
25import os
26import signal
27import socket
28import subprocess
29import sys
30import threading
31import time
32
33try:
34 import pwd
35except ImportError:
36 pwd = None
37
38from . import _common
39from ._common import AIX
40from ._common import BSD
41from ._common import CONN_CLOSE
42from ._common import CONN_CLOSE_WAIT
43from ._common import CONN_CLOSING
44from ._common import CONN_ESTABLISHED
45from ._common import CONN_FIN_WAIT1
46from ._common import CONN_FIN_WAIT2
47from ._common import CONN_LAST_ACK
48from ._common import CONN_LISTEN
49from ._common import CONN_NONE
50from ._common import CONN_SYN_RECV
51from ._common import CONN_SYN_SENT
52from ._common import CONN_TIME_WAIT
53from ._common import FREEBSD
54from ._common import LINUX
55from ._common import MACOS
56from ._common import NETBSD
57from ._common import NIC_DUPLEX_FULL
58from ._common import NIC_DUPLEX_HALF
59from ._common import NIC_DUPLEX_UNKNOWN
60from ._common import OPENBSD
61from ._common import OSX # deprecated alias
62from ._common import POSIX
63from ._common import POWER_TIME_UNKNOWN
64from ._common import POWER_TIME_UNLIMITED
65from ._common import STATUS_DEAD
66from ._common import STATUS_DISK_SLEEP
67from ._common import STATUS_IDLE
68from ._common import STATUS_LOCKED
69from ._common import STATUS_PARKED
70from ._common import STATUS_RUNNING
71from ._common import STATUS_SLEEPING
72from ._common import STATUS_STOPPED
73from ._common import STATUS_TRACING_STOP
74from ._common import STATUS_WAITING
75from ._common import STATUS_WAKING
76from ._common import STATUS_ZOMBIE
77from ._common import SUNOS
78from ._common import WINDOWS
79from ._common import AccessDenied
80from ._common import Error
81from ._common import NoSuchProcess
82from ._common import TimeoutExpired
83from ._common import ZombieProcess
84from ._common import debug
85from ._common import memoize_when_activated
86from ._common import wrap_numbers as _wrap_numbers
87
88if LINUX:
89 # This is public API and it will be retrieved from _pslinux.py
90 # via sys.modules.
91 PROCFS_PATH = "/proc"
92
93 from . import _pslinux as _psplatform
94 from ._pslinux import IOPRIO_CLASS_BE # noqa: F401
95 from ._pslinux import IOPRIO_CLASS_IDLE # noqa: F401
96 from ._pslinux import IOPRIO_CLASS_NONE # noqa: F401
97 from ._pslinux import IOPRIO_CLASS_RT # noqa: F401
98
99elif WINDOWS:
100 from . import _pswindows as _psplatform
101 from ._psutil_windows import ABOVE_NORMAL_PRIORITY_CLASS # noqa: F401
102 from ._psutil_windows import BELOW_NORMAL_PRIORITY_CLASS # noqa: F401
103 from ._psutil_windows import HIGH_PRIORITY_CLASS # noqa: F401
104 from ._psutil_windows import IDLE_PRIORITY_CLASS # noqa: F401
105 from ._psutil_windows import NORMAL_PRIORITY_CLASS # noqa: F401
106 from ._psutil_windows import REALTIME_PRIORITY_CLASS # noqa: F401
107 from ._pswindows import CONN_DELETE_TCB # noqa: F401
108 from ._pswindows import IOPRIO_HIGH # noqa: F401
109 from ._pswindows import IOPRIO_LOW # noqa: F401
110 from ._pswindows import IOPRIO_NORMAL # noqa: F401
111 from ._pswindows import IOPRIO_VERYLOW # noqa: F401
112
113elif MACOS:
114 from . import _psosx as _psplatform
115
116elif BSD:
117 from . import _psbsd as _psplatform
118
119elif SUNOS:
120 from . import _pssunos as _psplatform
121 from ._pssunos import CONN_BOUND # noqa: F401
122 from ._pssunos import CONN_IDLE # noqa: F401
123
    # This is public writable API which is read from _pssunos.py
    # via sys.modules.
126 PROCFS_PATH = "/proc"
127
128elif AIX:
129 from . import _psaix as _psplatform
130
    # This is public API and it will be retrieved from _psaix.py
    # via sys.modules.
133 PROCFS_PATH = "/proc"
134
135else: # pragma: no cover
136 msg = f"platform {sys.platform} is not supported"
137 raise NotImplementedError(msg)
138
139
140# fmt: off
141__all__ = [
142 # exceptions
143 "Error", "NoSuchProcess", "ZombieProcess", "AccessDenied",
144 "TimeoutExpired",
145
146 # constants
147 "version_info", "__version__",
148
149 "STATUS_RUNNING", "STATUS_IDLE", "STATUS_SLEEPING", "STATUS_DISK_SLEEP",
150 "STATUS_STOPPED", "STATUS_TRACING_STOP", "STATUS_ZOMBIE", "STATUS_DEAD",
151 "STATUS_WAKING", "STATUS_LOCKED", "STATUS_WAITING", "STATUS_LOCKED",
152 "STATUS_PARKED",
153
154 "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
155 "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
156 "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", "CONN_NONE",
157 # "CONN_IDLE", "CONN_BOUND",
158
159 "AF_LINK",
160
161 "NIC_DUPLEX_FULL", "NIC_DUPLEX_HALF", "NIC_DUPLEX_UNKNOWN",
162
163 "POWER_TIME_UNKNOWN", "POWER_TIME_UNLIMITED",
164
165 "BSD", "FREEBSD", "LINUX", "NETBSD", "OPENBSD", "MACOS", "OSX", "POSIX",
166 "SUNOS", "WINDOWS", "AIX",
167
168 # "RLIM_INFINITY", "RLIMIT_AS", "RLIMIT_CORE", "RLIMIT_CPU", "RLIMIT_DATA",
169 # "RLIMIT_FSIZE", "RLIMIT_LOCKS", "RLIMIT_MEMLOCK", "RLIMIT_NOFILE",
170 # "RLIMIT_NPROC", "RLIMIT_RSS", "RLIMIT_STACK", "RLIMIT_MSGQUEUE",
171 # "RLIMIT_NICE", "RLIMIT_RTPRIO", "RLIMIT_RTTIME", "RLIMIT_SIGPENDING",
172
173 # classes
174 "Process", "Popen",
175
176 # functions
177 "pid_exists", "pids", "process_iter", "wait_procs", # proc
178 "virtual_memory", "swap_memory", # memory
179 "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count", # cpu
180 "cpu_stats", # "cpu_freq", "getloadavg"
181 "net_io_counters", "net_connections", "net_if_addrs", # network
182 "net_if_stats",
183 "disk_io_counters", "disk_partitions", "disk_usage", # disk
184 # "sensors_temperatures", "sensors_battery", "sensors_fans" # sensors
185 "users", "boot_time", # others
186]
187# fmt: on
188
189
190__all__.extend(_psplatform.__extra__all__)
191
192# Linux, FreeBSD
193if hasattr(_psplatform.Process, "rlimit"):
194 # Populate global namespace with RLIM* constants.
195 from . import _psutil_posix
196
197 _globals = globals()
198 _name = None
199 for _name in dir(_psutil_posix):
200 if _name.startswith('RLIM') and _name.isupper():
201 _globals[_name] = getattr(_psutil_posix, _name)
202 __all__.append(_name)
203 del _globals, _name
204
205AF_LINK = _psplatform.AF_LINK
206
207__author__ = "Giampaolo Rodola'"
208__version__ = "7.0.1"
209version_info = tuple(int(num) for num in __version__.split('.'))
210
_timer = time.monotonic
212_TOTAL_PHYMEM = None
213_LOWEST_PID = None
214_SENTINEL = object()
215
216# Sanity check in case the user messed up with psutil installation
217# or did something weird with sys.path. In this case we might end
218# up importing a python module using a C extension module which
219# was compiled for a different version of psutil.
220# We want to prevent that by failing sooner rather than later.
221# See: https://github.com/giampaolo/psutil/issues/564
222if int(__version__.replace('.', '')) != getattr(
223 _psplatform.cext, 'version', None
224):
225 msg = f"version conflict: {_psplatform.cext.__file__!r} C extension "
226 msg += "module was built for another version of psutil"
227 if hasattr(_psplatform.cext, 'version'):
228 v = ".".join(list(str(_psplatform.cext.version)))
229 msg += f" ({v} instead of {__version__})"
230 else:
231 msg += f" (different than {__version__})"
232 what = getattr(
233 _psplatform.cext,
234 "__file__",
235 "the existing psutil install directory",
236 )
237 msg += f"; you may try to 'pip uninstall psutil', manually remove {what}"
238 msg += " or clean the virtual env somehow, then reinstall"
239 raise ImportError(msg)
240
241
242# =====================================================================
243# --- Utils
244# =====================================================================
245
246
247if hasattr(_psplatform, 'ppid_map'):
248 # Faster version (Windows and Linux).
249 _ppid_map = _psplatform.ppid_map
250else: # pragma: no cover
251
252 def _ppid_map():
253 """Return a {pid: ppid, ...} dict for all running processes in
254 one shot. Used to speed up Process.children().
255 """
256 ret = {}
257 for pid in pids():
258 try:
259 ret[pid] = _psplatform.Process(pid).ppid()
260 except (NoSuchProcess, ZombieProcess):
261 pass
262 return ret
263
264
265def _pprint_secs(secs):
266 """Format seconds in a human readable form."""
267 now = time.time()
268 secs_ago = int(now - secs)
269 fmt = "%H:%M:%S" if secs_ago < 60 * 60 * 24 else "%Y-%m-%d %H:%M:%S"
270 return datetime.datetime.fromtimestamp(secs).strftime(fmt)
271
272
273def _check_conn_kind(kind):
274 """Check net_connections()'s `kind` parameter."""
275 kinds = tuple(_common.conn_tmap)
276 if kind not in kinds:
277 msg = f"invalid kind argument {kind!r}; valid ones are: {kinds}"
278 raise ValueError(msg)
279
280
281# =====================================================================
282# --- Process class
283# =====================================================================
284
285
286class Process:
287 """Represents an OS process with the given PID.
    If PID is omitted, the current process PID (os.getpid()) is used.
289 Raise NoSuchProcess if PID does not exist.
290
    Note that most of the methods of this class do not check whether
    the PID of the process being queried has been reused. That means
    you may end up retrieving information for another process.
294
295 The only exceptions for which process identity is pre-emptively
296 checked and guaranteed are:
297
298 - parent()
299 - children()
300 - nice() (set)
301 - ionice() (set)
302 - rlimit() (set)
303 - cpu_affinity (set)
304 - suspend()
305 - resume()
306 - send_signal()
307 - terminate()
308 - kill()
309
310 To prevent this problem for all other methods you can use
311 is_running() before querying the process.
312 """
313
314 def __init__(self, pid=None):
315 self._init(pid)
316
317 def _init(self, pid, _ignore_nsp=False):
318 if pid is None:
319 pid = os.getpid()
320 else:
321 if pid < 0:
322 msg = f"pid must be a positive integer (got {pid})"
323 raise ValueError(msg)
324 try:
325 _psplatform.cext.check_pid_range(pid)
326 except OverflowError as err:
327 msg = "process PID out of range"
328 raise NoSuchProcess(pid, msg=msg) from err
329
330 self._pid = pid
331 self._name = None
332 self._exe = None
333 self._create_time = None
334 self._gone = False
335 self._pid_reused = False
336 self._hash = None
337 self._lock = threading.RLock()
338 # used for caching on Windows only (on POSIX ppid may change)
339 self._ppid = None
340 # platform-specific modules define an _psplatform.Process
341 # implementation class
342 self._proc = _psplatform.Process(pid)
343 self._last_sys_cpu_times = None
344 self._last_proc_cpu_times = None
345 self._exitcode = _SENTINEL
346 self._ident = (self.pid, None)
347 try:
348 self._ident = self._get_ident()
349 except AccessDenied:
350 # This should happen on Windows only, since we use the fast
351 # create time method. AFAIK, on all other platforms we are
352 # able to get create time for all PIDs.
353 pass
354 except ZombieProcess:
            # Zombies can still be queried by this class (although
            # not always) and pids() returns them, so just go on.
357 pass
358 except NoSuchProcess:
359 if not _ignore_nsp:
360 msg = "process PID not found"
361 raise NoSuchProcess(pid, msg=msg) from None
362 self._gone = True
363
364 def _get_ident(self):
365 """Return a (pid, uid) tuple which is supposed to identify a
366 Process instance univocally over time. The PID alone is not
367 enough, as it can be assigned to a new process after this one
368 terminates, so we add process creation time to the mix. We need
369 this in order to prevent killing the wrong process later on.
370 This is also known as PID reuse or PID recycling problem.
371
372 The reliability of this strategy mostly depends on
373 create_time() precision, which is 0.01 secs on Linux. The
374 assumption is that, after a process terminates, the kernel
375 won't reuse the same PID after such a short period of time
376 (0.01 secs). Technically this is inherently racy, but
377 practically it should be good enough.
378
379 NOTE: unreliable on FreeBSD and OpenBSD as ctime is subject to
380 system clock updates.
381 """
382
383 if WINDOWS:
384 # Use create_time() fast method in order to speedup
385 # `process_iter()`. This means we'll get AccessDenied for
386 # most ADMIN processes, but that's fine since it means
387 # we'll also get AccessDenied on kill().
388 # https://github.com/giampaolo/psutil/issues/2366#issuecomment-2381646555
389 self._create_time = self._proc.create_time(fast_only=True)
390 return (self.pid, self._create_time)
        elif LINUX or NETBSD or MACOS:
392 # Use 'monotonic' process starttime since boot to form unique
393 # process identity, since it is stable over changes to system
394 # time.
395 return (self.pid, self._proc.create_time(monotonic=True))
396 else:
397 return (self.pid, self.create_time())
398
399 def __str__(self):
400 info = collections.OrderedDict()
401 info["pid"] = self.pid
402 if self._name:
403 info['name'] = self._name
404 with self.oneshot():
405 if self._pid_reused:
406 info["status"] = "terminated + PID reused"
407 else:
408 try:
409 info["name"] = self.name()
410 info["status"] = self.status()
411 except ZombieProcess:
412 info["status"] = "zombie"
413 except NoSuchProcess:
414 info["status"] = "terminated"
415 except AccessDenied:
416 pass
417
418 if self._exitcode not in {_SENTINEL, None}:
419 info["exitcode"] = self._exitcode
420 if self._create_time is not None:
421 info['started'] = _pprint_secs(self._create_time)
422
423 return "{}.{}({})".format(
424 self.__class__.__module__,
425 self.__class__.__name__,
426 ", ".join([f"{k}={v!r}" for k, v in info.items()]),
427 )
428
429 __repr__ = __str__
430
431 def __eq__(self, other):
432 # Test for equality with another Process object based
433 # on PID and creation time.
434 if not isinstance(other, Process):
435 return NotImplemented
436 if OPENBSD or NETBSD: # pragma: no cover
437 # Zombie processes on Open/NetBSD have a creation time of
438 # 0.0. This covers the case when a process started normally
439 # (so it has a ctime), then it turned into a zombie. It's
440 # important to do this because is_running() depends on
441 # __eq__.
442 pid1, ident1 = self._ident
443 pid2, ident2 = other._ident
444 if pid1 == pid2:
445 if ident1 and not ident2:
446 try:
447 return self.status() == STATUS_ZOMBIE
448 except Error:
449 pass
450 return self._ident == other._ident
451
452 def __ne__(self, other):
453 return not self == other
454
455 def __hash__(self):
456 if self._hash is None:
457 self._hash = hash(self._ident)
458 return self._hash
459
460 def _raise_if_pid_reused(self):
461 """Raises NoSuchProcess in case process PID has been reused."""
        if not self.is_running() and self._pid_reused:
463 # We may directly raise NSP in here already if PID is just
464 # not running, but I prefer NSP to be raised naturally by
465 # the actual Process API call. This way unit tests will tell
466 # us if the API is broken (aka don't raise NSP when it
467 # should). We also remain consistent with all other "get"
468 # APIs which don't use _raise_if_pid_reused().
469 msg = "process no longer exists and its PID has been reused"
470 raise NoSuchProcess(self.pid, self._name, msg=msg)
471
472 @property
473 def pid(self):
474 """The process PID."""
475 return self._pid
476
477 # --- utility methods
478
479 @contextlib.contextmanager
480 def oneshot(self):
481 """Utility context manager which considerably speeds up the
482 retrieval of multiple process information at the same time.
483
        Internally, different pieces of process info (e.g. name, ppid,
        uids, gids, ...) may be fetched by using the same routine, but
        only one value is returned and the others are discarded.
        When using this context manager the internal routine is
        executed once (in the example below on name()) and the
        other values are cached.
490
491 The cache is cleared when exiting the context manager block.
        The advice is to use this every time you retrieve more than
        one piece of information about the process. If you're lucky,
        you'll get a hell of a speedup.
495
496 >>> import psutil
497 >>> p = psutil.Process()
498 >>> with p.oneshot():
499 ... p.name() # collect multiple info
500 ... p.cpu_times() # return cached value
501 ... p.cpu_percent() # return cached value
502 ... p.create_time() # return cached value
503 ...
504 >>>
505 """
506 with self._lock:
507 if hasattr(self, "_cache"):
508 # NOOP: this covers the use case where the user enters the
509 # context twice:
510 #
511 # >>> with p.oneshot():
512 # ... with p.oneshot():
513 # ...
514 #
515 # Also, since as_dict() internally uses oneshot()
516 # I expect that the code below will be a pretty common
517 # "mistake" that the user will make, so let's guard
518 # against that:
519 #
520 # >>> with p.oneshot():
521 # ... p.as_dict()
522 # ...
523 yield
524 else:
525 try:
526 # cached in case cpu_percent() is used
527 self.cpu_times.cache_activate(self)
528 # cached in case memory_percent() is used
529 self.memory_info.cache_activate(self)
530 # cached in case parent() is used
531 self.ppid.cache_activate(self)
532 # cached in case username() is used
533 if POSIX:
534 self.uids.cache_activate(self)
535 # specific implementation cache
536 self._proc.oneshot_enter()
537 yield
538 finally:
539 self.cpu_times.cache_deactivate(self)
540 self.memory_info.cache_deactivate(self)
541 self.ppid.cache_deactivate(self)
542 if POSIX:
543 self.uids.cache_deactivate(self)
544 self._proc.oneshot_exit()
545
546 def as_dict(self, attrs=None, ad_value=None):
547 """Utility method returning process information as a
548 hashable dictionary.
549 If *attrs* is specified it must be a list of strings
550 reflecting available Process class' attribute names
551 (e.g. ['cpu_times', 'name']) else all public (read
552 only) attributes are assumed.
553 *ad_value* is the value which gets assigned in case
554 AccessDenied or ZombieProcess exception is raised when
555 retrieving that particular process information.
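
        Example (a minimal sketch; keys depend on *attrs* and the
        output values shown are purely illustrative):

        >>> import psutil
        >>> psutil.Process().as_dict(attrs=['pid', 'name', 'username'])
        {'pid': 1234, 'name': 'python3', 'username': 'giampaolo'}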
556 """
557 valid_names = _as_dict_attrnames
558 if attrs is not None:
559 if not isinstance(attrs, (list, tuple, set, frozenset)):
560 msg = f"invalid attrs type {type(attrs)}"
561 raise TypeError(msg)
562 attrs = set(attrs)
563 invalid_names = attrs - valid_names
564 if invalid_names:
565 msg = "invalid attr name{} {}".format(
566 "s" if len(invalid_names) > 1 else "",
567 ", ".join(map(repr, invalid_names)),
568 )
569 raise ValueError(msg)
570
571 retdict = {}
572 ls = attrs or valid_names
573 with self.oneshot():
574 for name in ls:
575 try:
576 if name == 'pid':
577 ret = self.pid
578 else:
579 meth = getattr(self, name)
580 ret = meth()
581 except (AccessDenied, ZombieProcess):
582 ret = ad_value
583 except NotImplementedError:
584 # in case of not implemented functionality (may happen
585 # on old or exotic systems) we want to crash only if
586 # the user explicitly asked for that particular attr
587 if attrs:
588 raise
589 continue
590 retdict[name] = ret
591 return retdict
592
593 def parent(self):
594 """Return the parent process as a Process object pre-emptively
595 checking whether PID has been reused.
596 If no parent is known return None.
597 """
598 lowest_pid = _LOWEST_PID if _LOWEST_PID is not None else pids()[0]
599 if self.pid == lowest_pid:
600 return None
601 ppid = self.ppid()
602 if ppid is not None:
603 # Get a fresh (non-cached) ctime in case the system clock
604 # was updated. TODO: use a monotonic ctime on platforms
605 # where it's supported.
606 proc_ctime = Process(self.pid).create_time()
607 try:
608 parent = Process(ppid)
609 if parent.create_time() <= proc_ctime:
610 return parent
611 # ...else ppid has been reused by another process
612 except NoSuchProcess:
613 pass
614
615 def parents(self):
616 """Return the parents of this process as a list of Process
617 instances. If no parents are known return an empty list.
618 """
619 parents = []
620 proc = self.parent()
621 while proc is not None:
622 parents.append(proc)
623 proc = proc.parent()
624 return parents
625
626 def is_running(self):
627 """Return whether this process is running.
628
629 It also checks if PID has been reused by another process, in
630 which case it will remove the process from `process_iter()`
631 internal cache and return False.
632 """
633 if self._gone or self._pid_reused:
634 return False
635 try:
636 # Checking if PID is alive is not enough as the PID might
637 # have been reused by another process. Process identity /
638 # uniqueness over time is guaranteed by (PID + creation
639 # time) and that is verified in __eq__.
640 self._pid_reused = self != Process(self.pid)
641 if self._pid_reused:
642 _pids_reused.add(self.pid)
643 raise NoSuchProcess(self.pid)
644 return True
645 except ZombieProcess:
646 # We should never get here as it's already handled in
647 # Process.__init__; here just for extra safety.
648 return True
649 except NoSuchProcess:
650 self._gone = True
651 return False
652
653 # --- actual API
654
655 @memoize_when_activated
656 def ppid(self):
657 """The process parent PID.
658 On Windows the return value is cached after first call.
659 """
660 # On POSIX we don't want to cache the ppid as it may unexpectedly
661 # change to 1 (init) in case this process turns into a zombie:
662 # https://github.com/giampaolo/psutil/issues/321
663 # http://stackoverflow.com/questions/356722/
664
665 # XXX should we check creation time here rather than in
666 # Process.parent()?
667 self._raise_if_pid_reused()
668 if POSIX:
669 return self._proc.ppid()
670 else: # pragma: no cover
671 self._ppid = self._ppid or self._proc.ppid()
672 return self._ppid
673
674 def name(self):
675 """The process name. The return value is cached after first call."""
676 # Process name is only cached on Windows as on POSIX it may
677 # change, see:
678 # https://github.com/giampaolo/psutil/issues/692
679 if WINDOWS and self._name is not None:
680 return self._name
681 name = self._proc.name()
682 if POSIX and len(name) >= 15:
683 # On UNIX the name gets truncated to the first 15 characters.
684 # If it matches the first part of the cmdline we return that
            # one instead because it's usually more descriptive.
686 # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
687 try:
688 cmdline = self.cmdline()
689 except (AccessDenied, ZombieProcess):
690 # Just pass and return the truncated name: it's better
691 # than nothing. Note: there are actual cases where a
692 # zombie process can return a name() but not a
693 # cmdline(), see:
694 # https://github.com/giampaolo/psutil/issues/2239
695 pass
696 else:
697 if cmdline:
698 extended_name = os.path.basename(cmdline[0])
699 if extended_name.startswith(name):
700 name = extended_name
701 self._name = name
702 self._proc._name = name
703 return name
704
705 def exe(self):
706 """The process executable as an absolute path.
707 May also be an empty string.
708 The return value is cached after first call.
709 """
710
711 def guess_it(fallback):
712 # try to guess exe from cmdline[0] in absence of a native
713 # exe representation
714 cmdline = self.cmdline()
715 if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
716 exe = cmdline[0] # the possible exe
717 # Attempt to guess only in case of an absolute path.
718 # It is not safe otherwise as the process might have
719 # changed cwd.
720 if (
721 os.path.isabs(exe)
722 and os.path.isfile(exe)
723 and os.access(exe, os.X_OK)
724 ):
725 return exe
726 if isinstance(fallback, AccessDenied):
727 raise fallback
728 return fallback
729
730 if self._exe is None:
731 try:
732 exe = self._proc.exe()
733 except AccessDenied as err:
734 return guess_it(fallback=err)
735 else:
736 if not exe:
737 # underlying implementation can legitimately return an
738 # empty string; if that's the case we don't want to
739 # raise AD while guessing from the cmdline
740 try:
741 exe = guess_it(fallback=exe)
742 except AccessDenied:
743 pass
744 self._exe = exe
745 return self._exe
746
747 def cmdline(self):
748 """The command line this process has been called with."""
749 return self._proc.cmdline()
750
751 def status(self):
752 """The process current status as a STATUS_* constant."""
753 try:
754 return self._proc.status()
755 except ZombieProcess:
756 return STATUS_ZOMBIE
757
758 def username(self):
759 """The name of the user that owns the process.
760 On UNIX this is calculated by using *real* process uid.
761 """
762 if POSIX:
763 if pwd is None:
764 # might happen if python was installed from sources
765 msg = "requires pwd module shipped with standard python"
766 raise ImportError(msg)
767 real_uid = self.uids().real
768 try:
769 return pwd.getpwuid(real_uid).pw_name
770 except KeyError:
771 # the uid can't be resolved by the system
772 return str(real_uid)
773 else:
774 return self._proc.username()
775
776 def create_time(self):
777 """The process creation time as a floating point number
778 expressed in seconds since the epoch (seconds since January 1,
779 1970, at midnight UTC). The return value, which is cached after
780 first call, is based on the system clock, which means it may be
781 affected by changes such as manual adjustments or time
782 synchronization (e.g. NTP).
783 """
784 if self._create_time is None:
785 self._create_time = self._proc.create_time()
786 return self._create_time
787
788 def cwd(self):
789 """Process current working directory as an absolute path."""
790 return self._proc.cwd()
791
792 def nice(self, value=None):
793 """Get or set process niceness (priority)."""
794 if value is None:
795 return self._proc.nice_get()
796 else:
797 self._raise_if_pid_reused()
798 self._proc.nice_set(value)
799
800 if POSIX:
801
802 @memoize_when_activated
803 def uids(self):
804 """Return process UIDs as a (real, effective, saved)
805 namedtuple.
806 """
807 return self._proc.uids()
808
809 def gids(self):
810 """Return process GIDs as a (real, effective, saved)
811 namedtuple.
812 """
813 return self._proc.gids()
814
815 def terminal(self):
816 """The terminal associated with this process, if any,
817 else None.
818 """
819 return self._proc.terminal()
820
821 def num_fds(self):
822 """Return the number of file descriptors opened by this
823 process (POSIX only).
824 """
825 return self._proc.num_fds()
826
827 # Linux, BSD, AIX and Windows only
828 if hasattr(_psplatform.Process, "io_counters"):
829
830 def io_counters(self):
831 """Return process I/O statistics as a
832 (read_count, write_count, read_bytes, write_bytes)
833 namedtuple.
834 Those are the number of read/write calls performed and the
835 amount of bytes read and written by the process.
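
            Example (values are illustrative; some platforms expose
            additional, platform-specific fields):

            >>> import psutil
            >>> psutil.Process().io_counters()
            pio(read_count=454556, write_count=3456, read_bytes=110592, write_bytes=0)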
836 """
837 return self._proc.io_counters()
838
839 # Linux and Windows
840 if hasattr(_psplatform.Process, "ionice_get"):
841
842 def ionice(self, ioclass=None, value=None):
843 """Get or set process I/O niceness (priority).
844
845 On Linux *ioclass* is one of the IOPRIO_CLASS_* constants.
846 *value* is a number which goes from 0 to 7. The higher the
847 value, the lower the I/O priority of the process.
848
849 On Windows only *ioclass* is used and it can be set to 2
850 (normal), 1 (low) or 0 (very low).
851
852 Available on Linux and Windows > Vista only.
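
            Example (Linux; the output shown is illustrative):

            >>> import psutil
            >>> p = psutil.Process()
            >>> p.ionice(psutil.IOPRIO_CLASS_IDLE)  # lowest I/O priority
            >>> p.ionice()
            pionice(ioclass=<IOPriority.IOPRIO_CLASS_IDLE: 3>, value=0)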
853 """
854 if ioclass is None:
855 if value is not None:
856 msg = "'ioclass' argument must be specified"
857 raise ValueError(msg)
858 return self._proc.ionice_get()
859 else:
860 self._raise_if_pid_reused()
861 return self._proc.ionice_set(ioclass, value)
862
863 # Linux / FreeBSD only
864 if hasattr(_psplatform.Process, "rlimit"):
865
866 def rlimit(self, resource, limits=None):
867 """Get or set process resource limits as a (soft, hard)
868 tuple.
869
870 *resource* is one of the RLIMIT_* constants.
871 *limits* is supposed to be a (soft, hard) tuple.
872
873 See "man prlimit" for further info.
874 Available on Linux and FreeBSD only.
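
            Example (Linux, assuming the RLIMIT_NOFILE constant is
            available in the psutil namespace):

            >>> import psutil
            >>> p = psutil.Process()
            >>> p.rlimit(psutil.RLIMIT_NOFILE, (128, 128))
            >>> p.rlimit(psutil.RLIMIT_NOFILE)
            (128, 128)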
875 """
876 if limits is not None:
877 self._raise_if_pid_reused()
878 return self._proc.rlimit(resource, limits)
879
880 # Windows, Linux and FreeBSD only
881 if hasattr(_psplatform.Process, "cpu_affinity_get"):
882
883 def cpu_affinity(self, cpus=None):
884 """Get or set process CPU affinity.
885 If specified, *cpus* must be a list of CPUs for which you
886 want to set the affinity (e.g. [0, 1]).
            If an empty list is passed, all eligible CPUs are assumed
888 (and set).
889 (Windows, Linux and BSD only).
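
            Example (illustrative; CPU numbers depend on the machine):

            >>> import psutil
            >>> p = psutil.Process()
            >>> p.cpu_affinity()
            [0, 1, 2, 3]
            >>> p.cpu_affinity([0, 1])  # restrict to CPUs 0 and 1
            >>> p.cpu_affinity([])      # reset to all eligible CPUs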
890 """
891 if cpus is None:
892 return sorted(set(self._proc.cpu_affinity_get()))
893 else:
894 self._raise_if_pid_reused()
895 if not cpus:
896 if hasattr(self._proc, "_get_eligible_cpus"):
897 cpus = self._proc._get_eligible_cpus()
898 else:
899 cpus = tuple(range(len(cpu_times(percpu=True))))
900 self._proc.cpu_affinity_set(list(set(cpus)))
901
902 # Linux, FreeBSD, SunOS
903 if hasattr(_psplatform.Process, "cpu_num"):
904
905 def cpu_num(self):
906 """Return what CPU this process is currently running on.
907 The returned number should be <= psutil.cpu_count()
908 and <= len(psutil.cpu_percent(percpu=True)).
909 It may be used in conjunction with
910 psutil.cpu_percent(percpu=True) to observe the system
911 workload distributed across CPUs.
912 """
913 return self._proc.cpu_num()
914
    # All platforms have it, but that may change in the future.
916 if hasattr(_psplatform.Process, "environ"):
917
918 def environ(self):
919 """The environment variables of the process as a dict. Note: this
920 might not reflect changes made after the process started.
921 """
922 return self._proc.environ()
923
924 if WINDOWS:
925
926 def num_handles(self):
927 """Return the number of handles opened by this process
928 (Windows only).
929 """
930 return self._proc.num_handles()
931
932 def num_ctx_switches(self):
933 """Return the number of voluntary and involuntary context
934 switches performed by this process.
935 """
936 return self._proc.num_ctx_switches()
937
938 def num_threads(self):
939 """Return the number of threads used by this process."""
940 return self._proc.num_threads()
941
942 if hasattr(_psplatform.Process, "threads"):
943
944 def threads(self):
945 """Return threads opened by process as a list of
946 (id, user_time, system_time) namedtuples representing
947 thread id and thread CPU times (user/system).
948 On OpenBSD this method requires root access.
949 """
950 return self._proc.threads()
951
952 def children(self, recursive=False):
953 """Return the children of this process as a list of Process
954 instances, pre-emptively checking whether PID has been reused.
        If *recursive* is True return all of this process's
        descendants (children, grandchildren, and so on).
956
957 Example (A == this process):
958
959 A ─┐
960 │
961 ├─ B (child) ─┐
962 │ └─ X (grandchild) ─┐
963 │ └─ Y (great grandchild)
964 ├─ C (child)
965 └─ D (child)
966
967 >>> import psutil
968 >>> p = psutil.Process()
969 >>> p.children()
970 B, C, D
971 >>> p.children(recursive=True)
972 B, X, Y, C, D
973
974 Note that in the example above if process X disappears
975 process Y won't be listed as the reference to process A
976 is lost.
977 """
978 self._raise_if_pid_reused()
979 ppid_map = _ppid_map()
980 # Get a fresh (non-cached) ctime in case the system clock was
981 # updated. TODO: use a monotonic ctime on platforms where it's
982 # supported.
983 proc_ctime = Process(self.pid).create_time()
984 ret = []
985 if not recursive:
986 for pid, ppid in ppid_map.items():
987 if ppid == self.pid:
988 try:
989 child = Process(pid)
990 # if child happens to be older than its parent
991 # (self) it means child's PID has been reused
992 if proc_ctime <= child.create_time():
993 ret.append(child)
994 except (NoSuchProcess, ZombieProcess):
995 pass
996 else:
997 # Construct a {pid: [child pids]} dict
998 reverse_ppid_map = collections.defaultdict(list)
999 for pid, ppid in ppid_map.items():
1000 reverse_ppid_map[ppid].append(pid)
1001 # Recursively traverse that dict, starting from self.pid,
1002 # such that we only call Process() on actual children
1003 seen = set()
1004 stack = [self.pid]
1005 while stack:
1006 pid = stack.pop()
1007 if pid in seen:
1008 # Since pids can be reused while the ppid_map is
1009 # constructed, there may be rare instances where
1010 # there's a cycle in the recorded process "tree".
1011 continue
1012 seen.add(pid)
1013 for child_pid in reverse_ppid_map[pid]:
1014 try:
1015 child = Process(child_pid)
1016 # if child happens to be older than its parent
1017 # (self) it means child's PID has been reused
1018 intime = proc_ctime <= child.create_time()
1019 if intime:
1020 ret.append(child)
1021 stack.append(child_pid)
1022 except (NoSuchProcess, ZombieProcess):
1023 pass
1024 return ret
1025
1026 def cpu_percent(self, interval=None):
1027 """Return a float representing the current process CPU
1028 utilization as a percentage.
1029
1030 When *interval* is 0.0 or None (default) compares process times
1031 to system CPU times elapsed since last call, returning
        immediately (non-blocking). That means that the first time
        this is called it will return a meaningless 0.0 value which
        you should ignore.
1034
1035 When *interval* is > 0.0 compares process times to system CPU
1036 times elapsed before and after the interval (blocking).
1037
        In this case it is recommended, for accuracy, that this
        function be called with at least 0.1 seconds between calls.
1040
1041 A value > 100.0 can be returned in case of processes running
1042 multiple threads on different CPU cores.
1043
1044 The returned value is explicitly NOT split evenly between
1045 all available logical CPUs. This means that a busy loop process
1046 running on a system with 2 logical CPUs will be reported as
1047 having 100% CPU utilization instead of 50%.
1048
1049 Examples:
1050
1051 >>> import psutil
1052 >>> p = psutil.Process(os.getpid())
1053 >>> # blocking
1054 >>> p.cpu_percent(interval=1)
1055 2.0
1056 >>> # non-blocking (percentage since last call)
1057 >>> p.cpu_percent(interval=None)
1058 2.9
1059 >>>
1060 """
1061 blocking = interval is not None and interval > 0.0
1062 if interval is not None and interval < 0:
1063 msg = f"interval is not positive (got {interval!r})"
1064 raise ValueError(msg)
1065 num_cpus = cpu_count() or 1
1066
1067 def timer():
1068 return _timer() * num_cpus
1069
1070 if blocking:
1071 st1 = timer()
1072 pt1 = self._proc.cpu_times()
1073 time.sleep(interval)
1074 st2 = timer()
1075 pt2 = self._proc.cpu_times()
1076 else:
1077 st1 = self._last_sys_cpu_times
1078 pt1 = self._last_proc_cpu_times
1079 st2 = timer()
1080 pt2 = self._proc.cpu_times()
1081 if st1 is None or pt1 is None:
1082 self._last_sys_cpu_times = st2
1083 self._last_proc_cpu_times = pt2
1084 return 0.0
1085
1086 delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
1087 delta_time = st2 - st1
1088 # reset values for next call in case of interval == None
1089 self._last_sys_cpu_times = st2
1090 self._last_proc_cpu_times = pt2
1091
1092 try:
1093 # This is the utilization split evenly between all CPUs.
1094 # E.g. a busy loop process on a 2-CPU-cores system at this
1095 # point is reported as 50% instead of 100%.
1096 overall_cpus_percent = (delta_proc / delta_time) * 100
1097 except ZeroDivisionError:
1098 # interval was too low
1099 return 0.0
1100 else:
1101 # Note 1:
            # in order to emulate "top" we multiply the value by the
            # number of CPU cores. This way the busy process will be
            # reported as having 100% (or more) usage.
1105 #
1106 # Note 2:
1107 # taskmgr.exe on Windows differs in that it will show 50%
1108 # instead.
1109 #
1110 # Note 3:
1111 # a percentage > 100 is legitimate as it can result from a
1112 # process with multiple threads running on different CPU
1113 # cores (top does the same), see:
1114 # http://stackoverflow.com/questions/1032357
1115 # https://github.com/giampaolo/psutil/issues/474
1116 single_cpu_percent = overall_cpus_percent * num_cpus
1117 return round(single_cpu_percent, 1)
1118
1119 @memoize_when_activated
1120 def cpu_times(self):
1121 """Return a (user, system, children_user, children_system)
1122 namedtuple representing the accumulated process time, in
1123 seconds.
1124 This is similar to os.times() but per-process.
1125 On macOS and Windows children_user and children_system are
1126 always set to 0.
1127 """
1128 return self._proc.cpu_times()
1129
1130 @memoize_when_activated
1131 def memory_info(self):
1132 """Return a namedtuple with variable fields depending on the
1133 platform, representing memory information about the process.
1134
1135 The "portable" fields available on all platforms are `rss` and `vms`.
1136
1137 All numbers are expressed in bytes.
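
        Example (Linux; the values shown are illustrative):

        >>> import psutil
        >>> psutil.Process().memory_info()
        pmem(rss=15491072, vms=84025344, shared=5206016, text=2555904, lib=0, data=9891840, dirty=0)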
1138 """
1139 return self._proc.memory_info()
1140
1141 def memory_full_info(self):
1142 """This method returns the same information as memory_info(),
        plus, on some platforms (Linux, macOS, Windows), it also
        provides additional metrics (USS, PSS and swap).
1145 The additional metrics provide a better representation of actual
1146 process memory usage.
1147
1148 Namely USS is the memory which is unique to a process and which
1149 would be freed if the process was terminated right now.
1150
        It does so by passing through the whole process address space.
1152 As such it usually requires higher user privileges than
1153 memory_info() and is considerably slower.
1154 """
1155 return self._proc.memory_full_info()
1156
1157 def memory_percent(self, memtype="rss"):
1158 """Compare process memory to total physical system memory and
1159 calculate process memory utilization as a percentage.
1160 *memtype* argument is a string that dictates what type of
1161 process memory you want to compare against (defaults to "rss").
1162 The list of available strings can be obtained like this:
1163
        >>> psutil.Process().memory_full_info()._fields
1165 ('rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty', 'uss', 'pss')
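
        Usage example (the returned value is illustrative):

        >>> psutil.Process().memory_percent(memtype="rss")
        0.7823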
1166 """
1167 valid_types = list(_psplatform.pfullmem._fields)
1168 if memtype not in valid_types:
1169 msg = (
1170 f"invalid memtype {memtype!r}; valid types are"
1171 f" {tuple(valid_types)!r}"
1172 )
1173 raise ValueError(msg)
1174 fun = (
1175 self.memory_info
1176 if memtype in _psplatform.pmem._fields
1177 else self.memory_full_info
1178 )
1179 metrics = fun()
1180 value = getattr(metrics, memtype)
1181
1182 # use cached value if available
1183 total_phymem = _TOTAL_PHYMEM or virtual_memory().total
1184 if not total_phymem > 0:
1185 # we should never get here
1186 msg = (
1187 "can't calculate process memory percent because total physical"
1188 f" system memory is not positive ({total_phymem!r})"
1189 )
1190 raise ValueError(msg)
1191 return (value / float(total_phymem)) * 100
1192
1193 if hasattr(_psplatform.Process, "memory_maps"):
1194
1195 def memory_maps(self, grouped=True):
1196 """Return process' mapped memory regions as a list of namedtuples
1197 whose fields are variable depending on the platform.
1198
1199 If *grouped* is True the mapped regions with the same 'path'
1200 are grouped together and the different memory fields are summed.
1201
1202 If *grouped* is False every mapped region is shown as a single
1203 entity and the namedtuple will also include the mapped region's
1204 address space ('addr') and permission set ('perms').
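
            Example (Linux, grouped; output is illustrative and
            abridged):

            >>> import psutil
            >>> psutil.Process().memory_maps()[0]
            pmmap_grouped(path='/usr/lib/x86_64-linux-gnu/libc.so.6', rss=1798144, size=2125824, ...)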
1205 """
1206 it = self._proc.memory_maps()
1207 if grouped:
1208 d = {}
1209 for tupl in it:
1210 path = tupl[2]
1211 nums = tupl[3:]
1212 try:
1213 d[path] = list(map(lambda x, y: x + y, d[path], nums))
1214 except KeyError:
1215 d[path] = nums
1216 nt = _psplatform.pmmap_grouped
1217 return [nt(path, *d[path]) for path in d]
1218 else:
1219 nt = _psplatform.pmmap_ext
1220 return [nt(*x) for x in it]
1221
1222 def open_files(self):
1223 """Return files opened by process as a list of
1224 (path, fd) namedtuples including the absolute file name
1225 and file descriptor number.
1226 """
1227 return self._proc.open_files()
1228
1229 def net_connections(self, kind='inet'):
1230 """Return socket connections opened by process as a list of
1231 (fd, family, type, laddr, raddr, status) namedtuples.
1232 The *kind* parameter filters for connections that match the
1233 following criteria:
1234
1235 +------------+----------------------------------------------------+
1236 | Kind Value | Connections using |
1237 +------------+----------------------------------------------------+
1238 | inet | IPv4 and IPv6 |
1239 | inet4 | IPv4 |
1240 | inet6 | IPv6 |
1241 | tcp | TCP |
1242 | tcp4 | TCP over IPv4 |
1243 | tcp6 | TCP over IPv6 |
1244 | udp | UDP |
1245 | udp4 | UDP over IPv4 |
1246 | udp6 | UDP over IPv6 |
1247 | unix | UNIX socket (both UDP and TCP protocols) |
1248 | all | the sum of all the possible families and protocols |
1249 +------------+----------------------------------------------------+
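
        Example (illustrative and abridged; addresses and statuses
        will differ):

        >>> import psutil
        >>> p = psutil.Process()
        >>> p.net_connections(kind='tcp')
        [pconn(fd=115, family=<AddressFamily.AF_INET: 2>,
               type=<SocketKind.SOCK_STREAM: 1>,
               laddr=addr(ip='10.0.0.1', port=48776),
               raddr=addr(ip='93.186.135.91', port=80),
               status='ESTABLISHED')]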
1250 """
1251 _check_conn_kind(kind)
1252 return self._proc.net_connections(kind)
1253
1254 @_common.deprecated_method(replacement="net_connections")
1255 def connections(self, kind="inet"):
1256 return self.net_connections(kind=kind)
1257
1258 # --- signals
1259
1260 if POSIX:
1261
1262 def _send_signal(self, sig):
1263 assert not self.pid < 0, self.pid
1264 self._raise_if_pid_reused()
1265
1266 pid, ppid, name = self.pid, self._ppid, self._name
1267 if pid == 0:
1268 # see "man 2 kill"
1269 msg = (
1270 "preventing sending signal to process with PID 0 as it "
1271 "would affect every process in the process group of the "
1272 "calling process (os.getpid()) instead of PID 0"
1273 )
1274 raise ValueError(msg)
1275 try:
1276 os.kill(pid, sig)
1277 except ProcessLookupError as err:
1278 if OPENBSD and pid_exists(pid):
1279 # We do this because os.kill() lies in case of
1280 # zombie processes.
1281 raise ZombieProcess(pid, name, ppid) from err
1282 self._gone = True
1283 raise NoSuchProcess(pid, name) from err
1284 except PermissionError as err:
1285 raise AccessDenied(pid, name) from err
1286
1287 def send_signal(self, sig):
1288 """Send a signal *sig* to process pre-emptively checking
1289 whether PID has been reused (see signal module constants) .
1290 On Windows only SIGTERM is valid and is treated as an alias
1291 for kill().
1292 """
1293 if POSIX:
1294 self._send_signal(sig)
1295 else: # pragma: no cover
1296 self._raise_if_pid_reused()
1297 if sig != signal.SIGTERM and not self.is_running():
1298 msg = "process no longer exists"
1299 raise NoSuchProcess(self.pid, self._name, msg=msg)
1300 self._proc.send_signal(sig)
1301
1302 def suspend(self):
1303 """Suspend process execution with SIGSTOP pre-emptively checking
1304 whether PID has been reused.
1305 On Windows this has the effect of suspending all process threads.
1306 """
1307 if POSIX:
1308 self._send_signal(signal.SIGSTOP)
1309 else: # pragma: no cover
1310 self._raise_if_pid_reused()
1311 self._proc.suspend()
1312
1313 def resume(self):
1314 """Resume process execution with SIGCONT pre-emptively checking
1315 whether PID has been reused.
1316 On Windows this has the effect of resuming all process threads.
1317 """
1318 if POSIX:
1319 self._send_signal(signal.SIGCONT)
1320 else: # pragma: no cover
1321 self._raise_if_pid_reused()
1322 self._proc.resume()
1323
1324 def terminate(self):
1325 """Terminate the process with SIGTERM pre-emptively checking
1326 whether PID has been reused.
1327 On Windows this is an alias for kill().
1328 """
1329 if POSIX:
1330 self._send_signal(signal.SIGTERM)
1331 else: # pragma: no cover
1332 self._raise_if_pid_reused()
1333 self._proc.kill()
1334
1335 def kill(self):
1336 """Kill the current process with SIGKILL pre-emptively checking
1337 whether PID has been reused.
1338 """
1339 if POSIX:
1340 self._send_signal(signal.SIGKILL)
1341 else: # pragma: no cover
1342 self._raise_if_pid_reused()
1343 self._proc.kill()
1344
1345 def wait(self, timeout=None):
1346 """Wait for process to terminate and, if process is a children
1347 of os.getpid(), also return its exit code, else None.
1348 On Windows there's no such limitation (exit code is always
1349 returned).
1350
1351 If the process is already terminated immediately return None
1352 instead of raising NoSuchProcess.
1353
1354 If *timeout* (in seconds) is specified and process is still
1355 alive raise TimeoutExpired.
1356
1357 To wait for multiple Process(es) use psutil.wait_procs().
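
        Example (POSIX; the exit code shown is illustrative):

        >>> import subprocess, psutil
        >>> sub = subprocess.Popen(["sleep", "2"])
        >>> p = psutil.Process(sub.pid)
        >>> p.wait(timeout=5)
        0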
1358 """
1359 if timeout is not None and not timeout >= 0:
1360 msg = "timeout must be a positive integer"
1361 raise ValueError(msg)
1362 if self._exitcode is not _SENTINEL:
1363 return self._exitcode
1364 self._exitcode = self._proc.wait(timeout)
1365 return self._exitcode
1366
1367
1368# The valid attr names which can be processed by Process.as_dict().
1369# fmt: off
1370_as_dict_attrnames = {
1371 x for x in dir(Process) if not x.startswith("_") and x not in
1372 {'send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
1373 'is_running', 'as_dict', 'parent', 'parents', 'children', 'rlimit',
1374 'connections', 'oneshot'}
1375}
1376# fmt: on
1377
1378
1379# =====================================================================
1380# --- Popen class
1381# =====================================================================
1382
1383
1384class Popen(Process):
1385 """Same as subprocess.Popen, but in addition it provides all
1386 psutil.Process methods in a single class.
1387 For the following methods which are common to both classes, psutil
1388 implementation takes precedence:
1389
1390 * send_signal()
1391 * terminate()
1392 * kill()
1393
1394 This is done in order to avoid killing another process in case its
1395 PID has been reused, fixing BPO-6973.
1396
1397 >>> import psutil
1398 >>> from subprocess import PIPE
1399 >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
1400 >>> p.name()
1401 'python'
1402 >>> p.uids()
1403 user(real=1000, effective=1000, saved=1000)
1404 >>> p.username()
1405 'giampaolo'
1406 >>> p.communicate()
1407 ('hi', None)
1408 >>> p.terminate()
1409 >>> p.wait(timeout=2)
1410 0
1411 >>>
1412 """
1413
1414 def __init__(self, *args, **kwargs):
        # Explicitly avoid raising NoSuchProcess in case the process
1416 # spawned by subprocess.Popen terminates too quickly, see:
1417 # https://github.com/giampaolo/psutil/issues/193
1418 self.__subproc = subprocess.Popen(*args, **kwargs)
1419 self._init(self.__subproc.pid, _ignore_nsp=True)
1420
1421 def __dir__(self):
1422 return sorted(set(dir(Popen) + dir(subprocess.Popen)))
1423
1424 def __enter__(self):
1425 if hasattr(self.__subproc, '__enter__'):
1426 self.__subproc.__enter__()
1427 return self
1428
1429 def __exit__(self, *args, **kwargs):
1430 if hasattr(self.__subproc, '__exit__'):
1431 return self.__subproc.__exit__(*args, **kwargs)
1432 else:
1433 if self.stdout:
1434 self.stdout.close()
1435 if self.stderr:
1436 self.stderr.close()
1437 try:
1438 # Flushing a BufferedWriter may raise an error.
1439 if self.stdin:
1440 self.stdin.close()
1441 finally:
1442 # Wait for the process to terminate, to avoid zombies.
1443 self.wait()
1444
1445 def __getattribute__(self, name):
1446 try:
1447 return object.__getattribute__(self, name)
1448 except AttributeError:
1449 try:
1450 return object.__getattribute__(self.__subproc, name)
1451 except AttributeError:
1452 msg = f"{self.__class__!r} has no attribute {name!r}"
1453 raise AttributeError(msg) from None
1454
1455 def wait(self, timeout=None):
1456 if self.__subproc.returncode is not None:
1457 return self.__subproc.returncode
1458 ret = super().wait(timeout)
1459 self.__subproc.returncode = ret
1460 return ret
1461
1462
1463# =====================================================================
1464# --- system processes related functions
1465# =====================================================================
1466
1467
1468def pids():
1469 """Return a list of current running PIDs."""
1470 global _LOWEST_PID
1471 ret = sorted(_psplatform.pids())
1472 _LOWEST_PID = ret[0]
1473 return ret
1474
1475
1476def pid_exists(pid):
1477 """Return True if given PID exists in the current process list.
1478 This is faster than doing "pid in psutil.pids()" and
1479 should be preferred.
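
    Example:

    >>> import os, psutil
    >>> psutil.pid_exists(os.getpid())  # our own PID always exists
    True
    >>> psutil.pid_exists(-1)           # negative PIDs never exist
    False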
1480 """
1481 if pid < 0:
1482 return False
1483 elif pid == 0 and POSIX:
1484 # On POSIX we use os.kill() to determine PID existence.
1485 # According to "man 2 kill" PID 0 has a special meaning
1486 # though: it refers to <<every process in the process
        # group of the calling process>> and that is not what we want
1488 # to do here.
1489 return pid in pids()
1490 else:
1491 return _psplatform.pid_exists(pid)
1492
1493
1494_pmap = {}
1495_pids_reused = set()
1496
1497
1498def process_iter(attrs=None, ad_value=None):
1499 """Return a generator yielding a Process instance for all
1500 running processes.
1501
1502 Every new Process instance is only created once and then cached
1503 into an internal table which is updated every time this is used.
1504 Cache can optionally be cleared via `process_iter.cache_clear()`.
1505
1506 The sorting order in which processes are yielded is based on
1507 their PIDs.
1508
1509 *attrs* and *ad_value* have the same meaning as in
1510 Process.as_dict(). If *attrs* is specified as_dict() is called
1511 and the resulting dict is stored as a 'info' attribute attached
1512 to returned Process instance.
1513 If *attrs* is an empty list it will retrieve all process info
1514 (slow).
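
    Example (the process names shown are illustrative):

    >>> import psutil
    >>> for proc in psutil.process_iter(attrs=['pid', 'name']):
    ...     print(proc.info)
    ...
    {'pid': 1, 'name': 'systemd'}
    {'pid': 2, 'name': 'kthreadd'}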
1515 """
1516 global _pmap
1517
1518 def add(pid):
1519 proc = Process(pid)
1520 pmap[proc.pid] = proc
1521 return proc
1522
1523 def remove(pid):
1524 pmap.pop(pid, None)
1525
1526 pmap = _pmap.copy()
1527 a = set(pids())
1528 b = set(pmap.keys())
1529 new_pids = a - b
1530 gone_pids = b - a
1531 for pid in gone_pids:
1532 remove(pid)
1533 while _pids_reused:
1534 pid = _pids_reused.pop()
1535 debug(f"refreshing Process instance for reused PID {pid}")
1536 remove(pid)
1537 try:
1538 ls = sorted(list(pmap.items()) + list(dict.fromkeys(new_pids).items()))
1539 for pid, proc in ls:
1540 try:
1541 if proc is None: # new process
1542 proc = add(pid)
1543 if attrs is not None:
1544 proc.info = proc.as_dict(attrs=attrs, ad_value=ad_value)
1545 yield proc
1546 except NoSuchProcess:
1547 remove(pid)
1548 finally:
1549 _pmap = pmap
1550
1551
1552process_iter.cache_clear = lambda: _pmap.clear() # noqa: PLW0108
1553process_iter.cache_clear.__doc__ = "Clear process_iter() internal cache."
1554
1555
1556def wait_procs(procs, timeout=None, callback=None):
1557 """Convenience function which waits for a list of processes to
1558 terminate.
1559
1560 Return a (gone, alive) tuple indicating which processes
1561 are gone and which ones are still alive.
1562
1563 The gone ones will have a new *returncode* attribute indicating
1564 process exit status (may be None).
1565
1566 *callback* is a function which gets called every time a process
1567 terminates (a Process instance is passed as callback argument).
1568
    The function will return as soon as all processes terminate or
    when *timeout* occurs.
    Unlike Process.wait(), it will not raise TimeoutExpired if
    *timeout* occurs.
1573
1574 Typical use case is:
1575
1576 - send SIGTERM to a list of processes
1577 - give them some time to terminate
1578 - send SIGKILL to those ones which are still alive
1579
1580 Example:
1581
1582 >>> def on_terminate(proc):
1583 ... print("process {} terminated".format(proc))
1584 ...
1585 >>> for p in procs:
1586 ... p.terminate()
1587 ...
1588 >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
1589 >>> for p in alive:
1590 ... p.kill()
1591 """
1592
1593 def check_gone(proc, timeout):
1594 try:
1595 returncode = proc.wait(timeout=timeout)
1596 except (TimeoutExpired, subprocess.TimeoutExpired):
1597 pass
1598 else:
1599 if returncode is not None or not proc.is_running():
1600 # Set new Process instance attribute.
1601 proc.returncode = returncode
1602 gone.add(proc)
1603 if callback is not None:
1604 callback(proc)
1605
1606 if timeout is not None and not timeout >= 0:
1607 msg = f"timeout must be a positive integer, got {timeout}"
1608 raise ValueError(msg)
1609 gone = set()
1610 alive = set(procs)
1611 if callback is not None and not callable(callback):
1612 msg = f"callback {callback!r} is not a callable"
1613 raise TypeError(msg)
1614 if timeout is not None:
1615 deadline = _timer() + timeout
1616
1617 while alive:
1618 if timeout is not None and timeout <= 0:
1619 break
1620 for proc in alive:
1621 # Make sure that every complete iteration (all processes)
1622 # will last max 1 sec.
1623 # We do this because we don't want to wait too long on a
1624 # single process: in case it terminates too late other
1625 # processes may disappear in the meantime and their PID
1626 # reused.
1627 max_timeout = 1.0 / len(alive)
1628 if timeout is not None:
1629 timeout = min((deadline - _timer()), max_timeout)
1630 if timeout <= 0:
1631 break
1632 check_gone(proc, timeout)
1633 else:
1634 check_gone(proc, max_timeout)
1635 alive = alive - gone # noqa: PLR6104
1636
1637 if alive:
1638 # Last attempt over processes survived so far.
1639 # timeout == 0 won't make this function wait any further.
1640 for proc in alive:
1641 check_gone(proc, 0)
1642 alive = alive - gone # noqa: PLR6104
1643
1644 return (list(gone), list(alive))
1645
1646
1647# =====================================================================
1648# --- CPU related functions
1649# =====================================================================
1650
1651
1652def cpu_count(logical=True):
1653 """Return the number of logical CPUs in the system (same as
1654 os.cpu_count()).
1655
1656 If *logical* is False return the number of physical cores only
1657 (e.g. hyper thread CPUs are excluded).
1658
1659 Return None if undetermined.
1660
1661 The return value is cached after first call.
1662 If desired cache can be cleared like this:
1663
1664 >>> psutil.cpu_count.cache_clear()
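
    Example (illustrative; the numbers depend on the machine):

    >>> psutil.cpu_count()
    8
    >>> psutil.cpu_count(logical=False)
    4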
1665 """
1666 if logical:
1667 ret = _psplatform.cpu_count_logical()
1668 else:
1669 ret = _psplatform.cpu_count_cores()
1670 if ret is not None and ret < 1:
1671 ret = None
1672 return ret
1673
1674
1675def cpu_times(percpu=False):
1676 """Return system-wide CPU times as a namedtuple.
1677 Every CPU time represents the seconds the CPU has spent in the
    given mode. The availability of the namedtuple's fields varies
    depending on the platform:
1680
1681 - user
1682 - system
1683 - idle
1684 - nice (UNIX)
1685 - iowait (Linux)
1686 - irq (Linux, FreeBSD)
1687 - softirq (Linux)
1688 - steal (Linux >= 2.6.11)
1689 - guest (Linux >= 2.6.24)
1690 - guest_nice (Linux >= 3.2.0)
1691
1692 When *percpu* is True return a list of namedtuples for each CPU.
1693 First element of the list refers to first CPU, second element
1694 to second CPU and so on.
1695 The order of the list is consistent across calls.
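
    Example (Linux; the values shown are illustrative):

    >>> import psutil
    >>> psutil.cpu_times()
    scputimes(user=17496.1, nice=0.0, system=9466.7, idle=705661.6, iowait=471.6, irq=0.0, softirq=88.8, steal=0.0, guest=0.0, guest_nice=0.0)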
1696 """
1697 if not percpu:
1698 return _psplatform.cpu_times()
1699 else:
1700 return _psplatform.per_cpu_times()
1701
1702
1703try:
1704 _last_cpu_times = {threading.current_thread().ident: cpu_times()}
1705except Exception: # noqa: BLE001
1706 # Don't want to crash at import time.
1707 _last_cpu_times = {}
1708
1709try:
1710 _last_per_cpu_times = {
1711 threading.current_thread().ident: cpu_times(percpu=True)
1712 }
1713except Exception: # noqa: BLE001
1714 # Don't want to crash at import time.
1715 _last_per_cpu_times = {}
1716
1717
1718def _cpu_tot_time(times):
1719 """Given a cpu_time() ntuple calculates the total CPU time
1720 (including idle time).
1721 """
1722 tot = sum(times)
1723 if LINUX:
1724 # On Linux guest times are already accounted in "user" or
1725 # "nice" times, so we subtract them from total.
1726 # Htop does the same. References:
1727 # https://github.com/giampaolo/psutil/pull/940
1728 # http://unix.stackexchange.com/questions/178045
1729 # https://github.com/torvalds/linux/blob/
1730 # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/
1731 # cputime.c#L158
1732 tot -= getattr(times, "guest", 0) # Linux 2.6.24+
1733 tot -= getattr(times, "guest_nice", 0) # Linux 3.2.0+
1734 return tot
1735
1736
1737def _cpu_busy_time(times):
1738 """Given a cpu_time() ntuple calculates the busy CPU time.
1739 We do so by subtracting all idle CPU times.
1740 """
1741 busy = _cpu_tot_time(times)
1742 busy -= times.idle
1743 # Linux: "iowait" is time during which the CPU does not do anything
1744 # (waits for IO to complete). On Linux IO wait is *not* accounted
1745 # in "idle" time so we subtract it. Htop does the same.
1746 # References:
1747 # https://github.com/torvalds/linux/blob/
1748 # 447976ef4fd09b1be88b316d1a81553f1aa7cd07/kernel/sched/cputime.c#L244
1749 busy -= getattr(times, "iowait", 0)
1750 return busy
1751
1752
1753def _cpu_times_deltas(t1, t2):
1754 assert t1._fields == t2._fields, (t1, t2)
1755 field_deltas = []
1756 for field in _psplatform.scputimes._fields:
1757 field_delta = getattr(t2, field) - getattr(t1, field)
1758 # CPU times are always supposed to increase over time
1759 # or at least remain the same and that's because time
1760 # cannot go backwards.
1761 # Surprisingly sometimes this might not be the case (at
1762 # least on Windows and Linux), see:
1763 # https://github.com/giampaolo/psutil/issues/392
1764 # https://github.com/giampaolo/psutil/issues/645
1765 # https://github.com/giampaolo/psutil/issues/1210
1766 # Trim negative deltas to zero to ignore decreasing fields.
1767 # top does the same. Reference:
1768 # https://gitlab.com/procps-ng/procps/blob/v3.3.12/top/top.c#L5063
1769 field_delta = max(0, field_delta)
1770 field_deltas.append(field_delta)
1771 return _psplatform.scputimes(*field_deltas)
1772
1773
1774def cpu_percent(interval=None, percpu=False):
1775 """Return a float representing the current system-wide CPU
1776 utilization as a percentage.
1777
1778 When *interval* is > 0.0 compares system CPU times elapsed before
1779 and after the interval (blocking).
1780
1781 When *interval* is 0.0 or None compares system CPU times elapsed
1782 since last call or module import, returning immediately (non
1783 blocking). That means the first time this is called it will
1784 return a meaningless 0.0 value which you should ignore.
    In this case it is recommended, for accuracy, that this function be
    called with at least 0.1 seconds between calls.
1787
1788 When *percpu* is True returns a list of floats representing the
1789 utilization as a percentage for each CPU.
1790 First element of the list refers to first CPU, second element
1791 to second CPU and so on.
1792 The order of the list is consistent across calls.
1793
1794 Examples:
1795
1796 >>> # blocking, system-wide
1797 >>> psutil.cpu_percent(interval=1)
1798 2.0
1799 >>>
1800 >>> # blocking, per-cpu
1801 >>> psutil.cpu_percent(interval=1, percpu=True)
1802 [2.0, 1.0]
1803 >>>
1804 >>> # non-blocking (percentage since last call)
1805 >>> psutil.cpu_percent(interval=None)
1806 2.9
1807 >>>
1808 """
1809 tid = threading.current_thread().ident
1810 blocking = interval is not None and interval > 0.0
1811 if interval is not None and interval < 0:
        msg = f"interval is negative (got {interval!r})"
1813 raise ValueError(msg)
1814
1815 def calculate(t1, t2):
1816 times_delta = _cpu_times_deltas(t1, t2)
1817 all_delta = _cpu_tot_time(times_delta)
1818 busy_delta = _cpu_busy_time(times_delta)
1819
1820 try:
1821 busy_perc = (busy_delta / all_delta) * 100
1822 except ZeroDivisionError:
1823 return 0.0
1824 else:
1825 return round(busy_perc, 1)
1826
1827 # system-wide usage
1828 if not percpu:
1829 if blocking:
1830 t1 = cpu_times()
1831 time.sleep(interval)
1832 else:
1833 t1 = _last_cpu_times.get(tid) or cpu_times()
1834 _last_cpu_times[tid] = cpu_times()
1835 return calculate(t1, _last_cpu_times[tid])
1836 # per-cpu usage
1837 else:
1838 ret = []
1839 if blocking:
1840 tot1 = cpu_times(percpu=True)
1841 time.sleep(interval)
1842 else:
1843 tot1 = _last_per_cpu_times.get(tid) or cpu_times(percpu=True)
1844 _last_per_cpu_times[tid] = cpu_times(percpu=True)
1845 for t1, t2 in zip(tot1, _last_per_cpu_times[tid]):
1846 ret.append(calculate(t1, t2))
1847 return ret
1848
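# A common non-blocking polling pattern (a sketch; sample numbers are
# illustrative). The first call only primes the per-thread baseline and
# its 0.0 result should be ignored; later calls report usage since the
# previous call:
#
#     >>> import time, psutil
#     >>> psutil.cpu_percent(interval=None)   # prime; ignore the result
#     0.0
#     >>> time.sleep(0.5)
#     >>> psutil.cpu_percent(interval=None)
#     7.3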
1849
1850# Use a separate dict for cpu_times_percent(), so it's independent from
1851# cpu_percent() and they can both be used within the same program.
1852_last_cpu_times_2 = _last_cpu_times.copy()
1853_last_per_cpu_times_2 = _last_per_cpu_times.copy()
1854
1855
1856def cpu_times_percent(interval=None, percpu=False):
1857 """Same as cpu_percent() but provides utilization percentages
1858 for each specific CPU time as is returned by cpu_times().
1859 For instance, on Linux we'll get:
1860
1861 >>> cpu_times_percent()
1862 cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
1863 irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
1864 >>>
1865
1866 *interval* and *percpu* arguments have the same meaning as in
1867 cpu_percent().
1868 """
1869 tid = threading.current_thread().ident
1870 blocking = interval is not None and interval > 0.0
1871 if interval is not None and interval < 0:
        msg = f"interval is negative (got {interval!r})"
1873 raise ValueError(msg)
1874
1875 def calculate(t1, t2):
1876 nums = []
1877 times_delta = _cpu_times_deltas(t1, t2)
1878 all_delta = _cpu_tot_time(times_delta)
1879 # "scale" is the value to multiply each delta with to get percentages.
1880 # We use "max" to avoid division by zero (if all_delta is 0, then all
1881 # fields are 0 so percentages will be 0 too. all_delta cannot be a
1882 # fraction because cpu times are integers)
1883 scale = 100.0 / max(1, all_delta)
1884 for field_delta in times_delta:
1885 field_perc = field_delta * scale
1886 field_perc = round(field_perc, 1)
1887 # make sure we don't return negative values or values over 100%
1888 field_perc = min(max(0.0, field_perc), 100.0)
1889 nums.append(field_perc)
1890 return _psplatform.scputimes(*nums)
1891
1892 # system-wide usage
1893 if not percpu:
1894 if blocking:
1895 t1 = cpu_times()
1896 time.sleep(interval)
1897 else:
1898 t1 = _last_cpu_times_2.get(tid) or cpu_times()
1899 _last_cpu_times_2[tid] = cpu_times()
1900 return calculate(t1, _last_cpu_times_2[tid])
1901 # per-cpu usage
1902 else:
1903 ret = []
1904 if blocking:
1905 tot1 = cpu_times(percpu=True)
1906 time.sleep(interval)
1907 else:
1908 tot1 = _last_per_cpu_times_2.get(tid) or cpu_times(percpu=True)
1909 _last_per_cpu_times_2[tid] = cpu_times(percpu=True)
1910 for t1, t2 in zip(tot1, _last_per_cpu_times_2[tid]):
1911 ret.append(calculate(t1, t2))
1912 return ret
1913
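# Illustrative usage (a sketch; sample values are made up). Useful when a
# breakdown per CPU-time field is wanted rather than a single figure:
#
#     >>> t = psutil.cpu_times_percent(interval=1)
#     >>> t.user, t.system, t.idle
#     (4.8, 2.1, 92.3)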
1914
1915def cpu_stats():
1916 """Return CPU statistics."""
1917 return _psplatform.cpu_stats()
1918
1919
1920if hasattr(_psplatform, "cpu_freq"):
1921
1922 def cpu_freq(percpu=False):
        """Return CPU frequency as a namedtuple including current,
        min and max frequencies expressed in MHz.

        If *percpu* is True and the system supports per-cpu frequency
        retrieval (Linux only) a list of frequencies is returned, one
        for each CPU. If not, a list with a single element is returned.
1929 """
1930 ret = _psplatform.cpu_freq()
1931 if percpu:
1932 return ret
1933 else:
1934 num_cpus = float(len(ret))
1935 if num_cpus == 0:
1936 return None
1937 elif num_cpus == 1:
1938 return ret[0]
1939 else:
1940 currs, mins, maxs = 0.0, 0.0, 0.0
1941 set_none = False
1942 for cpu in ret:
1943 currs += cpu.current
1944 # On Linux if /proc/cpuinfo is used min/max are set
1945 # to None.
1946 if LINUX and cpu.min is None:
1947 set_none = True
1948 continue
1949 mins += cpu.min
1950 maxs += cpu.max
1951
1952 current = currs / num_cpus
1953
1954 if set_none:
1955 min_ = max_ = None
1956 else:
1957 min_ = mins / num_cpus
1958 max_ = maxs / num_cpus
1959
1960 return _common.scpufreq(current, min_, max_)
1961
1962 __all__.append("cpu_freq")
1963
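# Illustrative usage, where available (a sketch; the values are made up
# and min/max may be 0.0 or None on some platforms):
#
#     >>> psutil.cpu_freq()
#     scpufreq(current=2419.0, min=800.0, max=3500.0)
#     >>> len(psutil.cpu_freq(percpu=True))   # Linux only: one entry per CPU
#     8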
1964
1965if hasattr(os, "getloadavg") or hasattr(_psplatform, "getloadavg"):
    # Perform this hasattr check once, at import time, to either use the
    # platform-specific code or proxy straight to the os module.
1968 if hasattr(os, "getloadavg"):
1969 getloadavg = os.getloadavg
1970 else:
1971 getloadavg = _psplatform.getloadavg
1972
1973 __all__.append("getloadavg")
1974
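# Illustrative usage (a sketch; numbers are made up). Dividing the 1-minute
# load average by the number of logical CPUs gives a rough saturation
# percentage:
#
#     >>> load1, load5, load15 = psutil.getloadavg()
#     >>> round(load1 / psutil.cpu_count() * 100, 1)
#     12.5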
1975
1976# =====================================================================
1977# --- system memory related functions
1978# =====================================================================
1979
1980
1981def virtual_memory():
1982 """Return statistics about system memory usage as a namedtuple
1983 including the following fields, expressed in bytes:
1984
1985 - total:
1986 total physical memory available.
1987
1988 - available:
1989 the memory that can be given instantly to processes without the
1990 system going into swap.
1991 This is calculated by summing different memory values depending
1992 on the platform and it is supposed to be used to monitor actual
1993 memory usage in a cross platform fashion.
1994
1995 - percent:
1996 the percentage usage calculated as (total - available) / total * 100
1997
1998 - used:
1999 memory used, calculated differently depending on the platform and
2000 designed for informational purposes only:
2001 macOS: active + wired
2002 BSD: active + wired + cached
2003 Linux: total - free
2004
2005 - free:
2006 memory not being used at all (zeroed) that is readily available;
2007 note that this doesn't reflect the actual memory available
2008 (use 'available' instead)
2009
2010 Platform-specific fields:
2011
2012 - active (UNIX):
2013 memory currently in use or very recently used, and so it is in RAM.
2014
2015 - inactive (UNIX):
2016 memory that is marked as not used.
2017
2018 - buffers (BSD, Linux):
2019 cache for things like file system metadata.
2020
2021 - cached (BSD, macOS):
2022 cache for various things.
2023
2024 - wired (macOS, BSD):
2025 memory that is marked to always stay in RAM. It is never moved to disk.
2026
2027 - shared (BSD):
2028 memory that may be simultaneously accessed by multiple processes.
2029
2030 The sum of 'used' and 'available' does not necessarily equal total.
2031 On Windows 'available' and 'free' are the same.
2032 """
2033 global _TOTAL_PHYMEM
2034 ret = _psplatform.virtual_memory()
2035 # cached for later use in Process.memory_percent()
2036 _TOTAL_PHYMEM = ret.total
2037 return ret
2038
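# Illustrative usage (a sketch; numbers are made up). 'available' and
# 'percent' are the fields meant for cross-platform monitoring:
#
#     >>> mem = psutil.virtual_memory()
#     >>> mem.percent                          # (total - available) / total * 100
#     38.8
#     >>> round(mem.available / (1024 ** 3), 1)   # GiB readily available
#     9.6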
2039
2040def swap_memory():
2041 """Return system swap memory statistics as a namedtuple including
2042 the following fields:
2043
2044 - total: total swap memory in bytes
2045 - used: used swap memory in bytes
2046 - free: free swap memory in bytes
2047 - percent: the percentage usage
2048 - sin: no. of bytes the system has swapped in from disk (cumulative)
2049 - sout: no. of bytes the system has swapped out from disk (cumulative)
2050
2051 'sin' and 'sout' on Windows are meaningless and always set to 0.
2052 """
2053 return _psplatform.swap_memory()
2054
2055
2056# =====================================================================
2057# --- disks/partitions related functions
2058# =====================================================================
2059
2060
2061def disk_usage(path):
2062 """Return disk usage statistics about the given *path* as a
2063 namedtuple including total, used and free space expressed in bytes
2064 plus the percentage usage.
2065 """
2066 return _psplatform.disk_usage(path)
2067
2068
2069def disk_partitions(all=False):
2070 """Return mounted partitions as a list of
2071 (device, mountpoint, fstype, opts) namedtuple.
2072 'opts' field is a raw string separated by commas indicating mount
2073 options which may vary depending on the platform.
2074
    If the *all* parameter is False return physical devices only and
    ignore all others.
2077 """
2078 return _psplatform.disk_partitions(all)
2079
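# Illustrative usage combining disk_partitions() and disk_usage() (a
# sketch; the device name and numbers are made up):
#
#     >>> for part in psutil.disk_partitions(all=False):
#     ...     usage = psutil.disk_usage(part.mountpoint)
#     ...     print(part.device, part.fstype, usage.percent)
#     /dev/sda1 ext4 41.3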
2080
2081def disk_io_counters(perdisk=False, nowrap=True):
2082 """Return system disk I/O statistics as a namedtuple including
2083 the following fields:
2084
2085 - read_count: number of reads
2086 - write_count: number of writes
2087 - read_bytes: number of bytes read
2088 - write_bytes: number of bytes written
2089 - read_time: time spent reading from disk (in ms)
2090 - write_time: time spent writing to disk (in ms)
2091
2092 Platform specific:
2093
2094 - busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms)
2095 - read_merged_count (Linux): number of merged reads
2096 - write_merged_count (Linux): number of merged writes
2097
2098 If *perdisk* is True return the same information for every
2099 physical disk installed on the system as a dictionary
2100 with partition names as the keys and the namedtuple
2101 described above as the values.
2102
    If *nowrap* is True it detects and adjusts the numbers which overflow
    and wrap (restart from 0), adding "old value" to "new value" so that
    the returned numbers will always increase or remain the same, but
    never decrease.
2107 "disk_io_counters.cache_clear()" can be used to invalidate the
2108 cache.
2109
    On recent Windows versions the 'diskperf -y' command may need to be
    executed first, otherwise this function won't find any disk.
2112 """
2113 kwargs = dict(perdisk=perdisk) if LINUX else {}
2114 rawdict = _psplatform.disk_io_counters(**kwargs)
2115 if not rawdict:
2116 return {} if perdisk else None
2117 if nowrap:
2118 rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters')
2119 nt = getattr(_psplatform, "sdiskio", _common.sdiskio)
2120 if perdisk:
2121 for disk, fields in rawdict.items():
2122 rawdict[disk] = nt(*fields)
2123 return rawdict
2124 else:
2125 return nt(*(sum(x) for x in zip(*rawdict.values())))
2126
2127
2128disk_io_counters.cache_clear = functools.partial(
2129 _wrap_numbers.cache_clear, 'psutil.disk_io_counters'
2130)
2131disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
2132
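# Illustrative usage (a sketch; the 'sda' disk name and the numbers are
# made up):
#
#     >>> psutil.disk_io_counters().read_bytes      # totals across all disks
#     1053614080
#     >>> psutil.disk_io_counters(perdisk=True)['sda'].write_count
#     428431
#     >>> psutil.disk_io_counters.cache_clear()     # reset nowrap bookkeeping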
2133
2134# =====================================================================
2135# --- network related functions
2136# =====================================================================
2137
2138
2139def net_io_counters(pernic=False, nowrap=True):
2140 """Return network I/O statistics as a namedtuple including
2141 the following fields:
2142
2143 - bytes_sent: number of bytes sent
2144 - bytes_recv: number of bytes received
2145 - packets_sent: number of packets sent
2146 - packets_recv: number of packets received
2147 - errin: total number of errors while receiving
2148 - errout: total number of errors while sending
2149 - dropin: total number of incoming packets which were dropped
2150 - dropout: total number of outgoing packets which were dropped
2151 (always 0 on macOS and BSD)
2152
2153 If *pernic* is True return the same information for every
2154 network interface installed on the system as a dictionary
2155 with network interface names as the keys and the namedtuple
2156 described above as the values.
2157
    If *nowrap* is True it detects and adjusts the numbers which overflow
    and wrap (restart from 0), adding "old value" to "new value" so that
    the returned numbers will always increase or remain the same, but
    never decrease.
2162 "net_io_counters.cache_clear()" can be used to invalidate the
2163 cache.
2164 """
2165 rawdict = _psplatform.net_io_counters()
2166 if not rawdict:
2167 return {} if pernic else None
2168 if nowrap:
2169 rawdict = _wrap_numbers(rawdict, 'psutil.net_io_counters')
2170 if pernic:
2171 for nic, fields in rawdict.items():
2172 rawdict[nic] = _common.snetio(*fields)
2173 return rawdict
2174 else:
2175 return _common.snetio(*[sum(x) for x in zip(*rawdict.values())])
2176
2177
2178net_io_counters.cache_clear = functools.partial(
2179 _wrap_numbers.cache_clear, 'psutil.net_io_counters'
2180)
2181net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"
2182
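# Illustrative throughput measurement (a sketch; numbers are made up):
# take two snapshots and diff the cumulative counters:
#
#     >>> import time, psutil
#     >>> before = psutil.net_io_counters()
#     >>> time.sleep(1)
#     >>> after = psutil.net_io_counters()
#     >>> after.bytes_recv - before.bytes_recv      # bytes received in ~1 sec
#     52731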
2183
2184def net_connections(kind='inet'):
2185 """Return system-wide socket connections as a list of
2186 (fd, family, type, laddr, raddr, status, pid) namedtuples.
2187 In case of limited privileges 'fd' and 'pid' may be set to -1
2188 and None respectively.
2189 The *kind* parameter filters for connections that fit the
2190 following criteria:
2191
2192 +------------+----------------------------------------------------+
2193 | Kind Value | Connections using |
2194 +------------+----------------------------------------------------+
2195 | inet | IPv4 and IPv6 |
2196 | inet4 | IPv4 |
2197 | inet6 | IPv6 |
2198 | tcp | TCP |
2199 | tcp4 | TCP over IPv4 |
2200 | tcp6 | TCP over IPv6 |
2201 | udp | UDP |
2202 | udp4 | UDP over IPv4 |
2203 | udp6 | UDP over IPv6 |
2204 | unix | UNIX socket (both UDP and TCP protocols) |
2205 | all | the sum of all the possible families and protocols |
2206 +------------+----------------------------------------------------+
2207
2208 On macOS this function requires root privileges.
2209 """
2210 _check_conn_kind(kind)
2211 return _psplatform.net_connections(kind)
2212
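# Illustrative usage (a sketch; the ports shown are made up), e.g. listing
# local TCP ports in LISTEN state:
#
#     >>> conns = psutil.net_connections(kind='tcp')
#     >>> sorted({c.laddr.port for c in conns if c.status == psutil.CONN_LISTEN})
#     [22, 80, 443]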
2213
2214def net_if_addrs():
    """Return the addresses associated with each NIC (network interface
    card) installed on the system as a dictionary whose keys are the
    NIC names and whose values are lists of namedtuples, one for each
    address assigned to the NIC. Each namedtuple includes 5 fields:
2219
2220 - family: can be either socket.AF_INET, socket.AF_INET6 or
2221 psutil.AF_LINK, which refers to a MAC address.
    - address: the primary address; it is always set.
    - netmask: the netmask address; 'netmask', 'broadcast' and 'ptp'
      may be None.
    - ptp: stands for "point to point" and references the
      destination address on a point to point interface
      (typically a VPN).
    - broadcast: the broadcast address; 'broadcast' and 'ptp' are
      mutually exclusive.
2228
2229 Note: you can have more than one address of the same family
2230 associated with each interface.
2231 """
2232 rawlist = _psplatform.net_if_addrs()
2233 rawlist.sort(key=lambda x: x[1]) # sort by family
2234 ret = collections.defaultdict(list)
2235 for name, fam, addr, mask, broadcast, ptp in rawlist:
2236 try:
2237 fam = socket.AddressFamily(fam)
2238 except ValueError:
2239 if WINDOWS and fam == -1:
2240 fam = _psplatform.AF_LINK
2241 elif (
2242 hasattr(_psplatform, "AF_LINK") and fam == _psplatform.AF_LINK
2243 ):
2244 # Linux defines AF_LINK as an alias for AF_PACKET.
2245 # We re-set the family here so that repr(family)
2246 # will show AF_LINK rather than AF_PACKET
2247 fam = _psplatform.AF_LINK
2248
2249 if fam == _psplatform.AF_LINK:
2250 # The underlying C function may return an incomplete MAC
2251 # address in which case we fill it with null bytes, see:
2252 # https://github.com/giampaolo/psutil/issues/786
2253 separator = ":" if POSIX else "-"
2254 while addr.count(separator) < 5:
2255 addr += f"{separator}00"
2256
2257 nt = _common.snicaddr(fam, addr, mask, broadcast, ptp)
2258
2259 # On Windows broadcast is None, so we determine it via
2260 # ipaddress module.
2261 if WINDOWS and fam in {socket.AF_INET, socket.AF_INET6}:
2262 try:
2263 broadcast = _common.broadcast_addr(nt)
2264 except Exception as err: # noqa: BLE001
2265 debug(err)
2266 else:
2267 if broadcast is not None:
                    nt = nt._replace(broadcast=broadcast)
2269
2270 ret[name].append(nt)
2271
2272 return dict(ret)
2273
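# Illustrative usage (a sketch; 'eth0' and the address are hypothetical):
#
#     >>> import socket, psutil
#     >>> addrs = psutil.net_if_addrs()
#     >>> [a.address for a in addrs.get('eth0', []) if a.family == socket.AF_INET]
#     ['192.168.1.10']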
2274
2275def net_if_stats():
2276 """Return information about each NIC (network interface card)
2277 installed on the system as a dictionary whose keys are the
    NIC names and whose values are namedtuples with the following fields:
2279
2280 - isup: whether the interface is up (bool)
2281 - duplex: can be either NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or
2282 NIC_DUPLEX_UNKNOWN
    - speed: the NIC speed expressed in megabits (Mb); if it can't
      be determined (e.g. 'localhost') it will be set to 0.
2285 - mtu: the maximum transmission unit expressed in bytes.
2286 """
2287 return _psplatform.net_if_stats()
2288
2289
2290# =====================================================================
2291# --- sensors
2292# =====================================================================
2293
2294
2295# Linux, macOS
2296if hasattr(_psplatform, "sensors_temperatures"):
2297
2298 def sensors_temperatures(fahrenheit=False):
2299 """Return hardware temperatures. Each entry is a namedtuple
        representing a certain hardware sensor (it may be a CPU, a
        hard disk or something else, depending on the OS and its
        configuration).
        All temperatures are expressed in Celsius unless *fahrenheit*
2304 is set to True.
2305 """
2306
2307 def convert(n):
2308 if n is not None:
2309 return (float(n) * 9 / 5) + 32 if fahrenheit else n
2310
2311 ret = collections.defaultdict(list)
2312 rawdict = _psplatform.sensors_temperatures()
2313
2314 for name, values in rawdict.items():
2315 while values:
2316 label, current, high, critical = values.pop(0)
2317 current = convert(current)
2318 high = convert(high)
2319 critical = convert(critical)
2320
2321 if high and not critical:
2322 critical = high
2323 elif critical and not high:
2324 high = critical
2325
2326 ret[name].append(
2327 _common.shwtemp(label, current, high, critical)
2328 )
2329
2330 return dict(ret)
2331
2332 __all__.append("sensors_temperatures")
2333
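# Illustrative usage, where available (a sketch; the sensor name and
# values are made up):
#
#     >>> for name, entries in psutil.sensors_temperatures().items():
#     ...     for entry in entries:
#     ...         print(name, entry.label, entry.current)
#     coretemp Core 0 45.0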
2334
2335# Linux
2336if hasattr(_psplatform, "sensors_fans"):
2337
2338 def sensors_fans():
2339 """Return fans speed. Each entry is a namedtuple
2340 representing a certain hardware sensor.
        All speeds are expressed in RPM (revolutions per minute).
2342 """
2343 return _psplatform.sensors_fans()
2344
2345 __all__.append("sensors_fans")
2346
2347
2348# Linux, Windows, FreeBSD, macOS
2349if hasattr(_psplatform, "sensors_battery"):
2350
2351 def sensors_battery():
2352 """Return battery information. If no battery is installed
2353 returns None.
2354
2355 - percent: battery power left as a percentage.
2356 - secsleft: a rough approximation of how many seconds are left
2357 before the battery runs out of power. May be
          POWER_TIME_UNKNOWN or POWER_TIME_UNLIMITED.
2359 - power_plugged: True if the AC power cable is connected.
2360 """
2361 return _psplatform.sensors_battery()
2362
2363 __all__.append("sensors_battery")
2364
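# Illustrative usage, where available (a sketch; values are made up):
#
#     >>> batt = psutil.sensors_battery()
#     >>> if batt is not None and not batt.power_plugged:
#     ...     print(f"{batt.percent}% left")
#     87.0% left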
2365
2366# =====================================================================
2367# --- other system related functions
2368# =====================================================================
2369
2370
2371def boot_time():
2372 """Return the system boot time expressed in seconds since the epoch
2373 (seconds since January 1, 1970, at midnight UTC). The returned
2374 value is based on the system clock, which means it may be affected
2375 by changes such as manual adjustments or time synchronization (e.g.
2376 NTP).
2377 """
2378 return _psplatform.boot_time()
2379
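# Illustrative usage (a sketch; the timestamp shown is made up):
#
#     >>> import datetime, psutil
#     >>> datetime.datetime.fromtimestamp(psutil.boot_time())
#     datetime.datetime(2024, 1, 1, 8, 30, 12)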
2380
2381def users():
2382 """Return users currently connected on the system as a list of
2383 namedtuples including the following fields.
2384
2385 - user: the name of the user
2386 - terminal: the tty or pseudo-tty associated with the user, if any.
2387 - host: the host name associated with the entry, if any.
2388 - started: the creation time as a floating point number expressed in
2389 seconds since the epoch.
2390 """
2391 return _psplatform.users()
2392
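# Illustrative usage (a sketch; the user and terminal names are made up):
#
#     >>> [(u.name, u.terminal) for u in psutil.users()]
#     [('giampaolo', 'pts/0')]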
2393
2394# =====================================================================
2395# --- Windows services
2396# =====================================================================
2397
2398
2399if WINDOWS:
2400
2401 def win_service_iter():
2402 """Return a generator yielding a WindowsService instance for all
2403 Windows services installed.
2404 """
2405 return _psplatform.win_service_iter()
2406
2407 def win_service_get(name):
2408 """Get a Windows service by *name*.
        Raise NoSuchProcess if no service with such a name exists.
2410 """
2411 return _psplatform.win_service_get(name)
2412
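# Illustrative usage on Windows (a sketch; 'Spooler' is just an example
# service name and the output is made up):
#
#     >>> [s.name() for s in psutil.win_service_iter()][:2]
#     ['AppMgmt', 'Spooler']
#     >>> psutil.win_service_get('Spooler').status()
#     'running'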
2413
2414# =====================================================================
2415
2416
2417def _set_debug(value):
2418 """Enable or disable PSUTIL_DEBUG option, which prints debugging
2419 messages to stderr.
2420 """
2421 import psutil._common
2422
2423 psutil._common.PSUTIL_DEBUG = bool(value)
2424 _psplatform.cext.set_debug(bool(value))
2425
2426
2427del memoize_when_activated