Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/psutil-8.0.0-py3.11-linux-x86_64.egg/psutil/__init__.py: 25%

Shortcuts on this page

r m x   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1064 statements  

1# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. 

2# Use of this source code is governed by a BSD-style license that can be 

3# found in the LICENSE file. 

4 

5"""psutil is a cross-platform library for retrieving information on 

6running processes and system utilization (CPU, memory, disks, network, 

7sensors) in Python. Supported platforms: 

8 

9 - Linux 

10 - Windows 

11 - macOS 

12 - FreeBSD 

13 - OpenBSD 

14 - NetBSD 

15 - Sun Solaris 

16 - AIX 

17 

18Supported Python versions are cPython 3.7+ and PyPy. 

19""" 

20 

21from __future__ import annotations 

22 

23import collections 

24import contextlib 

25import datetime 

26import functools 

27import os 

28import signal 

29import socket 

30import subprocess 

31import sys 

32import threading 

33import time 

34import warnings 

35from typing import TYPE_CHECKING as _TYPE_CHECKING 

36 

37try: 

38 import pwd 

39except ImportError: 

40 pwd = None 

41 

42from . import _common 

43from . import _ntuples as _ntp 

44from ._common import AIX 

45from ._common import BSD 

46from ._common import FREEBSD 

47from ._common import LINUX 

48from ._common import MACOS 

49from ._common import NETBSD 

50from ._common import OPENBSD 

51from ._common import OSX # deprecated alias 

52from ._common import POSIX 

53from ._common import SUNOS 

54from ._common import WINDOWS 

55from ._common import AccessDenied 

56from ._common import Error 

57from ._common import NoSuchProcess 

58from ._common import TimeoutExpired 

59from ._common import ZombieProcess 

60from ._common import debug 

61from ._common import memoize_when_activated 

62from ._common import wrap_numbers as _wrap_numbers 

63from ._enums import BatteryTime 

64from ._enums import ConnectionStatus 

65from ._enums import NicDuplex 

66from ._enums import ProcessStatus 

67 

68if _TYPE_CHECKING: 

69 from collections.abc import Collection 

70 from typing import Any 

71 from typing import Callable 

72 from typing import Generator 

73 from typing import Iterator 

74 

75 from ._ntuples import pconn 

76 from ._ntuples import pcputimes 

77 from ._ntuples import pctxsw 

78 from ._ntuples import pfootprint 

79 from ._ntuples import pfullmem 

80 from ._ntuples import pgids 

81 from ._ntuples import pheap 

82 from ._ntuples import pio 

83 from ._ntuples import pionice 

84 from ._ntuples import pmem 

85 from ._ntuples import pmem_ex 

86 from ._ntuples import pmmap_ext 

87 from ._ntuples import pmmap_grouped 

88 from ._ntuples import popenfile 

89 from ._ntuples import ppagefaults 

90 from ._ntuples import pthread 

91 from ._ntuples import puids 

92 from ._ntuples import sbattery 

93 from ._ntuples import sconn 

94 from ._ntuples import scpufreq 

95 from ._ntuples import scpustats 

96 from ._ntuples import scputimes 

97 from ._ntuples import sdiskio 

98 from ._ntuples import sdiskpart 

99 from ._ntuples import sdiskusage 

100 from ._ntuples import sfan 

101 from ._ntuples import shwtemp 

102 from ._ntuples import snetio 

103 from ._ntuples import snicaddr 

104 from ._ntuples import snicstats 

105 from ._ntuples import sswap 

106 from ._ntuples import suser 

107 from ._ntuples import svmem 

108 from ._pswindows import WindowsService 

109 

110 

# Import the platform-specific implementation module ("_psplatform"),
# plus any platform-only public names (enums, PROCFS_PATH).
if LINUX:
    # This is public API and it will be retrieved from _pslinux.py
    # via sys.modules.
    PROCFS_PATH = "/proc"

    from . import _pslinux as _psplatform
    from ._enums import ProcessIOPriority
    from ._enums import ProcessRlimit

elif WINDOWS:
    from . import _pswindows as _psplatform
    from ._enums import ProcessIOPriority
    from ._enums import ProcessPriority

elif MACOS:
    from . import _psosx as _psplatform

elif BSD:
    from . import _psbsd as _psplatform

    if FREEBSD:
        from ._enums import ProcessRlimit

elif SUNOS:
    from . import _pssunos as _psplatform

    # This is public writable API which is read from _pslinux.py and
    # _pssunos.py via sys.modules.
    PROCFS_PATH = "/proc"

elif AIX:
    from . import _psaix as _psplatform

    # This is public API and it will be retrieved from _pslinux.py
    # via sys.modules.
    PROCFS_PATH = "/proc"

else:  # pragma: no cover
    # Unknown platform: fail loudly at import time.
    msg = f"platform {sys.platform} is not supported"
    raise NotImplementedError(msg)

151 

152 

# fmt: off
__all__ = [
    # exceptions
    "Error", "NoSuchProcess", "ZombieProcess", "AccessDenied",
    "TimeoutExpired",

    # constants
    "version_info", "__version__",

    "AF_LINK",

    "BSD", "FREEBSD", "LINUX", "NETBSD", "OPENBSD", "MACOS", "OSX", "POSIX",
    "SUNOS", "WINDOWS", "AIX",

    # classes
    "Process", "Popen",

    # functions
    "pid_exists", "pids", "process_iter", "wait_procs",  # proc
    "virtual_memory", "swap_memory",  # memory
    "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count",  # cpu
    "cpu_stats", "getloadavg",  # "cpu_freq",
    "net_io_counters", "net_connections", "net_if_addrs",  # network
    "net_if_stats",
    "disk_io_counters", "disk_partitions", "disk_usage",  # disk
    # "sensors_temperatures", "sensors_battery", "sensors_fans"  # sensors
    "users", "boot_time",  # others
]
# fmt: on

# The platform module may export additional, platform-only public names.
__all__.extend(_psplatform.__extra__all__)
# Reference to this module's namespace, used by _export_enum() below to
# inject enum members as module-level constants.
_globals = globals()

185 

186 

def _export_enum(cls):
    """Expose enum *cls* and all of its members at module level.

    The class name and every member name are appended to ``__all__``;
    members are injected into the module namespace unless a name with
    the same spelling already exists there.
    """
    __all__.append(cls.__name__)
    for member_name, member in cls.__members__.items():
        if member_name in _globals:  # noqa: F821
            # Never clobber an existing module-level name.
            continue
        _globals[member_name] = member  # noqa: F821
        __all__.append(member_name)

193 

194 

# Populate global namespace with enums and CONSTANTs.
_export_enum(ProcessStatus)
_export_enum(ConnectionStatus)
_export_enum(NicDuplex)
_export_enum(BatteryTime)
# Platform-only enums (imported conditionally above).
if LINUX or WINDOWS:
    _export_enum(ProcessIOPriority)
if WINDOWS:
    _export_enum(ProcessPriority)
if LINUX or FREEBSD:
    _export_enum(ProcessRlimit)
if LINUX or SUNOS or AIX:
    __all__.append("PROCFS_PATH")

# No longer needed; avoid polluting the module namespace.
del _globals, _export_enum

210 

# Re-export the platform's link-layer address family constant.
AF_LINK = _psplatform.AF_LINK

__author__ = "Giampaolo Rodola'"
__version__ = "8.0.0"
# e.g. "8.0.0" -> (8, 0, 0)
version_info = tuple(int(num) for num in __version__.split('.'))

# Prefer a clock unaffected by system time changes when available.
_timer = getattr(time, 'monotonic', time.time)
# Lazily-populated module-level caches.
_TOTAL_PHYMEM = None
_LOWEST_PID = None
# Unique marker distinguishing "not set" from legitimate None values.
_SENTINEL = object()

221 

# Sanity check in case the user messed up with psutil installation
# or did something weird with sys.path. In this case we might end
# up importing a python module using a C extension module which
# was compiled for a different version of psutil.
# We want to prevent that by failing sooner rather than later.
# See: https://github.com/giampaolo/psutil/issues/564
if int(__version__.replace('.', '')) != getattr(
    _psplatform.cext, 'version', None
):
    msg = f"version conflict: {_psplatform.cext.__file__!r} C extension "
    msg += "module was built for another version of psutil"
    if hasattr(_psplatform.cext, 'version'):
        # cext.version is an int like 800; turn it into "8.0.0".
        v = ".".join(list(str(_psplatform.cext.version)))
        msg += f" ({v} instead of {__version__})"
    else:
        msg += f" (different than {__version__})"
    what = getattr(
        _psplatform.cext,
        "__file__",
        "the existing psutil install directory",
    )
    msg += f"; you may try to 'pip uninstall psutil', manually remove {what}"
    msg += " or clean the virtual env somehow, then reinstall"
    raise ImportError(msg)

246 

247 

248# ===================================================================== 

249# --- Utils 

250# ===================================================================== 

251 

252 

if hasattr(_psplatform, 'ppid_map'):
    # Faster version (Windows and Linux).
    _ppid_map = _psplatform.ppid_map
else:  # pragma: no cover

    def _ppid_map():
        """Return a `{pid: ppid, ...}` dict for all running processes in
        one shot. Used to speed up `Process.children()`.
        """
        ret = {}
        for pid in pids():
            try:
                ret[pid] = _psplatform.Process(pid).ppid()
            except (NoSuchProcess, ZombieProcess):
                # Process vanished (or is an unreadable zombie) between
                # listing and querying; skip it.
                pass
        return ret

269 

270 

271def _pprint_secs(secs): 

272 """Format seconds in a human readable form.""" 

273 now = time.time() 

274 secs_ago = int(now - secs) 

275 fmt = "%H:%M:%S" if secs_ago < 60 * 60 * 24 else "%Y-%m-%d %H:%M:%S" 

276 return datetime.datetime.fromtimestamp(secs).strftime(fmt) 

277 

278 

def _check_conn_kind(kind):
    """Validate the *kind* argument accepted by net_connections().

    Raise ValueError if *kind* is not one of the keys of
    `_common.conn_tmap`.
    """
    kinds = tuple(_common.conn_tmap)
    if kind in kinds:
        return
    msg = f"invalid kind argument {kind!r}; valid ones are: {kinds}"
    raise ValueError(msg)

285 

286 

287# ===================================================================== 

288# --- Process class 

289# ===================================================================== 

290 

291 

292def _use_prefetch(method): 

293 """Decorator returning cached values from `process_iter(attrs=...)`. 

294 

295 When `process_iter()` is called with an *attrs* argument, it 

296 pre-fetches the requested attributes via `as_dict()` and stores 

297 them in `Process._prefetch`. This decorator makes the decorated 

298 method return the cached value (if present) instead of issuing 

299 a new system call. 

300 """ 

301 

302 @functools.wraps(method) 

303 def wrapper(self, *args, **kwargs): 

304 if not args and not kwargs: 

305 try: 

306 return self._prefetch[method.__name__] 

307 except KeyError: 

308 pass 

309 return method(self, *args, **kwargs) 

310 

311 return wrapper 

312 

313 

314class Process: 

315 """Represents an OS process identified by a PID. 

316 

317 If *pid* arg is omitted, the current process PID (`os.getpid()`) is 

318 used. Raises `NoSuchProcess` if the PID does not exist. 

319 

320 The way this class is bound to a process is via its PID. Most 

321 methods do not guarantee that the PID has not been reused, so you 

322 may end up retrieving information for a different process. 

323 

324 Real process identity is checked (via PID + creation time) only for 

325 methods that set attributes or send signals. 

326 

327 To avoid issues with PID reuse for other read-only methods, call 

328 `is_running()` before querying the process. 

329 """ 

330 

331 attrs: frozenset[str] = frozenset() # dynamically set later 

332 

    def __init__(self, pid: int | None = None) -> None:
        # The real work is done by _init(), which is kept separate so it
        # can also be invoked with _ignore_nsp=True (see _init()).
        self._init(pid)

335 

    def _init(self, pid, _ignore_nsp=False):
        """Initialize instance state for *pid*.

        If *pid* is None, the current process PID (`os.getpid()`) is
        used. With *_ignore_nsp* set, a PID that no longer exists marks
        the instance as gone instead of raising `NoSuchProcess`.
        """
        if pid is None:
            pid = os.getpid()
        else:
            if pid < 0:
                msg = f"pid must be a positive integer (got {pid})"
                raise ValueError(msg)
            try:
                # Reject PIDs that don't fit the platform's PID type.
                _psplatform.cext.check_pid_range(pid)
            except OverflowError as err:
                msg = "process PID out of range"
                raise NoSuchProcess(pid, msg=msg) from err

        self._pid = pid
        self._name = None
        self._exe = None
        self._create_time = None
        self._gone = False
        self._pid_reused = False
        self._hash = None
        self._lock = threading.RLock()
        # used for caching on Windows only (on POSIX ppid may change)
        self._ppid = None
        # platform-specific modules define an _psplatform.Process
        # implementation class
        self._proc = _psplatform.Process(pid)
        self._last_sys_cpu_times = None
        self._last_proc_cpu_times = None
        self._exitcode = _SENTINEL
        self._prefetch = {}
        # Fallback identity (PID only); replaced right below when the
        # creation time can be determined.
        self._ident = (self.pid, None)
        try:
            self._ident = self._get_ident()
        except AccessDenied:
            # This should happen on Windows only, since we use the fast
            # create time method. AFAIK, on all other platforms we are
            # able to get create time for all PIDs.
            pass
        except ZombieProcess:
            # Zombies can still be queried by this class (although
            # not always) and pids() return them so just go on.
            pass
        except NoSuchProcess:
            if not _ignore_nsp:
                msg = "process PID not found"
                raise NoSuchProcess(pid, msg=msg) from None
            self._gone = True

383 

    def _get_ident(self):
        """Return a `(pid, uid)` tuple which is supposed to identify a
        Process instance univocally over time.

        The PID alone is not enough, as it can be assigned to a new
        process after this one terminates, so we add creation time to
        the mix. We need this in order to prevent killing the wrong
        process later on. This is also known as PID reuse or PID
        recycling problem.

        The reliability of this strategy mostly depends on
        `create_time()` precision, which is 0.01 secs on Linux. The
        assumption is that, after a process terminates, the kernel
        won't reuse the same PID after such a short period of time
        (0.01 secs). Technically this is inherently racy, but
        practically it should be good enough.

        NOTE: unreliable on FreeBSD and OpenBSD as ctime is subject to
        system clock updates.
        """

        if WINDOWS:
            # Use create_time() fast method in order to speedup
            # `process_iter()`. This means we'll get AccessDenied for
            # most ADMIN processes, but that's fine since it means
            # we'll also get AccessDenied on kill().
            # https://github.com/giampaolo/psutil/issues/2366#issuecomment-2381646555
            self._create_time = self._proc.create_time(fast_only=True)
            return (self.pid, self._create_time)
        elif LINUX or NETBSD or OSX:
            # Use 'monotonic' process starttime since boot to form unique
            # process identity, since it is stable over changes to system
            # time.
            return (self.pid, self._proc.create_time(monotonic=True))
        else:
            # Fallback: wall-clock creation time (cached by
            # create_time()).
            return (self.pid, self.create_time())

420 

    def __str__(self):
        # Build a debug-friendly representation such as
        # "psutil.Process(pid=1234, name='python', status='running')".
        info = {}
        info["pid"] = self.pid
        with self.oneshot():
            if self._pid_reused:
                info["status"] = "terminated + PID reused"
            else:
                try:
                    info["name"] = self._name or self.name()
                    info["status"] = str(self.status())
                except ZombieProcess:
                    info["status"] = "zombie"
                except NoSuchProcess:
                    info["status"] = "terminated"
                except AccessDenied:
                    # Not enough privileges; show what we have so far.
                    pass

            # _exitcode is _SENTINEL until set (e.g. by wait()).
            if self._exitcode not in {_SENTINEL, None}:
                info["exitcode"] = self._exitcode
            if self._create_time is not None:
                info['started'] = _pprint_secs(self._create_time)

        return "{}.{}({})".format(
            self.__class__.__module__,
            self.__class__.__name__,
            ", ".join([f"{k}={v!r}" for k, v in info.items()]),
        )

    __repr__ = __str__

450 

    def __eq__(self, other):
        # Test for equality with another Process object based
        # on PID and creation time.
        if not isinstance(other, Process):
            return NotImplemented
        if OPENBSD or NETBSD or SUNOS:  # pragma: no cover
            # Zombie processes on Open/NetBSD/illumos/Solaris have a
            # creation time of 0.0. This covers the case when a process
            # started normally (so it has a ctime), then it turned into a
            # zombie. It's important to do this because is_running()
            # depends on __eq__.
            pid1, ident1 = self._ident
            pid2, ident2 = other._ident
            if pid1 == pid2:
                if ident1 and not ident2:
                    try:
                        return self.status() == ProcessStatus.STATUS_ZOMBIE
                    except Error:
                        # Can't determine the status; fall back to the
                        # plain identity comparison below.
                        pass
        return self._ident == other._ident

471 

472 def __ne__(self, other): 

473 return not self == other 

474 

475 def __hash__(self): 

476 if self._hash is None: 

477 self._hash = hash(self._ident) 

478 return self._hash 

479 

    def _raise_if_pid_reused(self):
        """Raise `NoSuchProcess` in case process PID has been reused."""
        # NOTE: the second clause is not redundant: is_running() is
        # called for its side effect of (re)setting self._pid_reused,
        # which is then checked again.
        if self._pid_reused or (not self.is_running() and self._pid_reused):
            # We may directly raise NSP in here already if PID is just
            # not running, but I prefer NSP to be raised naturally by
            # the actual Process API call. This way unit tests will tell
            # us if the API is broken (aka don't raise NSP when it
            # should). We also remain consistent with all other "get"
            # APIs which don't use _raise_if_pid_reused().
            msg = "process no longer exists and its PID has been reused"
            raise NoSuchProcess(self.pid, self._name, msg=msg)

491 

    @property
    def pid(self) -> int:
        """The process PID (read-only)."""
        return self._pid

496 

497 # DEPRECATED 

498 @property 

499 def info(self) -> dict: 

500 """Return pre-fetched `process_iter()` info dict. 

501 

502 Deprecated: use method calls instead (e.g. `p.name()`). 

503 """ 

504 msg = ( 

505 "Process.info is deprecated; use method calls instead" 

506 " (e.g. p.name() instead of p.info['name'])" 

507 ) 

508 warnings.warn(msg, DeprecationWarning, stacklevel=2) 

509 # Return a copy to prevent the user from mutating the dict and 

510 # corrupting the prefetch cache. 

511 return self._prefetch.copy() 

512 

513 # --- utility methods 

514 

    @contextlib.contextmanager
    def oneshot(self) -> Generator[None, None, None]:
        """Context manager which speeds up the retrieval of multiple
        process attributes at the same time.

        Internally, many attributes (e.g. `name()`, `ppid()`, `uids()`,
        `create_time()`, ...) share the same system call. This context
        manager executes each system call once, and caches the results,
        so subsequent calls return cached values. The cache is cleared
        when exiting the context manager block. Use this every time you
        retrieve more than one attribute about the process.

        >>> import psutil
        >>> p = psutil.Process()
        >>> with p.oneshot():
        ...     p.name()  # collect multiple info
        ...     p.cpu_times()  # return cached value
        ...     p.cpu_percent()  # return cached value
        ...     p.create_time()  # return cached value
        ...
        >>>
        """
        # The lock makes nested/concurrent use of the context safe.
        with self._lock:
            if hasattr(self, "_cache"):
                # NOOP: this covers the use case where the user enters the
                # context twice:
                #
                # >>> with p.oneshot():
                # ...     with p.oneshot():
                # ...
                #
                # Also, since as_dict() internally uses oneshot()
                # I expect that the code below will be a pretty common
                # "mistake" that the user will make, so let's guard
                # against that:
                #
                # >>> with p.oneshot():
                # ...     p.as_dict()
                # ...
                yield
            else:
                try:
                    # cached in case cpu_percent() is used
                    self.cpu_times.cache_activate(self)
                    # cached in case memory_percent() is used
                    self.memory_info.cache_activate(self)
                    # cached in case parent() is used
                    self.ppid.cache_activate(self)
                    # cached in case username() is used
                    if POSIX:
                        self.uids.cache_activate(self)
                    # specific implementation cache
                    self._proc.oneshot_enter()
                    yield
                finally:
                    # Always clear the caches, even if the body raised.
                    self.cpu_times.cache_deactivate(self)
                    self.memory_info.cache_deactivate(self)
                    self.ppid.cache_deactivate(self)
                    if POSIX:
                        self.uids.cache_deactivate(self)
                    self._proc.oneshot_exit()

576 

    def as_dict(
        self, attrs: Collection[str] | None = None, ad_value: Any = None
    ) -> dict[str, Any]:
        """Utility method returning process information as a
        hashable dictionary.

        If *attrs* is specified it must be a collection of strings
        reflecting available Process class' attribute names (e.g.
        ['cpu_times', 'name']) else all public (read-only) attributes
        are assumed. See `Process.attrs` for a full list.

        *ad_value* is the value which gets assigned in case
        `AccessDenied` or `ZombieProcess` exception is raised when
        retrieving that particular process information.
        """
        valid_names = self.attrs
        # Deprecated attrs: not returned by default but still accepted if
        # explicitly requested.
        deprecated_names = {"memory_full_info"}

        if attrs is not None:
            if not isinstance(attrs, (list, tuple, set, frozenset)):
                msg = f"invalid attrs type {type(attrs)}"
                raise TypeError(msg)
            attrs = set(attrs)
            invalid_names = attrs - valid_names - deprecated_names
            if invalid_names:
                msg = "invalid attr name{} {}".format(
                    "s" if len(invalid_names) > 1 else "",
                    ", ".join(map(repr, invalid_names)),
                )
                raise ValueError(msg)

        retdict = {}
        names = attrs or sorted(valid_names)
        # oneshot() makes the many attribute reads below share syscalls.
        with self.oneshot():
            for name in names:
                try:
                    if name == 'pid':
                        # `pid` is a property, not a method.
                        ret = self.pid
                    else:
                        meth = getattr(self, name)
                        ret = meth()
                except (AccessDenied, ZombieProcess):
                    ret = ad_value
                except NotImplementedError:
                    # in case of not implemented functionality (may happen
                    # on old or exotic systems) we want to crash only if
                    # the user explicitly asked for that particular attr
                    if attrs:
                        raise
                    continue
                retdict[name] = ret
        return retdict

631 

    def parent(self) -> Process | None:
        """Return the parent process as a `Process` object, preemptively
        checking whether PID has been reused.

        If no parent is known return None.
        """
        lowest_pid = _LOWEST_PID if _LOWEST_PID is not None else pids()[0]
        if self.pid == lowest_pid:
            # Lowest PID on the system: by definition it has no parent.
            return None
        ppid = self.ppid()
        if ppid is not None:
            # Get a fresh (non-cached) ctime in case the system clock
            # was updated. TODO: use a monotonic ctime on platforms
            # where it's supported.
            proc_ctime = Process(self.pid).create_time()
            try:
                parent = Process(ppid)
                if parent.create_time() <= proc_ctime:
                    return parent
                # ...else ppid has been reused by another process
            except NoSuchProcess:
                pass

654 

655 def parents(self) -> list[Process]: 

656 """Return the parents of this process as a list of `Process` 

657 instances. 

658 

659 If no parents are known return an empty list. 

660 """ 

661 parents = [] 

662 proc = self.parent() 

663 while proc is not None: 

664 parents.append(proc) 

665 proc = proc.parent() 

666 return parents 

667 

    def is_running(self) -> bool:
        """Return whether this process is running.

        It also checks if PID has been reused by another process, in
        which case it will remove the process from `process_iter()`
        internal cache and return False.
        """
        if self._gone or self._pid_reused:
            return False
        try:
            # Checking if PID is alive is not enough as the PID might
            # have been reused by another process. Process identity /
            # uniqueness over time is guaranteed by (PID + creation
            # time) and that is verified in __eq__.
            self._pid_reused = self != Process(self.pid)
            if self._pid_reused:
                # Evict this PID from process_iter()'s internal cache.
                _pids_reused.add(self.pid)
                raise NoSuchProcess(self.pid)
            return True
        except ZombieProcess:
            # We should never get here as it's already handled in
            # Process.__init__; here just for extra safety.
            return True
        except NoSuchProcess:
            self._gone = True
            return False

694 

695 # --- actual API 

696 

    @_use_prefetch
    @memoize_when_activated
    def ppid(self) -> int:
        """The process parent PID.
        On Windows the return value is cached after first call.
        """
        # On POSIX we don't want to cache the ppid as it may unexpectedly
        # change to 1 (init) in case this process turns into a zombie:
        # https://github.com/giampaolo/psutil/issues/321
        # http://stackoverflow.com/questions/356722/

        # XXX should we check creation time here rather than in
        # Process.parent()?
        self._raise_if_pid_reused()
        if POSIX:
            return self._proc.ppid()
        else:  # pragma: no cover
            # Non-POSIX (Windows): cache after the first successful call.
            self._ppid = self._ppid or self._proc.ppid()
            return self._ppid

716 

    @_use_prefetch
    def name(self) -> str:
        """The process name. The return value is cached after first call."""
        # Process name is only cached on Windows as on POSIX it may
        # change, see:
        # https://github.com/giampaolo/psutil/issues/692
        if WINDOWS and self._name is not None:
            return self._name
        name = self._proc.name()
        if POSIX and len(name) >= 15:
            # On UNIX the name gets truncated to the first 15 characters.
            # If it matches the first part of the cmdline we return that
            # one instead because it's usually more explicative.
            # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
            try:
                cmdline = self.cmdline()
            except (AccessDenied, ZombieProcess):
                # Just pass and return the truncated name: it's better
                # than nothing. Note: there are actual cases where a
                # zombie process can return a name() but not a
                # cmdline(), see:
                # https://github.com/giampaolo/psutil/issues/2239
                pass
            else:
                if cmdline:
                    extended_name = os.path.basename(cmdline[0])
                    if extended_name.startswith(name):
                        name = extended_name
        self._name = name
        # Also share the resolved name with the platform implementation.
        self._proc._name = name
        return name

748 

    @_use_prefetch
    def exe(self) -> str:
        """The process executable as an absolute path.

        May also be an empty string. The return value is cached after
        first call.
        """

        def guess_it(fallback):
            # try to guess exe from cmdline[0] in absence of a native
            # exe representation
            cmdline = self.cmdline()
            if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
                exe = cmdline[0]  # the possible exe
                # Attempt to guess only in case of an absolute path.
                # It is not safe otherwise as the process might have
                # changed cwd.
                if (
                    os.path.isabs(exe)
                    and os.path.isfile(exe)
                    and os.access(exe, os.X_OK)
                ):
                    return exe
            if isinstance(fallback, AccessDenied):
                raise fallback
            return fallback

        if self._exe is None:
            try:
                exe = self._proc.exe()
            except AccessDenied as err:
                # Don't cache: a later (privileged) call may succeed.
                return guess_it(fallback=err)
            else:
                if not exe:
                    # underlying implementation can legitimately return an
                    # empty string; if that's the case we don't want to
                    # raise AD while guessing from the cmdline
                    try:
                        exe = guess_it(fallback=exe)
                    except AccessDenied:
                        pass
                self._exe = exe
        return self._exe

792 

    @_use_prefetch
    def cmdline(self) -> list[str]:
        """The command line this process has been called with.

        Delegates to the platform implementation.
        """
        return self._proc.cmdline()

797 

    @_use_prefetch
    def status(self) -> ProcessStatus | str:
        """The process current status as a `STATUS_` constant."""
        try:
            return self._proc.status()
        except ZombieProcess:
            # The platform implementation may raise ZombieProcess while
            # reading the status; translate it into the zombie constant.
            return ProcessStatus.STATUS_ZOMBIE

805 

    @_use_prefetch
    def username(self) -> str:
        """The name of the user that owns the process.

        On UNIX this is calculated by using the real process uid.
        """
        if POSIX:
            if pwd is None:
                # might happen if python was installed from sources
                msg = "requires pwd module shipped with standard python"
                raise ImportError(msg)
            real_uid = self.uids().real
            try:
                return pwd.getpwuid(real_uid).pw_name
            except KeyError:
                # the uid can't be resolved by the system
                return str(real_uid)
        else:
            return self._proc.username()

825 

    @_use_prefetch
    def create_time(self) -> float:
        """The process creation time as a floating point number
        expressed in seconds since the epoch (seconds since January 1,
        1970, at midnight UTC).

        The return value, which is cached after first call, is based on
        the system clock, which means it may be affected by changes
        such as manual adjustments or time synchronization (e.g. NTP).
        """
        # _create_time may already be set by _get_ident() (Windows).
        if self._create_time is None:
            self._create_time = self._proc.create_time()
        return self._create_time

839 

    @_use_prefetch
    def cwd(self) -> str:
        """Process current working directory as an absolute path."""
        return self._proc.cwd()

844 

845 @_use_prefetch 

846 def nice(self, value: int | None = None) -> int | None: 

847 """Get or set process niceness (priority).""" 

848 if value is None: 

849 return self._proc.nice_get() 

850 else: 

851 self._raise_if_pid_reused() 

852 self._proc.nice_set(value) 

853 

    # The following methods only exist on POSIX platforms.
    if POSIX:

        @_use_prefetch
        @memoize_when_activated
        def uids(self) -> puids:
            """Return process UIDs as a `(real, effective, saved)`
            named tuple.
            """
            return self._proc.uids()

        @_use_prefetch
        def gids(self) -> pgids:
            """Return process GIDs as a `(real, effective, saved)`
            named tuple.
            """
            return self._proc.gids()

        @_use_prefetch
        def terminal(self) -> str | None:
            """The terminal associated with this process, if any,
            else None.
            """
            return self._proc.terminal()

        @_use_prefetch
        def num_fds(self) -> int:
            """Return the number of file descriptors opened by this
            process (POSIX only).
            """
            return self._proc.num_fds()

884 

    # Only defined if the platform implementation provides it.
    if hasattr(_psplatform.Process, "io_counters"):

        @_use_prefetch
        def io_counters(self) -> pio:
            """Return process I/O statistics (primarily read and
            written bytes).

            Availability: Linux, Windows, BSD, AIX
            """
            return self._proc.io_counters()

895 

    # Only defined if the platform implementation provides it.
    if hasattr(_psplatform.Process, "ionice_get"):

        @_use_prefetch
        def ionice(
            self, ioclass: int | None = None, value: int | None = None
        ) -> pionice | ProcessIOPriority | None:
            """Get or set process I/O niceness (priority).

            On Linux *ioclass* is one of the `IOPRIO_CLASS_*` constants.
            *value* is a number which goes from 0 to 7. The higher the
            value, the lower the I/O priority of the process.

            On Windows only *ioclass* is used and it can be set to
            one of the `IOPRIO_*` constants.

            Availability: Linux, Windows
            """
            if ioclass is None:
                if value is not None:
                    msg = "'ioclass' argument must be specified"
                    raise ValueError(msg)
                return self._proc.ionice_get()
            else:
                # Setting acts on the process: verify PID not reused.
                self._raise_if_pid_reused()
                return self._proc.ionice_set(ioclass, value)

921 

    # Only defined if the platform implementation provides it.
    if hasattr(_psplatform.Process, "rlimit"):

        def rlimit(
            self,
            resource: int,
            limits: tuple[int, int] | None = None,
        ) -> tuple[int, int] | None:
            """Get or set process resource limits as a `(soft, hard)`
            tuple.

            - resource: one of the `RLIMIT_*` constants.
            - limits: a `(soft, hard)` tuple (set).

            See "man prlimit" for further info.

            Availability: Linux, FreeBSD
            """
            if limits is not None:
                # Setting acts on the process: verify PID not reused.
                self._raise_if_pid_reused()
            return self._proc.rlimit(resource, limits)

942 

    # Only defined if the platform implementation provides it.
    if hasattr(_psplatform.Process, "cpu_affinity_get"):

        @_use_prefetch
        def cpu_affinity(
            self, cpus: list[int] | None = None
        ) -> list[int] | None:
            """Get or set process CPU affinity.

            If specified, *cpus* must be a list of CPUs for which you
            want to set the affinity (e.g. `[0, 1]`). If an empty list is
            passed, all eligible CPUs are assumed (and set).

            Availability: Linux, Windows, FreeBSD
            """
            if cpus is None:
                # Normalize: deduplicate and sort the platform's answer.
                return sorted(set(self._proc.cpu_affinity_get()))
            else:
                # Setting acts on the process: verify PID not reused.
                self._raise_if_pid_reused()
                if not cpus:
                    if hasattr(self._proc, "_get_eligible_cpus"):
                        cpus = self._proc._get_eligible_cpus()
                    else:
                        # Fallback: assume one entry per visible CPU.
                        cpus = tuple(range(len(cpu_times(percpu=True))))
                self._proc.cpu_affinity_set(list(set(cpus)))

967 

968 # Linux, FreeBSD, SunOS 

969 if hasattr(_psplatform.Process, "cpu_num"): 

970 

971 @_use_prefetch 

972 def cpu_num(self) -> int: 

973 """Return what CPU this process is currently running on. 

974 

975 The returned number should be <= `psutil.cpu_count()`. 

976 """ 

977 return self._proc.cpu_num() 

978 

979 # All platforms has it, but maybe not in the future. 

980 if hasattr(_psplatform.Process, "environ"): 

981 

982 @_use_prefetch 

983 def environ(self) -> dict[str, str]: 

984 """The environment variables of the process as a dict. 

985 

986 Note: this might not reflect changes made after the process 

987 started. 

988 """ 

989 return self._proc.environ() 

990 

991 if WINDOWS: 

992 

993 @_use_prefetch 

994 def num_handles(self) -> int: 

995 """Return the number of handles opened by this process 

996 

997 Availability: Windows 

998 """ 

999 return self._proc.num_handles() 

1000 

1001 @_use_prefetch 

1002 def num_ctx_switches(self) -> pctxsw: 

1003 """Return the number of voluntary and involuntary context 

1004 switches performed by this process. 

1005 """ 

1006 return self._proc.num_ctx_switches() 

1007 

1008 @_use_prefetch 

1009 def num_threads(self) -> int: 

1010 """Return the number of threads used by this process.""" 

1011 return self._proc.num_threads() 

1012 

1013 if hasattr(_psplatform.Process, "threads"): 

1014 

1015 @_use_prefetch 

1016 def threads(self) -> list[pthread]: 

1017 """Return threads opened by process as a list of 

1018 `(id, user_time, system_time)` named tuples. 

1019 

1020 On OpenBSD this method requires root access. 

1021 """ 

1022 return self._proc.threads() 

1023 

    def children(self, recursive: bool = False) -> list[Process]:
        """Return the children of this process as a list of Process
        instances, preemptively checking whether PID has been reused.

        If *recursive* is True return all the parent descendants.

        Example (A == this process):

         A ─┐
            │
            ├─ B (child) ─┐
            │             └─ X (grandchild) ─┐
            │                                └─ Y (great grandchild)
            ├─ C (child)
            └─ D (child)

        >>> import psutil
        >>> p = psutil.Process()
        >>> p.children()
        B, C, D
        >>> p.children(recursive=True)
        B, X, Y, C, D

        Note that in the example above if process X disappears
        process Y won't be listed as the reference to process A
        is lost.
        """
        self._raise_if_pid_reused()
        ppid_map = _ppid_map()
        # Get a fresh (non-cached) ctime in case the system clock was
        # updated. TODO: use a monotonic ctime on platforms where it's
        # supported.
        proc_ctime = Process(self.pid).create_time()
        ret = []
        if not recursive:
            # Single pass over the {pid: ppid} map: direct children only.
            for pid, ppid in ppid_map.items():
                if ppid == self.pid:
                    try:
                        child = Process(pid)
                        # if child happens to be older than its parent
                        # (self) it means child's PID has been reused
                        if proc_ctime <= child.create_time():
                            ret.append(child)
                    except (NoSuchProcess, ZombieProcess):
                        pass
        else:
            # Construct a {pid: [child pids]} dict
            reverse_ppid_map = collections.defaultdict(list)
            for pid, ppid in ppid_map.items():
                reverse_ppid_map[ppid].append(pid)
            # Recursively traverse that dict, starting from self.pid,
            # such that we only call Process() on actual children
            seen = set()
            stack = [self.pid]
            while stack:
                pid = stack.pop()
                if pid in seen:
                    # Since pids can be reused while the ppid_map is
                    # constructed, there may be rare instances where
                    # there's a cycle in the recorded process "tree".
                    continue
                seen.add(pid)
                for child_pid in reverse_ppid_map[pid]:
                    try:
                        child = Process(child_pid)
                        # if child happens to be older than its parent
                        # (self) it means child's PID has been reused
                        intime = proc_ctime <= child.create_time()
                        if intime:
                            ret.append(child)
                            # Only descend into children that passed the
                            # ctime check.
                            stack.append(child_pid)
                    except (NoSuchProcess, ZombieProcess):
                        pass
        return ret

1098 

    @_use_prefetch
    def cpu_percent(self, interval: float | None = None) -> float:
        """Return a float representing the current process CPU
        utilization as a percentage.

        When *interval* is 0.0 or None (default) compares process times
        to system CPU times elapsed since last call, returning
        immediately (non-blocking). That means that the first time
        this is called it will return a meaningless 0.0 value.

        When *interval* is > 0.0 compares process times to system CPU
        times elapsed before and after the interval (blocking).

        In this case is recommended for accuracy that this function
        be called with at least 0.1 seconds between calls.

        A value > 100.0 can be returned in case of processes running
        multiple threads on different CPU cores.

        The returned value is explicitly NOT split evenly between
        all available logical CPUs. This means that a busy loop process
        running on a system with 2 logical CPUs will be reported as
        having 100% CPU utilization instead of 50%.

        Examples:

        >>> import psutil
        >>> p = psutil.Process(os.getpid())
        >>> # blocking
        >>> p.cpu_percent(interval=1)
        2.0
        >>> # non-blocking (percentage since last call)
        >>> p.cpu_percent(interval=None)
        2.9
        >>>
        """
        blocking = interval is not None and interval > 0.0
        if interval is not None and interval < 0:
            msg = f"interval is not positive (got {interval!r})"
            raise ValueError(msg)
        num_cpus = cpu_count() or 1

        def timer():
            # Scale wall time by the CPU count so the process time delta
            # can be compared against aggregate CPU capacity.
            return _timer() * num_cpus

        if blocking:
            st1 = timer()
            pt1 = self._proc.cpu_times()
            time.sleep(interval)
            st2 = timer()
            pt2 = self._proc.cpu_times()
        else:
            # Baselines saved by the previous call; None on first use.
            st1 = self._last_sys_cpu_times
            pt1 = self._last_proc_cpu_times
            st2 = timer()
            pt2 = self._proc.cpu_times()
            if st1 is None or pt1 is None:
                self._last_sys_cpu_times = st2
                self._last_proc_cpu_times = pt2
                return 0.0

        delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
        delta_time = st2 - st1
        # reset values for next call in case of interval == None
        self._last_sys_cpu_times = st2
        self._last_proc_cpu_times = pt2

        try:
            # This is the utilization split evenly between all CPUs.
            # E.g. a busy loop process on a 2-CPU-cores system at this
            # point is reported as 50% instead of 100%.
            overall_cpus_percent = (delta_proc / delta_time) * 100
        except ZeroDivisionError:
            # interval was too low
            return 0.0
        else:
            # Note 1:
            # in order to emulate "top" we multiply the value for the num
            # of CPU cores. This way the busy process will be reported as
            # having 100% (or more) usage.
            #
            # Note 2:
            # taskmgr.exe on Windows differs in that it will show 50%
            # instead.
            #
            # Note 3:
            # a percentage > 100 is legitimate as it can result from a
            # process with multiple threads running on different CPU
            # cores (top does the same), see:
            # http://stackoverflow.com/questions/1032357
            # https://github.com/giampaolo/psutil/issues/474
            single_cpu_percent = overall_cpus_percent * num_cpus
            return round(single_cpu_percent, 1)

1192 

1193 @_use_prefetch 

1194 @memoize_when_activated 

1195 def cpu_times(self) -> pcputimes: 

1196 """Return a `(user, system, children_user, children_system)` 

1197 named tuple representing the accumulated process time, 

1198 expressed in seconds. 

1199 

1200 Linux includes an additional `iowait` field. 

1201 

1202 On macOS and Windows `children_user` and `children_system` 

1203 fields are always set to 0. 

1204 """ 

1205 return self._proc.cpu_times() 

1206 

1207 @_use_prefetch 

1208 @memoize_when_activated 

1209 def memory_info(self) -> pmem: 

1210 """Return a named tuple with variable fields depending on the 

1211 platform, representing memory information about the process. 

1212 

1213 The portable fields available on all platforms are `rss` and `vms`. 

1214 

1215 All numbers are expressed in bytes. 

1216 """ 

1217 return self._proc.memory_info() 

1218 

1219 @_use_prefetch 

1220 @memoize_when_activated 

1221 def memory_info_ex(self) -> pmem_ex: 

1222 """Return a named tuple extending `memory_info()` with extra 

1223 metrics. 

1224 

1225 All numbers are expressed in bytes. 

1226 """ 

1227 base = self.memory_info() 

1228 if hasattr(self._proc, "memory_info_ex"): 

1229 extras = self._proc.memory_info_ex() 

1230 return _ntp.pmem_ex(**base._asdict(), **extras) 

1231 return base 

1232 

1233 # Linux, macOS, Windows 

1234 if hasattr(_psplatform.Process, "memory_footprint"): 

1235 

1236 @_use_prefetch 

1237 def memory_footprint(self) -> pfootprint: 

1238 """Return a named tuple with USS memory, and on Linux also 

1239 PSS and swap. 

1240 

1241 These values provide a more accurate representation of 

1242 actual process memory usage. 

1243 

1244 USS is the memory unique to a process and which would 

1245 be freed if the process was terminated right now. 

1246 

1247 It does so by passing through the whole process address. As 

1248 such it usually requires higher user privileges than 

1249 `memory_info()` or `memory_info_ex()` and is considerably 

1250 slower. 

1251 """ 

1252 return self._proc.memory_footprint() 

1253 

1254 # DEPRECATED 

1255 def memory_full_info(self) -> pfullmem: 

1256 """Return the same information as `memory_info()` plus 

1257 `memory_footprint()` in a single named tuple. 

1258 

1259 DEPRECATED in 8.0.0. Use `memory_footprint()` instead. 

1260 """ 

1261 msg = ( 

1262 "memory_full_info() is deprecated; use memory_footprint() instead" 

1263 ) 

1264 warnings.warn(msg, DeprecationWarning, stacklevel=2) 

1265 basic_mem = self.memory_info() 

1266 if hasattr(self, "memory_footprint"): 

1267 fp = self.memory_footprint() 

1268 return _ntp.pfullmem(*basic_mem + fp) 

1269 return _ntp.pfullmem(*basic_mem) 

1270 

1271 def memory_percent(self, memtype: str = "rss") -> float: 

1272 """Compare process memory to total physical system memory and 

1273 calculate process memory utilization as a percentage. 

1274 

1275 *memtype* argument is a string that dictates what type of 

1276 process memory you want to compare against (defaults to "rss"). 

1277 The list of available strings can be obtained like this: 

1278 

1279 >>> psutil.Process().memory_info()._fields 

1280 ('rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty', 'uss', 'pss') 

1281 """ 

1282 valid_types = list(_ntp.pmem._fields) 

1283 if hasattr(_ntp, "pmem_ex"): 

1284 valid_types += [ 

1285 f for f in _ntp.pmem_ex._fields if f not in valid_types 

1286 ] 

1287 if hasattr(_ntp, "pfootprint"): 

1288 valid_types += [ 

1289 f for f in _ntp.pfootprint._fields if f not in valid_types 

1290 ] 

1291 if memtype not in valid_types: 

1292 msg = ( 

1293 f"invalid memtype {memtype!r}; valid types are" 

1294 f" {tuple(valid_types)!r}" 

1295 ) 

1296 raise ValueError(msg) 

1297 if memtype in _ntp.pmem._fields: 

1298 fun = self.memory_info 

1299 elif ( 

1300 hasattr(_ntp, "pfootprint") and memtype in _ntp.pfootprint._fields 

1301 ): 

1302 fun = self.memory_footprint 

1303 else: 

1304 fun = self.memory_info_ex 

1305 metrics = fun() 

1306 value = getattr(metrics, memtype) 

1307 

1308 # use cached value if available 

1309 total_phymem = _TOTAL_PHYMEM or virtual_memory().total 

1310 if not total_phymem > 0: 

1311 # we should never get here 

1312 msg = ( 

1313 "can't calculate process memory percent because total physical" 

1314 f" system memory is not positive ({total_phymem!r})" 

1315 ) 

1316 raise ValueError(msg) 

1317 return (value / float(total_phymem)) * 100 

1318 

1319 if hasattr(_psplatform.Process, "memory_maps"): 

1320 

1321 @_use_prefetch 

1322 def memory_maps( 

1323 self, grouped: bool = True 

1324 ) -> list[pmmap_grouped] | list[pmmap_ext]: 

1325 """Return process mapped memory regions as a list of named 

1326 tuples whose fields are variable depending on the platform. 

1327 

1328 If *grouped* is True the mapped regions with the same 'path' 

1329 are grouped together and the different memory fields are summed. 

1330 

1331 If *grouped* is False every mapped region is shown as a single 

1332 entity and the named tuple will also include the mapped region's 

1333 address space ('addr') and permission set ('perms'). 

1334 """ 

1335 

1336 it = self._proc.memory_maps() 

1337 if grouped: 

1338 d = {} 

1339 for tupl in it: 

1340 path = tupl[2] 

1341 nums = tupl[3:] 

1342 try: 

1343 d[path] = list(map(lambda x, y: x + y, d[path], nums)) 

1344 except KeyError: 

1345 d[path] = nums 

1346 return [_ntp.pmmap_grouped(path, *d[path]) for path in d] 

1347 else: 

1348 return [_ntp.pmmap_ext(*x) for x in it] 

1349 

1350 @_use_prefetch 

1351 def page_faults(self) -> ppagefaults: 

1352 """Return the number of page faults for this process as a 

1353 `(minor, major)` named tuple. 

1354 

1355 - `minor` (a.k.a. *soft* faults): occur when a memory page is 

1356 not currently mapped into the process address space, but is 

1357 already present in physical RAM (e.g. a shared library page 

1358 loaded by another process). The kernel resolves these without 

1359 disk I/O. 

1360 

1361 - `major` (a.k.a. *hard* faults): occur when the page must be 

1362 fetched from disk. These are expensive because they stall the 

1363 process until I/O completes. 

1364 

1365 Both counters are cumulative since process creation. 

1366 """ 

1367 return self._proc.page_faults() 

1368 

1369 @_use_prefetch 

1370 def open_files(self) -> list[popenfile]: 

1371 """Return files opened by process as a list of `(path, fd)` 

1372 named tuples including the absolute file name and file 

1373 descriptor number. 

1374 

1375 On Linux the named tuple also includes `position`, `mode` and 

1376 `flags` fields. 

1377 """ 

1378 return self._proc.open_files() 

1379 

    @_use_prefetch
    def net_connections(self, kind: str = "inet") -> list[pconn]:
        """Return socket connections opened by process as a list of
        `(fd, family, type, laddr, raddr, status)` named tuples.

        The *kind* parameter filters for connections that match the
        following criteria:

        +------------+----------------------------------------------------+
        | Kind Value | Connections using                                  |
        +------------+----------------------------------------------------+
        | 'inet'     | IPv4 and IPv6                                      |
        | 'inet4'    | IPv4                                               |
        | 'inet6'    | IPv6                                               |
        | 'tcp'      | TCP                                                |
        | 'tcp4'     | TCP over IPv4                                      |
        | 'tcp6'     | TCP over IPv6                                      |
        | 'udp'      | UDP                                                |
        | 'udp4'     | UDP over IPv4                                      |
        | 'udp6'     | UDP over IPv6                                      |
        | 'unix'     | UNIX socket (both UDP and TCP protocols)           |
        | 'all'      | the sum of all the possible families and protocols |
        +------------+----------------------------------------------------+
        """
        # Validate *kind* up front so an invalid value fails with a
        # clear error before hitting the platform implementation.
        _check_conn_kind(kind)
        return self._proc.net_connections(kind)

1406 

    # Deprecated alias kept for backward compatibility; the decorator
    # emits a DeprecationWarning pointing callers at net_connections().
    @_common.deprecated_method(replacement="net_connections")
    def connections(self, kind="inet") -> list[pconn]:
        return self.net_connections(kind=kind)

1410 

    # --- signals

    if POSIX:

        def _send_signal(self, sig):
            """Send signal *sig* to the process via os.kill(),
            translating OS-level errors into psutil exceptions.
            Internal helper shared by send_signal(), suspend(),
            resume(), terminate() and kill().
            """
            assert not self.pid < 0, self.pid
            self._raise_if_pid_reused()

            pid, ppid, name = self.pid, self._ppid, self._name
            if pid == 0:
                # see "man 2 kill"
                msg = (
                    "preventing sending signal to process with PID 0 as it "
                    "would affect every process in the process group of the "
                    "calling process (os.getpid()) instead of PID 0"
                )
                raise ValueError(msg)
            try:
                os.kill(pid, sig)
            except ProcessLookupError as err:
                if OPENBSD and pid_exists(pid):
                    # We do this because os.kill() lies in case of
                    # zombie processes.
                    raise ZombieProcess(pid, name, ppid) from err
                # Process is really gone; remember that for is_running().
                self._gone = True
                raise NoSuchProcess(pid, name) from err
            except PermissionError as err:
                raise AccessDenied(pid, name) from err

1439 

    def send_signal(self, sig: int) -> None:
        """Send a signal *sig* to process, preemptively checking
        whether PID has been reused (see signal module constants).

        On Windows only SIGTERM, CTRL_C_EVENT and CTRL_BREAK_EVENT
        are valid. SIGTERM is treated as an alias for `kill()`.
        """
        if POSIX:
            self._send_signal(sig)
        else:  # pragma: no cover
            # Windows path: delegate to the platform implementation.
            self._raise_if_pid_reused()
            if sig != signal.SIGTERM and not self.is_running():
                msg = "process no longer exists"
                raise NoSuchProcess(self.pid, self._name, msg=msg)
            self._proc.send_signal(sig)

1455 

1456 def suspend(self) -> None: 

1457 """Suspend process execution with SIGSTOP preemptively checking 

1458 whether PID has been reused. 

1459 

1460 On Windows this has the effect of suspending all process threads. 

1461 """ 

1462 if POSIX: 

1463 self._send_signal(signal.SIGSTOP) 

1464 else: # pragma: no cover 

1465 self._raise_if_pid_reused() 

1466 self._proc.suspend() 

1467 

1468 def resume(self) -> None: 

1469 """Resume process execution with SIGCONT preemptively checking 

1470 whether PID has been reused. 

1471 

1472 On Windows this has the effect of resuming all process threads. 

1473 """ 

1474 if POSIX: 

1475 self._send_signal(signal.SIGCONT) 

1476 else: # pragma: no cover 

1477 self._raise_if_pid_reused() 

1478 self._proc.resume() 

1479 

1480 def terminate(self) -> None: 

1481 """Terminate the process with SIGTERM preemptively checking 

1482 whether PID has been reused. 

1483 

1484 On Windows this is an alias for `kill()`. 

1485 """ 

1486 if POSIX: 

1487 self._send_signal(signal.SIGTERM) 

1488 else: # pragma: no cover 

1489 self._raise_if_pid_reused() 

1490 self._proc.kill() 

1491 

1492 def kill(self) -> None: 

1493 """Kill the current process with SIGKILL preemptively checking 

1494 whether PID has been reused. 

1495 """ 

1496 if POSIX: 

1497 self._send_signal(signal.SIGKILL) 

1498 else: # pragma: no cover 

1499 self._raise_if_pid_reused() 

1500 self._proc.kill() 

1501 

    def wait(self, timeout: float | None = None) -> int | None:
        """Wait for process to terminate, and if process is a child
        of os.getpid(), also return its exit code, else None.

        On Windows there's no such limitation (exit code is always
        returned).

        If the process is already terminated, immediately return None
        instead of raising `NoSuchProcess`.

        If *timeout* (in seconds) is specified and process is still
        alive, raise `TimeoutExpired`.

        If *timeout=0* either return immediately or raise
        `TimeoutExpired` (non-blocking).

        To wait for multiple Process objects use `psutil.wait_procs()`.
        """
        if self.pid == 0:
            msg = "can't wait for PID 0"
            raise ValueError(msg)
        if timeout is not None:
            if not isinstance(timeout, (int, float)):
                msg = f"timeout must be an int or float (got {type(timeout)})"
                raise TypeError(msg)
            if timeout < 0:
                msg = f"timeout must be positive or zero (got {timeout})"
                raise ValueError(msg)

        # _SENTINEL means wait() has never completed; once it has, the
        # cached exit code (which may legitimately be None) is returned
        # on subsequent calls without waiting again.
        if self._exitcode is not _SENTINEL:
            return self._exitcode

        try:
            self._exitcode = self._proc.wait(timeout)
        except TimeoutExpired as err:
            # Re-raise with richer context (PID and process name).
            exc = TimeoutExpired(timeout, pid=self.pid, name=self._name)
            raise exc from err

        return self._exitcode

1541 

1542 

# The valid attr names which can be processed by Process.as_dict(attrs=...)
# and process_iter(attrs=...).
# Excluded names are actions (signals, wait), aggregates built on top of
# other methods, deprecated aliases, or attributes that are not process
# info.
# fmt: off
Process.attrs = frozenset(
    x for x in dir(Process) if not x.startswith("_") and x not in
    {'send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
     'is_running', 'as_dict', 'parent', 'parents', 'children', 'rlimit',
     'connections', 'memory_full_info', 'oneshot', 'info', 'attrs'}
)
# fmt: on

1553 

1554 

1555# ===================================================================== 

1556# --- Popen class 

1557# ===================================================================== 

1558 

1559 

class Popen(Process):
    """Same as `subprocess.Popen`, but in addition it provides all
    `Process` methods in a single class.

    For the following methods which are common to both classes, psutil
    implementation takes precedence:

    * `send_signal()`
    * `terminate()`
    * `kill()`

    This is done in order to avoid killing another process in case its
    PID has been reused, fixing BPO-6973.

    >>> import psutil
    >>> from subprocess import PIPE
    >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
    >>> p.name()
    'python3'
    >>> p.uids()
    user(real=1000, effective=1000, saved=1000)
    >>> p.username()
    'giampaolo'
    >>> p.communicate()
    ('hi', None)
    >>> p.terminate()
    >>> p.wait(timeout=2)
    0
    >>>
    """

    def __init__(self, *args, **kwargs):
        # Explicitly avoid to raise NoSuchProcess in case the process
        # spawned by subprocess.Popen terminates too quickly, see:
        # https://github.com/giampaolo/psutil/issues/193
        self.__subproc = subprocess.Popen(*args, **kwargs)
        self._init(self.__subproc.pid, _ignore_nsp=True)

    def __dir__(self):
        # Expose the union of both classes' attributes to dir().
        return sorted(set(dir(Popen) + dir(subprocess.Popen)))

    def __enter__(self) -> Popen:
        # Delegate to the wrapped Popen's context manager if it has one.
        if hasattr(self.__subproc, '__enter__'):
            self.__subproc.__enter__()
        return self

    def __exit__(self, *args, **kwargs):
        if hasattr(self.__subproc, '__exit__'):
            return self.__subproc.__exit__(*args, **kwargs)
        else:
            # Fallback when subprocess.Popen is not a context manager:
            # close the pipes ourselves and reap the process.
            if self.stdout:
                self.stdout.close()
            if self.stderr:
                self.stderr.close()
            try:
                # Flushing a BufferedWriter may raise an error.
                if self.stdin:
                    self.stdin.close()
            finally:
                # Wait for the process to terminate, to avoid zombies.
                self.wait()

    def __getattribute__(self, name):
        # Look attributes up on psutil's Process first so overlapping
        # methods (send_signal, terminate, kill, ...) take precedence;
        # fall back to the wrapped subprocess.Popen instance.
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            try:
                return object.__getattribute__(self.__subproc, name)
            except AttributeError:
                msg = f"{self.__class__!r} has no attribute {name!r}"
                raise AttributeError(msg) from None

    def wait(self, timeout: float | None = None) -> int | None:
        # Keep subprocess.Popen.returncode in sync with our exit code.
        if self.__subproc.returncode is not None:
            return self.__subproc.returncode
        ret = super().wait(timeout)
        self.__subproc.returncode = ret
        return ret

1638 

1639 

1640# ===================================================================== 

1641# --- system processes related functions 

1642# ===================================================================== 

1643 

1644 

def pids() -> list[int]:
    """Return a sorted list of the PIDs currently alive on the
    system.
    """
    global _LOWEST_PID
    all_pids = sorted(_psplatform.pids())
    # Remember the smallest PID; used elsewhere for PID-reuse checks.
    _LOWEST_PID = all_pids[0]
    return all_pids

1651 

1652 

def pid_exists(pid: int) -> bool:
    """Return True if *pid* exists in the current process list.

    This is faster than doing `pid in psutil.pids()` and should be
    preferred.
    """
    if pid < 0:
        # Negative PIDs can never exist.
        return False
    if POSIX and pid == 0:
        # On POSIX we use os.kill() to determine PID existence.
        # According to "man 2 kill" PID 0 has a special meaning
        # though: it refers to <<every process in the process
        # group of the calling process>> and that is not we want
        # to do here.
        return pid in pids()
    return _psplatform.pid_exists(pid)

1670 

1671 

# Cache of Process instances used by process_iter(), keyed by PID.
_pmap = {}
# PIDs flagged as reused elsewhere; process_iter() drops their cached
# instances on the next iteration.
_pids_reused = set()

1674 

1675 

def process_iter(
    attrs: Collection[str] | None = None, ad_value: Any = None
) -> Iterator[Process]:
    """Return a generator yielding a `Process` instance for all
    running processes.

    Every new `Process` instance is only created once and then cached
    into an internal table which is updated every time this is used.
    Cache can optionally be cleared via `process_iter.cache_clear()`.

    The sorting order in which processes are yielded is based on
    their PIDs.

    *attrs* and *ad_value* have the same meaning as in
    `Process.as_dict()`.

    If *attrs* is specified, `Process.as_dict()` is called and the
    results are cached, so that subsequent method calls (e.g.
    `p.name()`) return cached values. Use `attrs=Process.attrs` to
    retrieve all process info (slow).

    If a method raises `AccessDenied` during pre-fetch, it will return
    *ad_value* (default None) instead of raising.
    """
    global _pmap

    def add(pid):
        # Create and cache a new Process instance for *pid*.
        proc = Process(pid)
        pmap[proc.pid] = proc
        return proc

    def remove(pid):
        pmap.pop(pid, None)

    if attrs is not None:
        if attrs == []:  # deprecated in 8.0.0
            msg = (
                "process_iter(attrs=[]) is deprecated; use "
                "process_iter(attrs=Process.attrs) to retrieve all attributes"
            )
            warnings.warn(msg, DeprecationWarning, stacklevel=2)
        elif not attrs:
            # as_dict() will resolve an empty list|tuple|set to "all
            # attribute names", but it's ambiguous and should be
            # signaled.
            msg = (
                f"process_iter(attrs={attrs}) is ambiguous; use "
                "process_iter(attrs=Process.attrs) to retrieve all attributes"
            )
            warnings.warn(msg, UserWarning, stacklevel=2)

    # Work on a copy so concurrent process_iter() calls don't mutate
    # the table we are iterating over.
    pmap = _pmap.copy()
    a = set(pids())
    b = set(pmap)
    new_pids = a - b
    gone_pids = b - a
    for pid in gone_pids:
        remove(pid)
    while _pids_reused:
        pid = _pids_reused.pop()
        debug(f"refreshing Process instance for reused PID {pid}")
        remove(pid)
    try:
        # New PIDs are paired with None so they are instantiated lazily
        # in PID order alongside the cached entries.
        ls = sorted(list(pmap.items()) + list(dict.fromkeys(new_pids).items()))
        for pid, proc in ls:
            try:
                if proc is None:  # new process
                    proc = add(pid)
                proc._prefetch = {}  # clear cache
                if attrs is not None:
                    proc._prefetch = proc.as_dict(
                        attrs=attrs, ad_value=ad_value
                    )
                yield proc
            except NoSuchProcess:
                remove(pid)
    finally:
        # Publish the refreshed table for the next call, even if the
        # caller abandoned the generator early.
        _pmap = pmap

1754 

1755 

# Expose a cache_clear() hook on the function itself, mirroring the
# functools.lru_cache API.
process_iter.cache_clear = lambda: _pmap.clear()  # noqa: PLW0108
process_iter.cache_clear.__doc__ = "Clear process_iter() internal cache."

1758 

1759 

def wait_procs(
    procs: list[Process],
    timeout: float | None = None,
    callback: Callable[[Process], None] | None = None,
) -> tuple[list[Process], list[Process]]:
    """Convenience function which waits for a list of processes to
    terminate.

    Return a `(gone, alive)` tuple indicating which processes
    are gone and which ones are still alive.

    The gone ones will have a new `returncode` attribute indicating
    process exit status (may be None).

    *callback* is a function which gets called every time a process
    terminates (a `Process` instance is passed as callback argument).

    Function will return as soon as all processes terminate or when
    *timeout* occurs.

    Differently from `Process.wait()` it will not raise `TimeoutExpired` if
    *timeout* occurs.

    Typical use case is:

     - send SIGTERM to a list of processes
     - give them some time to terminate
     - send SIGKILL to those ones which are still alive

    Example:

    >>> def on_terminate(proc):
    ...     print("process {} terminated".format(proc))
    ...
    >>> for p in procs:
    ...    p.terminate()
    ...
    >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
    >>> for p in alive:
    ...     p.kill()
    """

    def check_gone(proc, timeout):
        # Wait on a single process; on termination record it in *gone*
        # and invoke the user callback.
        try:
            returncode = proc.wait(timeout=timeout)
        except (TimeoutExpired, subprocess.TimeoutExpired):
            pass
        else:
            if returncode is not None or not proc.is_running():
                # Set new Process instance attribute.
                proc.returncode = returncode
                gone.add(proc)
                if callback is not None:
                    callback(proc)

    if timeout is not None and not timeout >= 0:
        msg = f"timeout must be a positive integer, got {timeout}"
        raise ValueError(msg)
    if callback is not None and not callable(callback):
        msg = f"callback {callback!r} is not a callable"
        raise TypeError(msg)

    gone = set()
    alive = set(procs)
    if timeout is not None:
        deadline = _timer() + timeout

    while alive:
        if timeout is not None and timeout <= 0:
            break
        for proc in alive:
            # Make sure that every complete iteration (all processes)
            # will last max 1 sec.
            # We do this because we don't want to wait too long on a
            # single process: in case it terminates too late other
            # processes may disappear in the meantime and their PID
            # reused.
            max_timeout = 1.0 / len(alive)
            if timeout is not None:
                # Shrink per-process timeout as the deadline approaches.
                timeout = min((deadline - _timer()), max_timeout)
                if timeout <= 0:
                    break
                check_gone(proc, timeout)
            else:
                check_gone(proc, max_timeout)
        alive = alive - gone  # noqa: PLR6104

    if alive:
        # Last attempt over processes survived so far.
        # timeout == 0 won't make this function wait any further.
        for proc in alive:
            check_gone(proc, 0)
        alive = alive - gone  # noqa: PLR6104

    return (list(gone), list(alive))

1855 

1856 

1857# ===================================================================== 

1858# --- CPU related functions 

1859# ===================================================================== 

1860 

1861 

def cpu_count(logical: bool = True) -> int | None:
    """Return the number of logical CPUs in the system (same as
    `os.cpu_count()`).

    If *logical* is False return the number of physical cores only
    (e.g. hyper thread CPUs are excluded).

    Return None if undetermined.

    The return value is cached after first call.
    If desired cache can be cleared like this:

    >>> psutil.cpu_count.cache_clear()
    """
    count = (
        _psplatform.cpu_count_logical()
        if logical
        else _psplatform.cpu_count_cores()
    )
    # Treat nonsensical (< 1) platform answers as "undetermined".
    if count is not None and count < 1:
        count = None
    return count

1883 

1884 

def cpu_times(percpu: bool = False) -> scputimes | list[scputimes]:
    """Return system-wide CPU times as a named tuple.

    Every CPU time represents the seconds the CPU has spent in the
    given mode:

     - `user`
     - `system`
     - `idle`
     - `nice` (UNIX)
     - `iowait` (Linux)
     - `irq` (Linux, FreeBSD)
     - `softirq` (Linux)
     - `steal` (Linux)
     - `guest` (Linux)
     - `guest_nice` (Linux)
     - `dpc` (Windows)

    When *percpu* is True return a list of named tuples for each
    logical CPU. First element of the list refers to first CPU, second
    element to second CPU and so on. The order of the list is
    consistent across calls.
    """
    if percpu:
        return _psplatform.per_cpu_times()
    return _psplatform.cpu_times()

1912 

1913 

# Prime the "previous sample" caches used by cpu_percent(). They are
# keyed by thread id so that concurrent callers get independent deltas.
try:
    _last_cpu_times = {threading.current_thread().ident: cpu_times()}
except Exception:  # noqa: BLE001
    # Don't want to crash at import time.
    _last_cpu_times = {}

try:
    _last_per_cpu_times = {
        threading.current_thread().ident: cpu_times(percpu=True)
    }
except Exception:  # noqa: BLE001
    # Don't want to crash at import time.
    _last_per_cpu_times = {}

1927 

1928 

def _cpu_tot_time(times):
    """Return the total CPU time (idle included) for a `cpu_times()`
    named tuple.
    """
    total = sum(times)
    if not LINUX:
        return total
    # On Linux, "guest" and "guest_nice" are already folded into the
    # "user" and "nice" fields, so subtract them to avoid double
    # counting. Htop does the same. References:
    # https://github.com/giampaolo/psutil/pull/940
    # http://unix.stackexchange.com/questions/178045
    # https://github.com/torvalds/linux/blob/447976ef4/kernel/sched/cputime.c#L158
    return total - times.guest - times.guest_nice

1944 

1945 

def _cpu_busy_time(times):
    """Return the busy (non-idle) CPU time for a `cpu_times()` named
    tuple by removing all idle components from the total.
    """
    # Linux: "iowait" (CPU stalled waiting for IO) is *not* accounted
    # in "idle", so it must be removed as well; other platforms have no
    # such field and getattr() falls back to 0. Htop does the same.
    # Reference:
    # https://github.com/torvalds/linux/blob/447976ef4/kernel/sched/cputime.c#L244
    idle = times.idle + getattr(times, "iowait", 0)
    return _cpu_tot_time(times) - idle

1959 

1960 

def _cpu_times_deltas(t1, t2):
    """Return a new scputimes tuple holding, field by field, the
    difference between two `cpu_times()` snapshots.

    CPU times are supposed to be monotonically increasing, but in
    practice they sometimes go backwards (seen at least on Windows and
    Linux):
    https://github.com/giampaolo/psutil/issues/392
    https://github.com/giampaolo/psutil/issues/645
    https://github.com/giampaolo/psutil/issues/1210
    Negative deltas are therefore clamped to zero, the same way top
    does it:
    https://gitlab.com/procps-ng/procps/blob/v3.3.12/top/top.c#L5063
    """
    assert t1._fields == t2._fields, (t1, t2)
    deltas = (
        max(0, getattr(t2, name) - getattr(t1, name))
        for name in _ntp.scputimes._fields
    )
    return _ntp.scputimes(*deltas)

1980 

1981 

def cpu_percent(
    interval: float | None = None, percpu: bool = False
) -> float | list[float]:
    """Return a float representing the current system-wide CPU
    utilization as a percentage.

    When *interval* is > 0.0 compares system CPU times elapsed before
    and after the interval (blocking).

    When *interval* is 0.0 or None compares system CPU times elapsed
    since last call or module import, returning immediately (non
    blocking). That means the first time this is called it will
    return a meaningless 0.0 value which you should ignore.
    In this case is recommended for accuracy that this function be
    called with at least 0.1 seconds between calls.

    When *percpu* is True returns a list of floats representing the
    utilization as a percentage for each CPU.
    First element of the list refers to first CPU, second element
    to second CPU and so on.
    The order of the list is consistent across calls.

    Examples:

      >>> # blocking, system-wide
      >>> psutil.cpu_percent(interval=1)
      2.0
      >>>
      >>> # blocking, per-cpu
      >>> psutil.cpu_percent(interval=1, percpu=True)
      [2.0, 1.0]
      >>>
      >>> # non-blocking (percentage since last call)
      >>> psutil.cpu_percent(interval=None)
      2.9
      >>>
    """
    # Per-thread caching: each calling thread gets its own "previous
    # sample" so that concurrent callers don't corrupt each other's
    # deltas.
    tid = threading.current_thread().ident
    blocking = interval is not None and interval > 0.0
    if interval is not None and interval < 0:
        msg = f"interval is not positive (got {interval})"
        raise ValueError(msg)

    def calculate(t1, t2):
        # Percentage of busy (non-idle) time over total elapsed CPU
        # time between the two snapshots.
        times_delta = _cpu_times_deltas(t1, t2)
        all_delta = _cpu_tot_time(times_delta)
        busy_delta = _cpu_busy_time(times_delta)

        try:
            busy_perc = (busy_delta / all_delta) * 100
        except ZeroDivisionError:
            # No measurable time elapsed between the two samples.
            return 0.0
        else:
            return round(busy_perc, 1)

    # system-wide usage
    if not percpu:
        if blocking:
            t1 = cpu_times()
            time.sleep(interval)
        else:
            # First call from this thread: fall back to a fresh sample,
            # which makes the result a meaningless 0.0 (documented).
            t1 = _last_cpu_times.get(tid) or cpu_times()
        _last_cpu_times[tid] = cpu_times()
        return calculate(t1, _last_cpu_times[tid])
    # per-cpu usage
    else:
        ret = []
        if blocking:
            tot1 = cpu_times(percpu=True)
            time.sleep(interval)
        else:
            tot1 = _last_per_cpu_times.get(tid) or cpu_times(percpu=True)
        _last_per_cpu_times[tid] = cpu_times(percpu=True)
        for t1, t2 in zip(tot1, _last_per_cpu_times[tid]):
            ret.append(calculate(t1, t2))
        return ret

2058 

2059 

# Use a separate dict for cpu_times_percent(), so it's independent from
# cpu_percent() and they can both be used within the same program.
# Seeded from the samples taken at import time above.
_last_cpu_times_2 = _last_cpu_times.copy()
_last_per_cpu_times_2 = _last_per_cpu_times.copy()

2064 

2065 

def cpu_times_percent(
    interval: float | None = None, percpu: bool = False
) -> scputimes | list[scputimes]:
    """Same as `cpu_percent()`, but provides utilization percentages
    for each specific CPU time as is returned by `cpu_times()`.

    For instance, on Linux we'll get:

      >>> cpu_times_percent()
      cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
                 irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
      >>>

    *interval* and *percpu* arguments have the same meaning as in
    `cpu_percent()`.
    """
    # Per-thread caching, same scheme as cpu_percent() but with its
    # own cache dicts so the two functions don't interfere.
    tid = threading.current_thread().ident
    blocking = interval is not None and interval > 0.0
    if interval is not None and interval < 0:
        msg = f"interval is not positive (got {interval!r})"
        raise ValueError(msg)

    def calculate(t1, t2):
        nums = []
        times_delta = _cpu_times_deltas(t1, t2)
        all_delta = _cpu_tot_time(times_delta)
        # "scale" is the value to multiply each delta with to get percentages.
        # We use "max" to avoid division by zero (if all_delta is 0, then all
        # fields are 0 so percentages will be 0 too. all_delta cannot be a
        # fraction because cpu times are integers)
        scale = 100.0 / max(1, all_delta)
        for field_delta in times_delta:
            field_perc = field_delta * scale
            field_perc = round(field_perc, 1)
            # make sure we don't return negative values or values over 100%
            field_perc = min(max(0.0, field_perc), 100.0)
            nums.append(field_perc)
        return _ntp.scputimes(*nums)

    # system-wide usage
    if not percpu:
        if blocking:
            t1 = cpu_times()
            time.sleep(interval)
        else:
            t1 = _last_cpu_times_2.get(tid) or cpu_times()
        _last_cpu_times_2[tid] = cpu_times()
        return calculate(t1, _last_cpu_times_2[tid])
    # per-cpu usage
    else:
        ret = []
        if blocking:
            tot1 = cpu_times(percpu=True)
            time.sleep(interval)
        else:
            tot1 = _last_per_cpu_times_2.get(tid) or cpu_times(percpu=True)
        _last_per_cpu_times_2[tid] = cpu_times(percpu=True)
        for t1, t2 in zip(tot1, _last_per_cpu_times_2[tid]):
            ret.append(calculate(t1, t2))
        return ret

2126 

2127 

def cpu_stats() -> scpustats:
    """Return miscellaneous CPU statistics as a named tuple."""
    return _psplatform.cpu_stats()

2131 

2132 

if hasattr(_psplatform, "cpu_freq"):

    def cpu_freq(percpu: bool = False) -> scpufreq | list[scpufreq] | None:
        """Return CPU frequency as a named tuple with current, min and
        max values expressed in Mhz.

        With *percpu* set to True, and provided the platform supports
        per-cpu retrieval (Linux and FreeBSD), return one entry per
        CPU; otherwise the list contains a single element.
        """
        freqs = _psplatform.cpu_freq()
        if percpu:
            return freqs
        ncpus = float(len(freqs))
        if ncpus == 0:
            return None
        if ncpus == 1:
            return freqs[0]
        # Multiple readings: average them across all CPUs.
        curr_sum = 0.0
        min_sum = 0.0
        max_sum = 0.0
        missing_minmax = False
        for entry in freqs:
            curr_sum += entry.current
            # On Linux, when /proc/cpuinfo is the data source, min and
            # max are reported as None.
            if LINUX and entry.min is None:
                missing_minmax = True
                continue
            min_sum += entry.min
            max_sum += entry.max

        avg_current = curr_sum / ncpus

        if missing_minmax:
            avg_min = avg_max = None
        else:
            avg_min = min_sum / ncpus
            avg_max = max_sum / ncpus

        return _ntp.scpufreq(avg_current, avg_min, avg_max)

    __all__.append("cpu_freq")

2177 

2178 

def getloadavg() -> tuple[float, float, float]:
    """Return the average system load over the last 1, 5 and 15
    minutes as a tuple of three floats.

    Windows has no native load average; there the platform module
    emulates it with a background thread that refreshes the values
    every 5 seconds, mimicking the UNIX behavior.
    """
    native = getattr(os, "getloadavg", None)
    if native is None:
        return _psplatform.getloadavg()
    return native()

2191 

2192 

2193# ===================================================================== 

2194# --- system memory related functions 

2195# ===================================================================== 

2196 

2197 

def virtual_memory() -> svmem:
    """Return statistics about system memory usage as a named tuple.

    The set of fields varies by platform (see official doc), but the
    following are always present:

     - total:
       total physical memory available

     - available:
       the memory that can be given instantly to processes without the
       system going into swap.
       This is calculated by summing different memory values depending
       on the platform and it is supposed to be used to monitor actual
       memory usage in a cross platform fashion.

     - percent:
       the percentage usage calculated as `(total - available) / total * 100`

     - used:
       memory used, calculated differently depending on the platform and
       designed for informational purposes only

     - free:
       memory not being used at all (zeroed) that is readily available;
       note that this doesn't reflect the actual memory available
       (use 'available' instead)

    The sum of `used` and `available` does not necessarily equal `total`.

    On Windows `available` and `free` are the same.
    """
    global _TOTAL_PHYMEM
    mem = _psplatform.virtual_memory()
    # Cache total physical memory for later use by
    # Process.memory_percent().
    _TOTAL_PHYMEM = mem.total
    return mem

2235 

2236 

def swap_memory() -> sswap:
    """Return system swap memory statistics as a named tuple with
    these fields:

     - total: total swap memory in bytes
     - used: used swap memory in bytes
     - free: free swap memory in bytes
     - percent: the percentage usage
     - sin: no. of bytes the system has swapped in from disk (cumulative)
     - sout: no. of bytes the system has swapped out from disk (cumulative)

    On Windows `sin` and `sout` are meaningless and always 0.
    """
    return _psplatform.swap_memory()

2251 

2252 

2253# ===================================================================== 

2254# --- disks/partitions related functions 

2255# ===================================================================== 

2256 

2257 

def disk_usage(path: str) -> sdiskusage:
    """Return disk usage statistics for the partition containing
    *path* as a named tuple with total, used and free space in bytes,
    plus the percentage usage.
    """
    return _psplatform.disk_usage(path)

2264 

2265 

def disk_partitions(all: bool = False) -> list[sdiskpart]:
    """Return the mounted partitions as a list of
    (device, mountpoint, fstype, opts) named tuples.

    `opts` is a raw comma-separated string of mount options, whose
    content may vary depending on the platform.

    With *all* set to False, only physical devices are returned and
    everything else is skipped.
    """
    return _psplatform.disk_partitions(all)

2277 

2278 

def disk_io_counters(
    perdisk: bool = False, nowrap: bool = True
) -> sdiskio | dict[str, sdiskio]:
    """Return system disk I/O statistics as a named tuple with these
    fields:

     - read_count: number of reads
     - write_count: number of writes
     - read_bytes: number of bytes read
     - write_bytes: number of bytes written
     - read_time: (not NetBSD, OpenBSD) time spent reading from
       disk (in ms)
     - write_time: (not NetBSD, OpenBSD) time spent writing to
       disk (in ms)

    Platform specific:

     - busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms)
     - read_merged_count (Linux): number of merged reads
     - write_merged_count (Linux): number of merged writes

    With *perdisk* set to True, return the same information for every
    physical disk as a dict keyed by partition name.

    With *nowrap* set to True (the default), counters that overflow
    and restart from zero are adjusted so they never decrease (this
    can happen on very busy or long-lived systems). The *nowrap* cache
    can be invalidated via `disk_io_counters.cache_clear()`.
    """
    # Only the Linux implementation accepts a perdisk argument.
    if LINUX:
        raw = _psplatform.disk_io_counters(perdisk=perdisk)
    else:
        raw = _psplatform.disk_io_counters()
    if not raw:
        return {} if perdisk else None
    if nowrap:
        raw = _wrap_numbers(raw, 'psutil.disk_io_counters')
    if not perdisk:
        # Collapse all disks into a single tuple of column sums.
        totals = (sum(col) for col in zip(*raw.values()))
        return _ntp.sdiskio(*totals)
    for name, values in raw.items():
        raw[name] = _ntp.sdiskio(*values)
    return raw

2321 

2322 

# Expose a cache_clear() helper that invalidates the nowrap adjustment
# state kept by _wrap_numbers for this function.
disk_io_counters.cache_clear = functools.partial(
    _wrap_numbers.cache_clear, 'psutil.disk_io_counters'
)
disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"

2327 

2328 

2329# ===================================================================== 

2330# --- network related functions 

2331# ===================================================================== 

2332 

2333 

def net_io_counters(
    pernic: bool = False, nowrap: bool = True
) -> snetio | dict[str, snetio] | None:
    """Return network I/O statistics as a named tuple with these
    fields:

     - bytes_sent: number of bytes sent
     - bytes_recv: number of bytes received
     - packets_sent: number of packets sent
     - packets_recv: number of packets received
     - errin: total number of errors while receiving
     - errout: total number of errors while sending
     - dropin: total number of incoming packets which were dropped
     - dropout: total number of outgoing packets which were dropped
       (always 0 on macOS and BSD)

    With *pernic* set to True, return the same information for every
    network interface as a dict keyed by interface name.

    With *nowrap* set to True (the default), counters that overflow
    and restart from zero are adjusted so they never decrease (this
    can happen on very busy or long-lived systems). The *nowrap* cache
    can be invalidated via `net_io_counters.cache_clear()`.
    """
    raw = _psplatform.net_io_counters()
    if not raw:
        return {} if pernic else None
    if nowrap:
        raw = _wrap_numbers(raw, 'psutil.net_io_counters')
    if not pernic:
        # Collapse all NICs into a single tuple of column sums.
        totals = [sum(col) for col in zip(*raw.values())]
        return _ntp.snetio(*totals)
    for name, values in raw.items():
        raw[name] = _ntp.snetio(*values)
    return raw

2371 

2372 

# Expose a cache_clear() helper that invalidates the nowrap adjustment
# state kept by _wrap_numbers for this function.
net_io_counters.cache_clear = functools.partial(
    _wrap_numbers.cache_clear, 'psutil.net_io_counters'
)
net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"

2377 

2378 

def net_connections(kind: str = 'inet') -> list[sconn]:
    """Return system-wide socket connections as a list of
    (fd, family, type, laddr, raddr, status, pid) named tuples.

    With limited privileges, `fd` and `pid` may be -1 and None
    respectively.

    The *kind* parameter selects which connections to report:

    +------------+----------------------------------------------------+
    | Kind Value | Connections using                                  |
    +------------+----------------------------------------------------+
    | 'inet'     | IPv4 and IPv6                                      |
    | 'inet4'    | IPv4                                               |
    | 'inet6'    | IPv6                                               |
    | 'tcp'      | TCP                                                |
    | 'tcp4'     | TCP over IPv4                                      |
    | 'tcp6'     | TCP over IPv6                                      |
    | 'udp'      | UDP                                                |
    | 'udp4'     | UDP over IPv4                                      |
    | 'udp6'     | UDP over IPv6                                      |
    | 'unix'     | UNIX socket (both UDP and TCP protocols)           |
    | 'all'      | the sum of all the possible families and protocols |
    +------------+----------------------------------------------------+

    On macOS this function requires root privileges.
    """
    # Reject invalid kinds up front before hitting the platform code.
    _check_conn_kind(kind)
    return _psplatform.net_connections(kind)

2409 

2410 

def net_if_addrs() -> dict[str, list[snicaddr]]:
    """Return a dictionary mapping each NIC (Network Interface Card) to
    a list of named tuples representing its addresses. Multiple
    addresses of the same family can exist per interface.

    The named tuple includes 5 fields (addresses may be None):

     - family: the address family, either `AF_INET`, `AF_INET6`,
       `psutil.AF_LINK` (a MAC address) or `AF_UNSPEC` (a virtual or
       unconfigured NIC).
     - address: the primary NIC address
     - netmask: the netmask address
     - broadcast: the broadcast address; always None on Windows
     - ptp: a "point to point" address (typically a VPN); always None on
       Windows
    """
    rawlist = _psplatform.net_if_addrs()
    rawlist.sort(key=lambda x: x[1])  # sort by family
    ret = collections.defaultdict(list)
    for name, fam, addr, mask, broadcast, ptp in rawlist:
        try:
            fam = socket.AddressFamily(fam)
        except ValueError:
            if WINDOWS and fam == -1:
                fam = _psplatform.AF_LINK
            elif (
                hasattr(_psplatform, "AF_LINK") and fam == _psplatform.AF_LINK
            ):
                # Linux defines AF_LINK as an alias for AF_PACKET.
                # We re-set the family here so that repr(family)
                # will show AF_LINK rather than AF_PACKET
                fam = _psplatform.AF_LINK

        if fam == _psplatform.AF_LINK:
            # The underlying C function may return an incomplete MAC
            # address in which case we fill it with null bytes, see:
            # https://github.com/giampaolo/psutil/issues/786
            separator = ":" if POSIX else "-"
            while addr.count(separator) < 5:
                addr += f"{separator}00"

        nt = _ntp.snicaddr(fam, addr, mask, broadcast, ptp)

        # On Windows broadcast is None, so we determine it via
        # ipaddress module.
        if WINDOWS and fam in {socket.AF_INET, socket.AF_INET6}:
            try:
                broadcast = _common.broadcast_addr(nt)
            except Exception as err:  # noqa: BLE001
                debug(err)
            else:
                if broadcast is not None:
                    # BUGFIX: namedtuple._replace() returns a *new*
                    # tuple; the original code discarded it, so the
                    # computed broadcast address was never stored.
                    nt = nt._replace(broadcast=broadcast)

        ret[name].append(nt)

    return dict(ret)

2468 

2469 

def net_if_stats() -> dict[str, snicstats]:
    """Return information about every NIC (network interface card)
    installed on the system as a dict mapping NIC names to named
    tuples with the following fields:

     - isup: whether the interface is up (bool)
     - duplex: one of `NIC_DUPLEX_FULL`, `NIC_DUPLEX_HALF` or
       `NIC_DUPLEX_UNKNOWN`
     - speed: the NIC speed expressed in mega bits (MB); 0 when it
       can't be determined (e.g. 'localhost')
     - mtu: the maximum transmission unit expressed in bytes
     - flags: a string of comma-separated flags on the interface
    """
    return _psplatform.net_if_stats()

2484 

2485 

2486# ===================================================================== 

2487# --- sensors 

2488# ===================================================================== 

2489 

2490 

2491# Linux, macOS 

if hasattr(_psplatform, "sensors_temperatures"):

    def sensors_temperatures(
        fahrenheit: bool = False,
    ) -> dict[str, list[shwtemp]]:
        """Return hardware temperatures.

        Each entry is a named tuple for a specific hardware sensor
        (a CPU, a hard disk, etc., depending on the OS and its
        configuration).

        Values are in celsius unless *fahrenheit* is True.
        """

        def to_unit(value):
            # Platform code reports celsius; convert on request.
            # None (unknown reading) is passed through untouched.
            if value is None:
                return None
            return (float(value) * 9 / 5) + 32 if fahrenheit else value

        result = collections.defaultdict(list)

        for name, entries in _psplatform.sensors_temperatures().items():
            for label, current, high, critical in entries:
                current = to_unit(current)
                high = to_unit(high)
                critical = to_unit(critical)

                # When only one of high/critical is known, mirror it
                # into the other.
                if high and not critical:
                    critical = high
                elif critical and not high:
                    high = critical

                result[name].append(
                    _ntp.shwtemp(label, current, high, critical)
                )

        return dict(result)

    __all__.append("sensors_temperatures")

2531 

2532 

2533# Linux 

if hasattr(_psplatform, "sensors_fans"):

    def sensors_fans() -> dict[str, list[sfan]]:
        """Return fan speeds. Each entry is a named tuple for a
        specific hardware sensor, with speeds expressed in RPM
        (rounds per minute).
        """
        return _psplatform.sensors_fans()

    __all__.append("sensors_fans")

2544 

2545 

2546# Linux, Windows, FreeBSD, macOS 

if hasattr(_psplatform, "sensors_battery"):

    def sensors_battery() -> sbattery | None:
        """Return battery information, or None when no battery is
        installed.

         - percent: battery power left as a percentage.
         - secsleft: a rough approximation of how many seconds are left
           before the battery runs out of power. May be
           `POWER_TIME_UNLIMITED` or `POWER_TIME_UNKNOWN`.
         - power_plugged: True if the AC power cable is connected.
        """
        return _psplatform.sensors_battery()

    __all__.append("sensors_battery")

2562 

2563 

2564# ===================================================================== 

2565# --- other system related functions 

2566# ===================================================================== 

2567 

2568 

def boot_time() -> float:
    """Return the system boot time as seconds since the epoch
    (January 1, 1970, at midnight UTC).

    The value comes from the system clock, so it can be affected by
    clock changes such as manual adjustments or time synchronization
    (e.g. NTP).
    """
    return _psplatform.boot_time()

2578 

2579 

def users() -> list[suser]:
    """Return the users currently connected on the system as a list of
    named tuples with these fields:

     - user: the name of the user
     - terminal: the tty or pseudo-tty associated with the user, if any.
     - host: the host name associated with the entry, if any.
     - started: the creation time as a floating point number expressed in
       seconds since the epoch.
     - pid: the PID of the login process (None on Windows and OpenBSD).
    """
    return _psplatform.users()

2592 

2593 

2594# ===================================================================== 

2595# --- Windows services 

2596# ===================================================================== 

2597 

2598 

if WINDOWS:

    def win_service_iter() -> Iterator[WindowsService]:
        """Yield a `WindowsService` instance for every Windows service
        installed on the system.
        """
        return _psplatform.win_service_iter()

    def win_service_get(name) -> WindowsService:
        """Return the Windows service matching *name*.

        Raise `NoSuchProcess` if no service with that name exists.
        """
        return _psplatform.win_service_get(name)

2613 

2614 

2615# ===================================================================== 

2616# --- malloc / heap 

2617# ===================================================================== 

2618 

2619 

2620# Linux + glibc, Windows, macOS, FreeBSD, NetBSD 

2621if hasattr(_psplatform, "heap_info"): 

2622 

2623 def heap_info() -> pheap: 

2624 """Return low-level heap statistics from the C heap allocator 

2625 (glibc). 

2626 

2627 - `heap_used`: the total number of bytes allocated via 

2628 malloc/free. These are typically allocations smaller than 

2629 MMAP_THRESHOLD. 

2630 

2631 - `mmap_used`: the total number of bytes allocated via `mmap()` 

2632 or via large ``malloc()`` allocations. 

2633 

2634 - `heap_count` (Windows only): number of private heaps created 

2635 via `HeapCreate()`. 

2636 """ 

2637 return _ntp.pheap(*_psplatform.heap_info()) 

2638 

2639 def heap_trim() -> None: 

2640 """Request that the underlying allocator free any unused memory 

2641 it's holding in the heap (typically small `malloc()` 

2642 allocations). 

2643 

2644 In practice, modern allocators rarely comply, so this is not a 

2645 general-purpose memory-reduction tool and won't meaningfully 

2646 shrink RSS in real programs. Its primary value is in **leak 

2647 detection tools**. 

2648 

2649 Calling `heap_trim()` before taking measurements helps reduce 

2650 allocator noise, giving you a cleaner baseline so that changes 

2651 in `heap_used` come from the code you're testing, not from 

2652 internal allocator caching or fragmentation. Its effectiveness 

2653 depends on allocator behavior and fragmentation patterns. 

2654 """ 

2655 _psplatform.heap_trim() 

2656 

2657 __all__.append("heap_info") 

2658 __all__.append("heap_trim") 

2659 

2660 

2661# ===================================================================== 

2662 

2663 

def _set_debug(value):
    """Toggle the `PSUTIL_DEBUG` option, which makes psutil print
    debugging messages to stderr.
    """
    import psutil._common

    enabled = bool(value)
    # Keep the pure-Python flag and the C extension flag in sync.
    psutil._common.PSUTIL_DEBUG = enabled
    _psplatform.cext.set_debug(enabled)

2672 

2673 

# Internal helper only; remove it from the module namespace so it is
# not exposed as part of the public psutil API.
del memoize_when_activated