Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/psutil-8.0.0-py3.11-linux-x86_64.egg/psutil/__init__.py: 25%

Shortcuts on this page

r m x   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1055 statements  

1# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. 

2# Use of this source code is governed by a BSD-style license that can be 

3# found in the LICENSE file. 

4 

5"""psutil is a cross-platform library for retrieving information on 

6running processes and system utilization (CPU, memory, disks, network, 

7sensors) in Python. Supported platforms: 

8 

9 - Linux 

10 - Windows 

11 - macOS 

12 - FreeBSD 

13 - OpenBSD 

14 - NetBSD 

15 - Sun Solaris 

16 - AIX 

17 

18Supported Python versions are cPython 3.7+ and PyPy. 

19""" 

20 

21from __future__ import annotations 

22 

23import collections 

24import contextlib 

25import datetime 

26import functools 

27import os 

28import signal 

29import socket 

30import subprocess 

31import sys 

32import threading 

33import time 

34import warnings 

35from typing import TYPE_CHECKING as _TYPE_CHECKING 

36 

37try: 

38 import pwd 

39except ImportError: 

40 pwd = None 

41 

42from . import _common 

43from . import _ntuples as _ntp 

44from ._common import AIX 

45from ._common import BSD 

46from ._common import FREEBSD 

47from ._common import LINUX 

48from ._common import MACOS 

49from ._common import NETBSD 

50from ._common import OPENBSD 

51from ._common import OSX # deprecated alias 

52from ._common import POSIX 

53from ._common import SUNOS 

54from ._common import WINDOWS 

55from ._common import AccessDenied 

56from ._common import Error 

57from ._common import NoSuchProcess 

58from ._common import TimeoutExpired 

59from ._common import ZombieProcess 

60from ._common import debug 

61from ._common import memoize_when_activated 

62from ._common import wrap_numbers as _wrap_numbers 

63from ._enums import BatteryTime 

64from ._enums import ConnectionStatus 

65from ._enums import NicDuplex 

66from ._enums import ProcessStatus 

67 

68if _TYPE_CHECKING: 

69 from typing import Any 

70 from typing import Callable 

71 from typing import Generator 

72 from typing import Iterator 

73 

74 from ._ntuples import pconn 

75 from ._ntuples import pcputimes 

76 from ._ntuples import pctxsw 

77 from ._ntuples import pfootprint 

78 from ._ntuples import pfullmem 

79 from ._ntuples import pgids 

80 from ._ntuples import pheap 

81 from ._ntuples import pio 

82 from ._ntuples import pionice 

83 from ._ntuples import pmem 

84 from ._ntuples import pmem_ex 

85 from ._ntuples import pmmap_ext 

86 from ._ntuples import pmmap_grouped 

87 from ._ntuples import popenfile 

88 from ._ntuples import ppagefaults 

89 from ._ntuples import pthread 

90 from ._ntuples import puids 

91 from ._ntuples import sbattery 

92 from ._ntuples import sconn 

93 from ._ntuples import scpufreq 

94 from ._ntuples import scpustats 

95 from ._ntuples import scputimes 

96 from ._ntuples import sdiskio 

97 from ._ntuples import sdiskpart 

98 from ._ntuples import sdiskusage 

99 from ._ntuples import sfan 

100 from ._ntuples import shwtemp 

101 from ._ntuples import snetio 

102 from ._ntuples import snicaddr 

103 from ._ntuples import snicstats 

104 from ._ntuples import sswap 

105 from ._ntuples import suser 

106 from ._ntuples import svmem 

107 from ._pswindows import WindowsService 

108 

109 

# Select the platform-specific backend module and bind it to the
# common name `_psplatform`. Everything below this point dispatches
# through that alias, so only one of these branches ever runs.
if LINUX:
    # This is public API and it will be retrieved from _pslinux.py
    # via sys.modules.
    PROCFS_PATH = "/proc"

    from . import _pslinux as _psplatform
    from ._enums import ProcessIOPriority
    from ._enums import ProcessRlimit

elif WINDOWS:
    from . import _pswindows as _psplatform
    from ._enums import ProcessIOPriority
    from ._enums import ProcessPriority

elif MACOS:
    from . import _psosx as _psplatform

elif BSD:
    from . import _psbsd as _psplatform

    # rlimit() is only implemented on FreeBSD among the BSDs.
    if FREEBSD:
        from ._enums import ProcessRlimit

elif SUNOS:
    from . import _pssunos as _psplatform

    # This is public writable API which is read from _pslinux.py and
    # _pssunos.py via sys.modules.
    PROCFS_PATH = "/proc"

elif AIX:
    from . import _psaix as _psplatform

    # This is public API and it will be retrieved from _pslinux.py
    # via sys.modules.
    PROCFS_PATH = "/proc"

else:  # pragma: no cover
    # Unknown platform: fail loudly at import time instead of
    # producing confusing AttributeErrors later on.
    msg = f"platform {sys.platform} is not supported"
    raise NotImplementedError(msg)

150 

151 

# Public API surface. Kept hand-formatted (fmt: off) so related names
# stay grouped on the same line.
# fmt: off
__all__ = [
    # exceptions
    "Error", "NoSuchProcess", "ZombieProcess", "AccessDenied",
    "TimeoutExpired",

    # constants
    "version_info", "__version__",

    "AF_LINK",

    "BSD", "FREEBSD", "LINUX", "NETBSD", "OPENBSD", "MACOS", "OSX", "POSIX",
    "SUNOS", "WINDOWS", "AIX",

    # classes
    "Process", "Popen",

    # functions
    "pid_exists", "pids", "process_iter", "wait_procs",  # proc
    "virtual_memory", "swap_memory",  # memory
    "cpu_times", "cpu_percent", "cpu_times_percent", "cpu_count",  # cpu
    "cpu_stats", "getloadavg",  # "cpu_freq",
    "net_io_counters", "net_connections", "net_if_addrs",  # network
    "net_if_stats",
    "disk_io_counters", "disk_partitions", "disk_usage",  # disk
    # "sensors_temperatures", "sensors_battery", "sensors_fans" # sensors
    "users", "boot_time",  # others
]
# fmt: on

# Each platform backend advertises its extra public names (e.g.
# platform-only functions) via __extra__all__.
__all__.extend(_psplatform.__extra__all__)
# Snapshot of the module namespace, used by _export_enum() below to
# inject enum members as module-level constants.
_globals = globals()

184 

185 

def _export_enum(cls):
    """Expose an enum class and each of its members at module level.

    The enum class name is appended to ``__all__``; every member whose
    name is not already taken in the module namespace is injected as a
    module-level constant and exported as well.
    """
    __all__.append(cls.__name__)
    for member_name, member in cls.__members__.items():
        # Never clobber a name that already exists in this module.
        if member_name in _globals:  # noqa: F821
            continue
        _globals[member_name] = member  # noqa: F821
        __all__.append(member_name)

192 

193 

# Populate global namespace with enums and CONSTANTs.
_export_enum(ProcessStatus)
_export_enum(ConnectionStatus)
_export_enum(NicDuplex)
_export_enum(BatteryTime)
# Platform-only enums: these names were imported conditionally above,
# so only export them where they exist.
if LINUX or WINDOWS:
    _export_enum(ProcessIOPriority)
if WINDOWS:
    _export_enum(ProcessPriority)
if LINUX or FREEBSD:
    _export_enum(ProcessRlimit)
if LINUX or SUNOS or AIX:
    __all__.append("PROCFS_PATH")

# One-shot helpers; remove them so they don't leak into the public
# module namespace.
del _globals, _export_enum

209 

# Link-layer address family constant (value is platform-specific).
AF_LINK = _psplatform.AF_LINK

__author__ = "Giampaolo Rodola'"
__version__ = "8.0.0"
version_info = tuple(int(num) for num in __version__.split('.'))

# Monotonic clock used for CPU-percent interval math; falls back to
# time.time() defensively (time.monotonic exists on all supported
# Pythons, so the fallback is presumably never taken).
_timer = getattr(time, 'monotonic', time.time)
# Lazily populated caches (see virtual_memory() / pids() users).
_TOTAL_PHYMEM = None
_LOWEST_PID = None
# Unique marker meaning "value was never set" (None is a legit value).
_SENTINEL = object()

220 

# Sanity check in case the user messed up with psutil installation
# or did something weird with sys.path. In this case we might end
# up importing a python module using a C extension module which
# was compiled for a different version of psutil.
# We want to prevent that by failing sooner rather than later.
# See: https://github.com/giampaolo/psutil/issues/564
if int(__version__.replace('.', '')) != getattr(
    _psplatform.cext, 'version', None
):
    # The C extension may lack __file__ (e.g. when statically/frozen
    # built), so never dereference it directly: the original code did
    # and could raise AttributeError while building this very error.
    cext_file = getattr(_psplatform.cext, "__file__", None)
    msg = f"version conflict: {cext_file!r} C extension "
    msg += "module was built for another version of psutil"
    if hasattr(_psplatform.cext, 'version'):
        # cext.version is an int like 800; render it as "8.0.0".
        v = ".".join(str(_psplatform.cext.version))
        msg += f" ({v} instead of {__version__})"
    else:
        msg += f" (different than {__version__})"
    what = cext_file or "the existing psutil install directory"
    msg += f"; you may try to 'pip uninstall psutil', manually remove {what}"
    msg += " or clean the virtual env somehow, then reinstall"
    raise ImportError(msg)

245 

246 

247# ===================================================================== 

248# --- Utils 

249# ===================================================================== 

250 

251 

if hasattr(_psplatform, 'ppid_map'):
    # Faster version (Windows and Linux).
    _ppid_map = _psplatform.ppid_map
else:  # pragma: no cover

    def _ppid_map():
        """Return a {pid: ppid, ...} dict for all running processes in
        one shot. Used to speed up Process.children().
        """
        ret = {}
        for pid in pids():
            try:
                ret[pid] = _psplatform.Process(pid).ppid()
            except (NoSuchProcess, ZombieProcess):
                # Process vanished (or is a zombie we can't query)
                # between pids() and the ppid() call; skip it.
                pass
        return ret

268 

269 

270def _pprint_secs(secs): 

271 """Format seconds in a human readable form.""" 

272 now = time.time() 

273 secs_ago = int(now - secs) 

274 fmt = "%H:%M:%S" if secs_ago < 60 * 60 * 24 else "%Y-%m-%d %H:%M:%S" 

275 return datetime.datetime.fromtimestamp(secs).strftime(fmt) 

276 

277 

def _check_conn_kind(kind):
    """Validate net_connections()'s `kind` parameter.

    Raises ValueError (listing the accepted values) when *kind* is not
    one of the keys of _common.conn_tmap; returns None otherwise.
    """
    valid = tuple(_common.conn_tmap)
    if kind in valid:
        return
    msg = f"invalid kind argument {kind!r}; valid ones are: {valid}"
    raise ValueError(msg)

284 

285 

286# ===================================================================== 

287# --- Process class 

288# ===================================================================== 

289 

290 

291def _use_prefetch(method): 

292 """Decorator returning cached values from process_iter(attrs=...). 

293 

294 When process_iter() is called with an *attrs* argument, it 

295 pre-fetches the requested attributes via as_dict() and stores 

296 them in Process._prefetch. This decorator makes the decorated 

297 method return the cached value (if present) instead of issuing 

298 a new system call. 

299 """ 

300 

301 @functools.wraps(method) 

302 def wrapper(self, *args, **kwargs): 

303 if not args and not kwargs: 

304 try: 

305 return self._prefetch[method.__name__] 

306 except KeyError: 

307 pass 

308 return method(self, *args, **kwargs) 

309 

310 return wrapper 

311 

312 

class Process:
    """Represents an OS process with the given PID.
    If PID is omitted current process PID (os.getpid()) is used.
    Raise NoSuchProcess if PID does not exist.

    Note that most of the methods of this class do not make sure that
    the PID of the process being queried has been reused. That means
    that you may end up retrieving information for another process.

    The only exceptions for which process identity is pre-emptively
    checked and guaranteed are:

     - parent()
     - children()
     - nice() (set)
     - ionice() (set)
     - rlimit() (set)
     - cpu_affinity (set)
     - suspend()
     - resume()
     - send_signal()
     - terminate()
     - kill()

    To prevent this problem for all other methods you can use
    is_running() before querying the process.
    """

340 

    def __init__(self, pid: int | None = None) -> None:
        # All actual setup lives in _init(), which internal callers can
        # also invoke with _ignore_nsp=True (see _init's signature).
        self._init(pid)

343 

    def _init(self, pid, _ignore_nsp=False):
        """Validate *pid* and initialize all instance state.

        If *pid* is None the current process PID is used. Raises
        ValueError for negative PIDs and NoSuchProcess for PIDs that
        are out of the platform's valid range or (unless *_ignore_nsp*
        is true) no longer exist.
        """
        if pid is None:
            pid = os.getpid()
        else:
            if pid < 0:
                msg = f"pid must be a positive integer (got {pid})"
                raise ValueError(msg)
            try:
                # Platform-level range check (e.g. fits the C pid_t).
                _psplatform.cext.check_pid_range(pid)
            except OverflowError as err:
                msg = "process PID out of range"
                raise NoSuchProcess(pid, msg=msg) from err

        self._pid = pid
        self._name = None
        self._exe = None
        self._create_time = None
        self._gone = False
        self._pid_reused = False
        self._hash = None
        self._lock = threading.RLock()
        # used for caching on Windows only (on POSIX ppid may change)
        self._ppid = None
        # platform-specific modules define an _psplatform.Process
        # implementation class
        self._proc = _psplatform.Process(pid)
        # State for cpu_percent() interval bookkeeping.
        self._last_sys_cpu_times = None
        self._last_proc_cpu_times = None
        self._exitcode = _SENTINEL
        # Cache filled by process_iter(attrs=...); see _use_prefetch.
        self._prefetch = {}
        # Provisional identity; replaced by (pid, ctime) if obtainable.
        self._ident = (self.pid, None)
        try:
            self._ident = self._get_ident()
        except AccessDenied:
            # This should happen on Windows only, since we use the fast
            # create time method. AFAIK, on all other platforms we are
            # able to get create time for all PIDs.
            pass
        except ZombieProcess:
            # Zombies can still be queried by this class (although
            # not always) and pids() return them so just go on.
            pass
        except NoSuchProcess:
            if not _ignore_nsp:
                msg = "process PID not found"
                raise NoSuchProcess(pid, msg=msg) from None
            self._gone = True

391 

    def _get_ident(self):
        """Return a (pid, uid) tuple which is supposed to identify a
        Process instance univocally over time. The PID alone is not
        enough, as it can be assigned to a new process after this one
        terminates, so we add process creation time to the mix. We need
        this in order to prevent killing the wrong process later on.
        This is also known as PID reuse or PID recycling problem.

        The reliability of this strategy mostly depends on
        create_time() precision, which is 0.01 secs on Linux. The
        assumption is that, after a process terminates, the kernel
        won't reuse the same PID after such a short period of time
        (0.01 secs). Technically this is inherently racy, but
        practically it should be good enough.

        NOTE: unreliable on FreeBSD and OpenBSD as ctime is subject to
        system clock updates.
        """

        if WINDOWS:
            # Use create_time() fast method in order to speedup
            # `process_iter()`. This means we'll get AccessDenied for
            # most ADMIN processes, but that's fine since it means
            # we'll also get AccessDenied on kill().
            # https://github.com/giampaolo/psutil/issues/2366#issuecomment-2381646555
            # Note: also caches the result in self._create_time.
            self._create_time = self._proc.create_time(fast_only=True)
            return (self.pid, self._create_time)
        elif LINUX or NETBSD or OSX:
            # Use 'monotonic' process starttime since boot to form unique
            # process identity, since it is stable over changes to system
            # time.
            return (self.pid, self._proc.create_time(monotonic=True))
        else:
            # Wall-clock creation time: good enough, but subject to
            # system clock adjustments (see NOTE above).
            return (self.pid, self.create_time())

426 

    def __str__(self):
        """Return a human readable representation such as
        ``psutil.Process(pid=123, name='python', status='running')``.
        Best-effort: access/lookup failures degrade to fewer fields
        instead of raising.
        """
        info = {}
        info["pid"] = self.pid
        # oneshot() makes name() + status() share a single fetch.
        with self.oneshot():
            if self._pid_reused:
                info["status"] = "terminated + PID reused"
            else:
                try:
                    info["name"] = self._name or self.name()
                    info["status"] = str(self.status())
                except ZombieProcess:
                    info["status"] = "zombie"
                except NoSuchProcess:
                    info["status"] = "terminated"
                except AccessDenied:
                    pass

            # _SENTINEL means "never set", None means "no exit code";
            # neither is worth displaying.
            if self._exitcode not in {_SENTINEL, None}:
                info["exitcode"] = self._exitcode
            if self._create_time is not None:
                info['started'] = _pprint_secs(self._create_time)

        return "{}.{}({})".format(
            self.__class__.__module__,
            self.__class__.__name__,
            ", ".join([f"{k}={v!r}" for k, v in info.items()]),
        )

    __repr__ = __str__

456 

    def __eq__(self, other):
        # Test for equality with another Process object based
        # on PID and creation time (the `_ident` tuple).
        if not isinstance(other, Process):
            return NotImplemented
        if OPENBSD or NETBSD or SUNOS:  # pragma: no cover
            # Zombie processes on Open/NetBSD/illumos/Solaris have a
            # creation time of 0.0. This covers the case when a process
            # started normally (so it has a ctime), then it turned into a
            # zombie. It's important to do this because is_running()
            # depends on __eq__.
            pid1, ident1 = self._ident
            pid2, ident2 = other._ident
            if pid1 == pid2:
                # Same PID but only one side has a ctime: treat them as
                # equal iff the process is now a zombie.
                if ident1 and not ident2:
                    try:
                        return self.status() == ProcessStatus.STATUS_ZOMBIE
                    except Error:
                        pass
        return self._ident == other._ident

477 

478 def __ne__(self, other): 

479 return not self == other 

480 

    def __hash__(self):
        # Lazily computed and memoized. Based on the same (pid, ctime)
        # identity tuple used by __eq__, keeping the hash/eq contract.
        if self._hash is None:
            self._hash = hash(self._ident)
        return self._hash

485 

    def _raise_if_pid_reused(self):
        """Raises NoSuchProcess in case process PID has been reused."""
        # NOTE: the second clause is not redundant: is_running() may
        # SET self._pid_reused as a side effect, so it must be
        # re-checked after the call.
        if self._pid_reused or (not self.is_running() and self._pid_reused):
            # We may directly raise NSP in here already if PID is just
            # not running, but I prefer NSP to be raised naturally by
            # the actual Process API call. This way unit tests will tell
            # us if the API is broken (aka don't raise NSP when it
            # should). We also remain consistent with all other "get"
            # APIs which don't use _raise_if_pid_reused().
            msg = "process no longer exists and its PID has been reused"
            raise NoSuchProcess(self.pid, self._name, msg=msg)

497 

    @property
    def pid(self) -> int:
        """The process PID (read-only, set at construction time)."""
        return self._pid

502 

    # DEPRECATED
    @property
    def info(self) -> dict:
        """Return pre-fetched process_iter() info dict.

        Deprecated: use method calls instead (e.g. p.name()).
        """
        msg = (
            "Process.info is deprecated; use method calls instead"
            " (e.g. p.name() instead of p.info['name'])"
        )
        warnings.warn(msg, DeprecationWarning, stacklevel=2)
        # Return a copy to prevent the user from mutating the dict and
        # corrupting the prefetch cache.
        return self._prefetch.copy()

518 

519 # --- utility methods 

520 

    @contextlib.contextmanager
    def oneshot(self) -> Generator[None, None, None]:
        """Utility context manager which considerably speeds up the
        retrieval of multiple process information at the same time.

        Internally different process info (e.g. name, ppid, uids,
        gids, ...) may be fetched by using the same routine, but
        only one information is returned and the others are discarded.
        When using this context manager the internal routine is
        executed once (in the example below on name()) and the
        other info are cached.

        The cache is cleared when exiting the context manager block.
        The advice is to use this every time you retrieve more than
        one information about the process. If you're lucky, you'll
        get a hell of a speedup.

        >>> import psutil
        >>> p = psutil.Process()
        >>> with p.oneshot():
        ...     p.name()  # collect multiple info
        ...     p.cpu_times()  # return cached value
        ...     p.cpu_percent()  # return cached value
        ...     p.create_time()  # return cached value
        ...
        >>>
        """
        # RLock so the same thread may nest oneshot() calls safely.
        with self._lock:
            # The presence of self._cache (set by cache_activate below)
            # marks an already-active outer oneshot().
            if hasattr(self, "_cache"):
                # NOOP: this covers the use case where the user enters the
                # context twice:
                #
                # >>> with p.oneshot():
                # ...     with p.oneshot():
                # ...
                #
                # Also, since as_dict() internally uses oneshot()
                # I expect that the code below will be a pretty common
                # "mistake" that the user will make, so let's guard
                # against that:
                #
                # >>> with p.oneshot():
                # ...     p.as_dict()
                # ...
                yield
            else:
                try:
                    # cached in case cpu_percent() is used
                    self.cpu_times.cache_activate(self)
                    # cached in case memory_percent() is used
                    self.memory_info.cache_activate(self)
                    # cached in case parent() is used
                    self.ppid.cache_activate(self)
                    # cached in case username() is used
                    if POSIX:
                        self.uids.cache_activate(self)
                    # specific implementation cache
                    self._proc.oneshot_enter()
                    yield
                finally:
                    # Always tear the caches down, even if the body
                    # raised, so stale values never leak out.
                    self.cpu_times.cache_deactivate(self)
                    self.memory_info.cache_deactivate(self)
                    self.ppid.cache_deactivate(self)
                    if POSIX:
                        self.uids.cache_deactivate(self)
                    self._proc.oneshot_exit()

587 

    def as_dict(
        self, attrs: list[str] | None = None, ad_value: Any = None
    ) -> dict[str, Any]:
        """Utility method returning process information as a
        hashable dictionary.
        If *attrs* is specified it must be a list of strings
        reflecting available Process class' attribute names
        (e.g. ['cpu_times', 'name']) else all public (read
        only) attributes are assumed.
        *ad_value* is the value which gets assigned in case
        AccessDenied or ZombieProcess exception is raised when
        retrieving that particular process information.

        Raises TypeError / ValueError for malformed *attrs*.
        """
        valid_names = _as_dict_attrnames
        if attrs is not None:
            if not isinstance(attrs, (list, tuple, set, frozenset)):
                msg = f"invalid attrs type {type(attrs)}"
                raise TypeError(msg)
            attrs = set(attrs)
            # Deprecated names are tolerated but not part of the
            # default set.
            invalid_names = attrs - valid_names - _as_dict_attrnames_deprecated
            if invalid_names:
                msg = "invalid attr name{} {}".format(
                    "s" if len(invalid_names) > 1 else "",
                    ", ".join(map(repr, invalid_names)),
                )
                raise ValueError(msg)

        retdict = {}
        ls = attrs or valid_names
        # oneshot() lets all the method calls below share fetches.
        with self.oneshot():
            for name in ls:
                try:
                    if name == 'pid':
                        # 'pid' is a property, not a method.
                        ret = self.pid
                    else:
                        meth = getattr(self, name)
                        ret = meth()
                except (AccessDenied, ZombieProcess):
                    ret = ad_value
                except NotImplementedError:
                    # in case of not implemented functionality (may happen
                    # on old or exotic systems) we want to crash only if
                    # the user explicitly asked for that particular attr
                    if attrs:
                        raise
                    continue
                retdict[name] = ret
        return retdict

636 

    def parent(self) -> Process | None:
        """Return the parent process as a Process object pre-emptively
        checking whether PID has been reused.
        If no parent is known return None.
        """
        # The lowest PID on the system (e.g. init/PID 1) has no parent
        # worth reporting.
        lowest_pid = _LOWEST_PID if _LOWEST_PID is not None else pids()[0]
        if self.pid == lowest_pid:
            return None
        ppid = self.ppid()
        if ppid is not None:
            # Get a fresh (non-cached) ctime in case the system clock
            # was updated. TODO: use a monotonic ctime on platforms
            # where it's supported.
            proc_ctime = Process(self.pid).create_time()
            try:
                parent = Process(ppid)
                # A genuine parent must have been started no later
                # than its child.
                if parent.create_time() <= proc_ctime:
                    return parent
                # ...else ppid has been reused by another process
            except NoSuchProcess:
                pass

658 

659 def parents(self) -> list[Process]: 

660 """Return the parents of this process as a list of Process 

661 instances. If no parents are known return an empty list. 

662 """ 

663 parents = [] 

664 proc = self.parent() 

665 while proc is not None: 

666 parents.append(proc) 

667 proc = proc.parent() 

668 return parents 

669 

    def is_running(self) -> bool:
        """Return whether this process is running.

        It also checks if PID has been reused by another process, in
        which case it will remove the process from `process_iter()`
        internal cache and return False.
        """
        # Both flags are sticky: once gone/reused, always gone/reused.
        if self._gone or self._pid_reused:
            return False
        try:
            # Checking if PID is alive is not enough as the PID might
            # have been reused by another process. Process identity /
            # uniqueness over time is guaranteed by (PID + creation
            # time) and that is verified in __eq__.
            self._pid_reused = self != Process(self.pid)
            if self._pid_reused:
                # Tell process_iter() to drop its cached entry.
                _pids_reused.add(self.pid)
                raise NoSuchProcess(self.pid)
            return True
        except ZombieProcess:
            # We should never get here as it's already handled in
            # Process.__init__; here just for extra safety.
            return True
        except NoSuchProcess:
            self._gone = True
            return False

696 

697 # --- actual API 

698 

    @_use_prefetch
    @memoize_when_activated
    def ppid(self) -> int:
        """The process parent PID.
        On Windows the return value is cached after first call.
        """
        # On POSIX we don't want to cache the ppid as it may unexpectedly
        # change to 1 (init) in case this process turns into a zombie:
        # https://github.com/giampaolo/psutil/issues/321
        # http://stackoverflow.com/questions/356722/

        # XXX should we check creation time here rather than in
        # Process.parent()?
        self._raise_if_pid_reused()
        if POSIX:
            return self._proc.ppid()
        else:  # pragma: no cover
            # Windows: ppid can't change, so cache it forever.
            self._ppid = self._ppid or self._proc.ppid()
            return self._ppid

718 

    @_use_prefetch
    def name(self) -> str:
        """The process name. The return value is cached after first call."""
        # Process name is only cached on Windows as on POSIX it may
        # change, see:
        # https://github.com/giampaolo/psutil/issues/692
        if WINDOWS and self._name is not None:
            return self._name
        name = self._proc.name()
        if POSIX and len(name) >= 15:
            # On UNIX the name gets truncated to the first 15 characters.
            # If it matches the first part of the cmdline we return that
            # one instead because it's usually more explicative.
            # Examples are "gnome-keyring-d" vs. "gnome-keyring-daemon".
            try:
                cmdline = self.cmdline()
            except (AccessDenied, ZombieProcess):
                # Just pass and return the truncated name: it's better
                # than nothing. Note: there are actual cases where a
                # zombie process can return a name() but not a
                # cmdline(), see:
                # https://github.com/giampaolo/psutil/issues/2239
                pass
            else:
                if cmdline:
                    extended_name = os.path.basename(cmdline[0])
                    # Only trust cmdline[0] if it extends the truncated
                    # name (guards against unrelated cmdlines).
                    if extended_name.startswith(name):
                        name = extended_name
        self._name = name
        # Share the resolved name with the platform implementation
        # (used e.g. in its error messages).
        self._proc._name = name
        return name

750 

    @_use_prefetch
    def exe(self) -> str:
        """The process executable as an absolute path.
        May also be an empty string.
        The return value is cached after first call.
        """

        def guess_it(fallback):
            # try to guess exe from cmdline[0] in absence of a native
            # exe representation
            cmdline = self.cmdline()
            if cmdline and hasattr(os, 'access') and hasattr(os, 'X_OK'):
                exe = cmdline[0]  # the possible exe
                # Attempt to guess only in case of an absolute path.
                # It is not safe otherwise as the process might have
                # changed cwd.
                if (
                    os.path.isabs(exe)
                    and os.path.isfile(exe)
                    and os.access(exe, os.X_OK)
                ):
                    return exe
            # Could not guess: re-raise the original AccessDenied, or
            # return the original (possibly empty) value.
            if isinstance(fallback, AccessDenied):
                raise fallback
            return fallback

        if self._exe is None:
            try:
                exe = self._proc.exe()
            except AccessDenied as err:
                # Note: deliberately NOT cached, so a later privileged
                # retry gets another chance.
                return guess_it(fallback=err)
            else:
                if not exe:
                    # underlying implementation can legitimately return an
                    # empty string; if that's the case we don't want to
                    # raise AD while guessing from the cmdline
                    try:
                        exe = guess_it(fallback=exe)
                    except AccessDenied:
                        pass
                self._exe = exe
        return self._exe

793 

    @_use_prefetch
    def cmdline(self) -> list[str]:
        """The command line this process has been called with,
        as a list of arguments (argv-style). Not cached, since a
        process may legitimately rewrite its own cmdline.
        """
        return self._proc.cmdline()

798 

    @_use_prefetch
    def status(self) -> ProcessStatus | str:
        """The process current status as a STATUS_* constant."""
        try:
            return self._proc.status()
        except ZombieProcess:
            # A zombie's status IS the answer, not an error condition.
            return ProcessStatus.STATUS_ZOMBIE

806 

807 @_use_prefetch 

808 def username(self) -> str: 

809 """The name of the user that owns the process. 

810 On UNIX this is calculated by using *real* process uid. 

811 """ 

812 if POSIX: 

813 if pwd is None: 

814 # might happen if python was installed from sources 

815 msg = "requires pwd module shipped with standard python" 

816 raise ImportError(msg) 

817 real_uid = self.uids().real 

818 try: 

819 return pwd.getpwuid(real_uid).pw_name 

820 except KeyError: 

821 # the uid can't be resolved by the system 

822 return str(real_uid) 

823 else: 

824 return self._proc.username() 

825 

    @_use_prefetch
    def create_time(self) -> float:
        """The process creation time as a floating point number
        expressed in seconds since the epoch (seconds since January 1,
        1970, at midnight UTC). The return value, which is cached after
        first call, is based on the system clock, which means it may be
        affected by changes such as manual adjustments or time
        synchronization (e.g. NTP).
        """
        # May already be populated by _get_ident() on Windows.
        if self._create_time is None:
            self._create_time = self._proc.create_time()
        return self._create_time

838 

    @_use_prefetch
    def cwd(self) -> str:
        """Process current working directory as an absolute path."""
        return self._proc.cwd()

843 

    @_use_prefetch
    def nice(self, value: int | None = None) -> int | None:
        """Get or set process niceness (priority).

        With no argument, return the current niceness. With *value*,
        set it (returns None); the setter path first verifies the PID
        has not been reused, to avoid re-nicing an unrelated process.
        """
        if value is None:
            return self._proc.nice_get()
        else:
            self._raise_if_pid_reused()
            self._proc.nice_set(value)

852 

    # POSIX-only accessors; the whole group is simply absent on Windows.
    if POSIX:

        @_use_prefetch
        @memoize_when_activated
        def uids(self) -> puids:
            """Return process UIDs as a (real, effective, saved)
            named tuple. Cached while inside oneshot().
            """
            return self._proc.uids()

        @_use_prefetch
        def gids(self) -> pgids:
            """Return process GIDs as a (real, effective, saved)
            named tuple.
            """
            return self._proc.gids()

        @_use_prefetch
        def terminal(self) -> str | None:
            """The terminal associated with this process, if any,
            else None.
            """
            return self._proc.terminal()

        @_use_prefetch
        def num_fds(self) -> int:
            """Return the number of file descriptors opened by this
            process (POSIX only).
            """
            return self._proc.num_fds()

883 

884 # Linux, BSD, AIX and Windows only 

    # Defined only when the platform backend implements it
    # (Linux, BSD, AIX and Windows).
    if hasattr(_psplatform.Process, "io_counters"):

        @_use_prefetch
        def io_counters(self) -> pio:
            """Return process I/O statistics as a
            (read_count, write_count, read_bytes, write_bytes)
            named tuple.
            Those are the number of read/write calls performed and the
            amount of bytes read and written by the process.
            """
            return self._proc.io_counters()

896 

897 # Linux and Windows 

    if hasattr(_psplatform.Process, "ionice_get"):

        @_use_prefetch
        def ionice(
            self, ioclass: int | None = None, value: int | None = None
        ) -> pionice | ProcessIOPriority | None:
            """Get or set process I/O niceness (priority).

            On Linux *ioclass* is one of the IOPRIO_CLASS_* constants.
            *value* is a number which goes from 0 to 7. The higher the
            value, the lower the I/O priority of the process.

            On Windows only *ioclass* is used and it can be set to 2
            (normal), 1 (low) or 0 (very low).

            Available on Linux and Windows > Vista only.

            Raises ValueError if *value* is given without *ioclass*.
            """
            if ioclass is None:
                if value is not None:
                    msg = "'ioclass' argument must be specified"
                    raise ValueError(msg)
                return self._proc.ionice_get()
            else:
                # Setter: guard against acting on a recycled PID.
                self._raise_if_pid_reused()
                return self._proc.ionice_set(ioclass, value)

923 

924 # Linux / FreeBSD only 

    # Linux / FreeBSD only
    if hasattr(_psplatform.Process, "rlimit"):

        def rlimit(
            self,
            resource: int,
            limits: tuple[int, int] | None = None,
        ) -> tuple[int, int] | None:
            """Get or set process resource limits as a (soft, hard)
            tuple.

            *resource* is one of the RLIMIT_* constants.
            *limits* is supposed to be a (soft, hard) tuple.

            See "man prlimit" for further info.
            Available on Linux and FreeBSD only.
            """
            # Only the set operation needs PID-reuse protection.
            if limits is not None:
                self._raise_if_pid_reused()
            return self._proc.rlimit(resource, limits)

944 

945 # Windows, Linux and FreeBSD only 

    # Windows, Linux and FreeBSD only
    if hasattr(_psplatform.Process, "cpu_affinity_get"):

        @_use_prefetch
        def cpu_affinity(
            self, cpus: list[int] | None = None
        ) -> list[int] | None:
            """Get or set process CPU affinity.
            If specified, *cpus* must be a list of CPUs for which you
            want to set the affinity (e.g. [0, 1]).
            If an empty list is passed, all egible CPUs are assumed
            (and set).
            (Windows, Linux and BSD only).
            """
            if cpus is None:
                # Getter: normalized (sorted, de-duplicated) CPU list.
                return sorted(set(self._proc.cpu_affinity_get()))
            else:
                self._raise_if_pid_reused()
                if not cpus:
                    # Empty list means "all eligible CPUs"; prefer the
                    # backend's own notion of eligibility if available.
                    if hasattr(self._proc, "_get_eligible_cpus"):
                        cpus = self._proc._get_eligible_cpus()
                    else:
                        cpus = tuple(range(len(cpu_times(percpu=True))))
                self._proc.cpu_affinity_set(list(set(cpus)))

969 

970 # Linux, FreeBSD, SunOS 

971 if hasattr(_psplatform.Process, "cpu_num"): 

972 

973 @_use_prefetch 

974 def cpu_num(self) -> int: 

975 """Return what CPU this process is currently running on. 

976 The returned number should be <= psutil.cpu_count() 

977 and <= len(psutil.cpu_percent(percpu=True)). 

978 It may be used in conjunction with 

979 psutil.cpu_percent(percpu=True) to observe the system 

980 workload distributed across CPUs. 

981 """ 

982 return self._proc.cpu_num() 

983 

984 # All platforms has it, but maybe not in the future. 

985 if hasattr(_psplatform.Process, "environ"): 

986 

987 @_use_prefetch 

988 def environ(self) -> dict[str, str]: 

989 """The environment variables of the process as a dict. Note: this 

990 might not reflect changes made after the process started. 

991 """ 

992 return self._proc.environ() 

993 

994 if WINDOWS: 

995 

996 @_use_prefetch 

997 def num_handles(self) -> int: 

998 """Return the number of handles opened by this process 

999 (Windows only). 

1000 """ 

1001 return self._proc.num_handles() 

1002 

1003 @_use_prefetch 

1004 def num_ctx_switches(self) -> pctxsw: 

1005 """Return the number of voluntary and involuntary context 

1006 switches performed by this process. 

1007 """ 

1008 return self._proc.num_ctx_switches() 

1009 

1010 @_use_prefetch 

1011 def num_threads(self) -> int: 

1012 """Return the number of threads used by this process.""" 

1013 return self._proc.num_threads() 

1014 

1015 if hasattr(_psplatform.Process, "threads"): 

1016 

1017 @_use_prefetch 

1018 def threads(self) -> list[pthread]: 

1019 """Return threads opened by process as a list of 

1020 (id, user_time, system_time) named tuples representing 

1021 thread id and thread CPU times (user/system). 

1022 On OpenBSD this method requires root access. 

1023 """ 

1024 return self._proc.threads() 

1025 

    def children(self, recursive: bool = False) -> list[Process]:
        """Return the children of this process as a list of Process
        instances, pre-emptively checking whether PID has been reused.
        If *recursive* is True return all the descendants of the
        parent process.

        Example (A == this process):

         A ─┐
            │
            ├─ B (child) ─┐
            │             └─ X (grandchild) ─┐
            │                                └─ Y (great grandchild)
            ├─ C (child)
            └─ D (child)

        >>> import psutil
        >>> p = psutil.Process()
        >>> p.children()
        B, C, D
        >>> p.children(recursive=True)
        B, X, Y, C, D

        Note that in the example above if process X disappears
        process Y won't be listed as the reference to process A
        is lost.
        """
        self._raise_if_pid_reused()
        # Snapshot of the whole {pid: ppid} table, taken once.
        ppid_map = _ppid_map()
        # Get a fresh (non-cached) ctime in case the system clock was
        # updated. TODO: use a monotonic ctime on platforms where it's
        # supported.
        proc_ctime = Process(self.pid).create_time()
        ret = []
        if not recursive:
            # Direct children only: a single scan of the ppid map.
            for pid, ppid in ppid_map.items():
                if ppid == self.pid:
                    try:
                        child = Process(pid)
                        # if child happens to be older than its parent
                        # (self) it means child's PID has been reused
                        if proc_ctime <= child.create_time():
                            ret.append(child)
                    except (NoSuchProcess, ZombieProcess):
                        pass
        else:
            # Construct a {pid: [child pids]} dict
            reverse_ppid_map = collections.defaultdict(list)
            for pid, ppid in ppid_map.items():
                reverse_ppid_map[ppid].append(pid)
            # Recursively traverse that dict, starting from self.pid,
            # such that we only call Process() on actual children
            seen = set()
            stack = [self.pid]
            while stack:
                pid = stack.pop()
                if pid in seen:
                    # Since pids can be reused while the ppid_map is
                    # constructed, there may be rare instances where
                    # there's a cycle in the recorded process "tree".
                    continue
                seen.add(pid)
                for child_pid in reverse_ppid_map[pid]:
                    try:
                        child = Process(child_pid)
                        # if child happens to be older than its parent
                        # (self) it means child's PID has been reused
                        intime = proc_ctime <= child.create_time()
                        if intime:
                            ret.append(child)
                            # Only descend into children that passed
                            # the ctime check.
                            stack.append(child_pid)
                    except (NoSuchProcess, ZombieProcess):
                        pass
        return ret

1099 

    @_use_prefetch
    def cpu_percent(self, interval: float | None = None) -> float:
        """Return a float representing the current process CPU
        utilization as a percentage.

        When *interval* is 0.0 or None (default) compares process times
        to system CPU times elapsed since last call, returning
        immediately (non-blocking). That means that the first time
        this is called it will return a meaningful 0.0 value.

        When *interval* is > 0.0 compares process times to system CPU
        times elapsed before and after the interval (blocking).

        In this case is recommended for accuracy that this function
        be called with at least 0.1 seconds between calls.

        A value > 100.0 can be returned in case of processes running
        multiple threads on different CPU cores.

        The returned value is explicitly NOT split evenly between
        all available logical CPUs. This means that a busy loop process
        running on a system with 2 logical CPUs will be reported as
        having 100% CPU utilization instead of 50%.

        Examples:

          >>> import psutil
          >>> p = psutil.Process(os.getpid())
          >>> # blocking
          >>> p.cpu_percent(interval=1)
          2.0
          >>> # non-blocking (percentage since last call)
          >>> p.cpu_percent(interval=None)
          2.9
          >>>
        """
        blocking = interval is not None and interval > 0.0
        if interval is not None and interval < 0:
            msg = f"interval is not positive (got {interval!r})"
            raise ValueError(msg)
        num_cpus = cpu_count() or 1

        def timer():
            # System time scaled by CPU count, so that deltas are
            # comparable with the summed per-process CPU times.
            return _timer() * num_cpus

        if blocking:
            # Sample before and after sleeping for *interval*.
            st1 = timer()
            pt1 = self._proc.cpu_times()
            time.sleep(interval)
            st2 = timer()
            pt2 = self._proc.cpu_times()
        else:
            # Non-blocking: compare against the samples stored by the
            # previous call (None on the very first call).
            st1 = self._last_sys_cpu_times
            pt1 = self._last_proc_cpu_times
            st2 = timer()
            pt2 = self._proc.cpu_times()
            if st1 is None or pt1 is None:
                # First call: store baselines and return 0.0 as
                # documented above.
                self._last_sys_cpu_times = st2
                self._last_proc_cpu_times = pt2
                return 0.0

        delta_proc = (pt2.user - pt1.user) + (pt2.system - pt1.system)
        delta_time = st2 - st1
        # reset values for next call in case of interval == None
        self._last_sys_cpu_times = st2
        self._last_proc_cpu_times = pt2

        try:
            # This is the utilization split evenly between all CPUs.
            # E.g. a busy loop process on a 2-CPU-cores system at this
            # point is reported as 50% instead of 100%.
            overall_cpus_percent = (delta_proc / delta_time) * 100
        except ZeroDivisionError:
            # interval was too low
            return 0.0
        else:
            # Note 1:
            # in order to emulate "top" we multiply the value for the num
            # of CPU cores. This way the busy process will be reported as
            # having 100% (or more) usage.
            #
            # Note 2:
            # taskmgr.exe on Windows differs in that it will show 50%
            # instead.
            #
            # Note 3:
            # a percentage > 100 is legitimate as it can result from a
            # process with multiple threads running on different CPU
            # cores (top does the same), see:
            # http://stackoverflow.com/questions/1032357
            # https://github.com/giampaolo/psutil/issues/474
            single_cpu_percent = overall_cpus_percent * num_cpus
            return round(single_cpu_percent, 1)

1193 

1194 @_use_prefetch 

1195 @memoize_when_activated 

1196 def cpu_times(self) -> pcputimes: 

1197 """Return a (user, system, children_user, children_system) 

1198 named tuple representing the accumulated process time, in 

1199 seconds. 

1200 This is similar to os.times() but per-process. 

1201 On macOS and Windows children_user and children_system are 

1202 always set to 0. 

1203 """ 

1204 return self._proc.cpu_times() 

1205 

1206 @_use_prefetch 

1207 @memoize_when_activated 

1208 def memory_info(self) -> pmem: 

1209 """Return a named tuple with variable fields depending on the 

1210 platform, representing memory information about the process. 

1211 

1212 The "portable" fields available on all platforms are `rss` and `vms`. 

1213 

1214 All numbers are expressed in bytes. 

1215 """ 

1216 return self._proc.memory_info() 

1217 

1218 @_use_prefetch 

1219 @memoize_when_activated 

1220 def memory_info_ex(self) -> pmem_ex: 

1221 """Return a named tuple extending memory_info() with extra 

1222 metrics. 

1223 

1224 All numbers are expressed in bytes. 

1225 """ 

1226 base = self.memory_info() 

1227 if hasattr(self._proc, "memory_info_ex"): 

1228 extras = self._proc.memory_info_ex() 

1229 return _ntp.pmem_ex(**base._asdict(), **extras) 

1230 return base 

1231 

1232 # Linux, macOS, Windows 

1233 if hasattr(_psplatform.Process, "memory_footprint"): 

1234 

1235 @_use_prefetch 

1236 def memory_footprint(self) -> pfootprint: 

1237 """Return a named tuple with USS, PSS and swap memory 

1238 metrics. These provide a better representation of 

1239 actual process memory usage. 

1240 

1241 USS is the memory unique to a process and which would 

1242 be freed if the process was terminated right now. 

1243 

1244 It does so by passing through the whole process address. As 

1245 such it usually requires higher user privileges than 

1246 memory_info() or memory_info_ex() and is considerably 

1247 slower. 

1248 """ 

1249 return self._proc.memory_footprint() 

1250 

1251 # DEPRECATED 

1252 def memory_full_info(self) -> pfullmem: 

1253 """Return the same information as memory_info() plus 

1254 memory_footprint() in a single named tuple. 

1255 

1256 DEPRECATED in 8.0.0. Use memory_footprint() instead. 

1257 """ 

1258 msg = ( 

1259 "memory_full_info() is deprecated; use memory_footprint() instead" 

1260 ) 

1261 warnings.warn(msg, DeprecationWarning, stacklevel=2) 

1262 basic_mem = self.memory_info() 

1263 if hasattr(self, "memory_footprint"): 

1264 fp = self.memory_footprint() 

1265 return _ntp.pfullmem(*basic_mem + fp) 

1266 return _ntp.pfullmem(*basic_mem) 

1267 

    def memory_percent(self, memtype: str = "rss") -> float:
        """Compare process memory to total physical system memory and
        calculate process memory utilization as a percentage.
        *memtype* argument is a string that dictates what type of
        process memory you want to compare against (defaults to "rss").
        The list of available strings can be obtained like this:

        >>> psutil.Process().memory_info()._fields
        ('rss', 'vms', 'shared', 'text', 'lib', 'data', 'dirty', 'uss', 'pss')
        """
        # Build the list of accepted field names from the basic,
        # extended and footprint named tuples (the latter two exist
        # only on some platforms).
        valid_types = list(_ntp.pmem._fields)
        if hasattr(_ntp, "pmem_ex"):
            valid_types += [
                f for f in _ntp.pmem_ex._fields if f not in valid_types
            ]
        if hasattr(_ntp, "pfootprint"):
            valid_types += [
                f for f in _ntp.pfootprint._fields if f not in valid_types
            ]
        if memtype not in valid_types:
            msg = (
                f"invalid memtype {memtype!r}; valid types are"
                f" {tuple(valid_types)!r}"
            )
            raise ValueError(msg)
        # Dispatch to the cheapest method that provides *memtype*:
        # memory_info() first, then memory_footprint(), then
        # memory_info_ex().
        if memtype in _ntp.pmem._fields:
            fun = self.memory_info
        elif (
            hasattr(_ntp, "pfootprint") and memtype in _ntp.pfootprint._fields
        ):
            fun = self.memory_footprint
        else:
            fun = self.memory_info_ex
        metrics = fun()
        value = getattr(metrics, memtype)

        # use cached value if available
        total_phymem = _TOTAL_PHYMEM or virtual_memory().total
        if not total_phymem > 0:
            # we should never get here
            msg = (
                "can't calculate process memory percent because total physical"
                f" system memory is not positive ({total_phymem!r})"
            )
            raise ValueError(msg)
        return (value / float(total_phymem)) * 100

1314 

    if hasattr(_psplatform.Process, "memory_maps"):

        @_use_prefetch
        def memory_maps(
            self, grouped: bool = True
        ) -> list[pmmap_grouped] | list[pmmap_ext]:
            """Return process' mapped memory regions as a list of named tuples
            whose fields are variable depending on the platform.

            If *grouped* is True the mapped regions with the same 'path'
            are grouped together and the different memory fields are summed.

            If *grouped* is False every mapped region is shown as a single
            entity and the named tuple will also include the mapped region's
            address space ('addr') and permission set ('perms').
            """

            it = self._proc.memory_maps()
            if grouped:
                # Accumulate the numeric fields (tupl[3:]) of every
                # region sharing the same path (tupl[2]).
                d = {}
                for tupl in it:
                    path = tupl[2]
                    nums = tupl[3:]
                    try:
                        # Element-wise sum with the running totals.
                        d[path] = list(map(lambda x, y: x + y, d[path], nums))
                    except KeyError:
                        # First occurrence of this path.
                        d[path] = nums
                return [_ntp.pmmap_grouped(path, *d[path]) for path in d]
            else:
                return [_ntp.pmmap_ext(*x) for x in it]

1345 

1346 @_use_prefetch 

1347 def page_faults(self) -> ppagefaults: 

1348 """Return the number of page faults for this process as a 

1349 (minor, major) named tuple. 

1350 

1351 - *minor* (a.k.a. *soft* faults): occur when a memory page is 

1352 not currently mapped into the process address space, but is 

1353 already present in physical RAM (e.g. a shared library page 

1354 loaded by another process). The kernel resolves these without 

1355 disk I/O. 

1356 

1357 - *major* (a.k.a. *hard* faults): occur when the page must be 

1358 fetched from disk. These are expensive because they stall the 

1359 process until I/O completes. 

1360 

1361 Both counters are cumulative since process creation. 

1362 """ 

1363 return self._proc.page_faults() 

1364 

1365 @_use_prefetch 

1366 def open_files(self) -> list[popenfile]: 

1367 """Return files opened by process as a list of 

1368 (path, fd) named tuples including the absolute file name 

1369 and file descriptor number. 

1370 """ 

1371 return self._proc.open_files() 

1372 

1373 @_use_prefetch 

1374 def net_connections(self, kind: str = "inet") -> list[pconn]: 

1375 """Return socket connections opened by process as a list of 

1376 (fd, family, type, laddr, raddr, status) named tuples. 

1377 The *kind* parameter filters for connections that match the 

1378 following criteria: 

1379 

1380 +------------+----------------------------------------------------+ 

1381 | Kind Value | Connections using | 

1382 +------------+----------------------------------------------------+ 

1383 | inet | IPv4 and IPv6 | 

1384 | inet4 | IPv4 | 

1385 | inet6 | IPv6 | 

1386 | tcp | TCP | 

1387 | tcp4 | TCP over IPv4 | 

1388 | tcp6 | TCP over IPv6 | 

1389 | udp | UDP | 

1390 | udp4 | UDP over IPv4 | 

1391 | udp6 | UDP over IPv6 | 

1392 | unix | UNIX socket (both UDP and TCP protocols) | 

1393 | all | the sum of all the possible families and protocols | 

1394 +------------+----------------------------------------------------+ 

1395 """ 

1396 _check_conn_kind(kind) 

1397 return self._proc.net_connections(kind) 

1398 

    # Deprecated alias kept for backward compatibility; the decorator
    # emits a DeprecationWarning pointing at net_connections().
    @_common.deprecated_method(replacement="net_connections")
    def connections(self, kind="inet") -> list[pconn]:
        return self.net_connections(kind=kind)

1402 

    # --- signals

    if POSIX:

        def _send_signal(self, sig):
            # Deliver *sig* to this process via os.kill(), translating
            # OS-level errors into psutil exceptions. POSIX only.
            assert not self.pid < 0, self.pid
            self._raise_if_pid_reused()

            pid, ppid, name = self.pid, self._ppid, self._name
            if pid == 0:
                # see "man 2 kill"
                msg = (
                    "preventing sending signal to process with PID 0 as it "
                    "would affect every process in the process group of the "
                    "calling process (os.getpid()) instead of PID 0"
                )
                raise ValueError(msg)
            try:
                os.kill(pid, sig)
            except ProcessLookupError as err:
                if OPENBSD and pid_exists(pid):
                    # We do this because os.kill() lies in case of
                    # zombie processes.
                    raise ZombieProcess(pid, name, ppid) from err
                # Process is really gone: remember it so later calls
                # can fail fast.
                self._gone = True
                raise NoSuchProcess(pid, name) from err
            except PermissionError as err:
                raise AccessDenied(pid, name) from err

1431 

1432 def send_signal(self, sig: int) -> None: 

1433 """Send a signal *sig* to process pre-emptively checking 

1434 whether PID has been reused (see signal module constants) . 

1435 On Windows only SIGTERM is valid and is treated as an alias 

1436 for kill(). 

1437 """ 

1438 if POSIX: 

1439 self._send_signal(sig) 

1440 else: # pragma: no cover 

1441 self._raise_if_pid_reused() 

1442 if sig != signal.SIGTERM and not self.is_running(): 

1443 msg = "process no longer exists" 

1444 raise NoSuchProcess(self.pid, self._name, msg=msg) 

1445 self._proc.send_signal(sig) 

1446 

1447 def suspend(self) -> None: 

1448 """Suspend process execution with SIGSTOP pre-emptively checking 

1449 whether PID has been reused. 

1450 On Windows this has the effect of suspending all process threads. 

1451 """ 

1452 if POSIX: 

1453 self._send_signal(signal.SIGSTOP) 

1454 else: # pragma: no cover 

1455 self._raise_if_pid_reused() 

1456 self._proc.suspend() 

1457 

1458 def resume(self) -> None: 

1459 """Resume process execution with SIGCONT pre-emptively checking 

1460 whether PID has been reused. 

1461 On Windows this has the effect of resuming all process threads. 

1462 """ 

1463 if POSIX: 

1464 self._send_signal(signal.SIGCONT) 

1465 else: # pragma: no cover 

1466 self._raise_if_pid_reused() 

1467 self._proc.resume() 

1468 

1469 def terminate(self) -> None: 

1470 """Terminate the process with SIGTERM pre-emptively checking 

1471 whether PID has been reused. 

1472 On Windows this is an alias for kill(). 

1473 """ 

1474 if POSIX: 

1475 self._send_signal(signal.SIGTERM) 

1476 else: # pragma: no cover 

1477 self._raise_if_pid_reused() 

1478 self._proc.kill() 

1479 

1480 def kill(self) -> None: 

1481 """Kill the current process with SIGKILL pre-emptively checking 

1482 whether PID has been reused. 

1483 """ 

1484 if POSIX: 

1485 self._send_signal(signal.SIGKILL) 

1486 else: # pragma: no cover 

1487 self._raise_if_pid_reused() 

1488 self._proc.kill() 

1489 

    def wait(self, timeout: float | None = None) -> int | None:
        """Wait for process to terminate, and if process is a children
        of os.getpid(), also return its exit code, else None.
        On Windows there's no such limitation (exit code is always
        returned).

        If the process is already terminated, immediately return None
        instead of raising NoSuchProcess.

        If *timeout* (in seconds) is specified and process is still
        alive, raise TimeoutExpired.

        If *timeout=0* either return immediately or raise
        TimeoutExpired (non-blocking).

        To wait for multiple Process objects use psutil.wait_procs().
        """
        if self.pid == 0:
            msg = "can't wait for PID 0"
            raise ValueError(msg)
        if timeout is not None:
            if not isinstance(timeout, (int, float)):
                msg = f"timeout must be an int or float (got {type(timeout)})"
                raise TypeError(msg)
            if timeout < 0:
                msg = f"timeout must be positive or zero (got {timeout})"
                raise ValueError(msg)

        # A previous wait() already reaped the process: return the
        # cached exit code without calling the platform layer again.
        if self._exitcode is not _SENTINEL:
            return self._exitcode

        try:
            self._exitcode = self._proc.wait(timeout)
        except TimeoutExpired as err:
            # Re-raise enriched with PID and process name for a more
            # informative message.
            exc = TimeoutExpired(timeout, pid=self.pid, name=self._name)
            raise exc from err

        return self._exitcode

1528 

1529 

# The valid attr names which can be processed by Process.as_dict().
# Built from every public Process attribute, minus actions (signals,
# wait) and meta/aggregate helpers which make no sense as info fields.
# fmt: off
_as_dict_attrnames = {
    x for x in dir(Process) if not x.startswith("_") and x not in
    {'send_signal', 'suspend', 'resume', 'terminate', 'kill', 'wait',
     'is_running', 'as_dict', 'parent', 'parents', 'children', 'rlimit',
     'connections', 'memory_full_info', 'oneshot', 'info'}
}
# fmt: on

# Deprecated attrs: not returned by default but still accepted if
# explicitly requested via as_dict(attrs=[...]).
_as_dict_attrnames_deprecated = {'memory_full_info'}

1543 

1544 

1545# ===================================================================== 

1546# --- Popen class 

1547# ===================================================================== 

1548 

1549 

class Popen(Process):
    """Same as subprocess.Popen, but in addition it provides all
    psutil.Process methods in a single class.
    For the following methods which are common to both classes, psutil
    implementation takes precedence:

    * send_signal()
    * terminate()
    * kill()

    This is done in order to avoid killing another process in case its
    PID has been reused, fixing BPO-6973.

      >>> import psutil
      >>> from subprocess import PIPE
      >>> p = psutil.Popen(["python", "-c", "print 'hi'"], stdout=PIPE)
      >>> p.name()
      'python3'
      >>> p.uids()
      user(real=1000, effective=1000, saved=1000)
      >>> p.username()
      'giampaolo'
      >>> p.communicate()
      ('hi', None)
      >>> p.terminate()
      >>> p.wait(timeout=2)
      0
      >>>
    """

    def __init__(self, *args, **kwargs):
        # Explicitly avoid to raise NoSuchProcess in case the process
        # spawned by subprocess.Popen terminates too quickly, see:
        # https://github.com/giampaolo/psutil/issues/193
        self.__subproc = subprocess.Popen(*args, **kwargs)
        self._init(self.__subproc.pid, _ignore_nsp=True)

    def __dir__(self):
        # Expose both psutil.Process and subprocess.Popen attributes.
        return sorted(set(dir(Popen) + dir(subprocess.Popen)))

    def __enter__(self) -> Popen:
        if hasattr(self.__subproc, '__enter__'):
            self.__subproc.__enter__()
        return self

    def __exit__(self, *args, **kwargs):
        if hasattr(self.__subproc, '__exit__'):
            return self.__subproc.__exit__(*args, **kwargs)
        else:
            # Fallback for subprocess.Popen implementations without
            # context-manager support: close the pipes and reap.
            if self.stdout:
                self.stdout.close()
            if self.stderr:
                self.stderr.close()
            try:
                # Flushing a BufferedWriter may raise an error.
                if self.stdin:
                    self.stdin.close()
            finally:
                # Wait for the process to terminate, to avoid zombies.
                self.wait()

    def __getattribute__(self, name):
        # Look the attribute up on this instance / psutil.Process
        # first; fall back to delegating to the wrapped
        # subprocess.Popen object.
        try:
            return object.__getattribute__(self, name)
        except AttributeError:
            try:
                return object.__getattribute__(self.__subproc, name)
            except AttributeError:
                msg = f"{self.__class__!r} has no attribute {name!r}"
                raise AttributeError(msg) from None

    def wait(self, timeout: float | None = None) -> int | None:
        # Short-circuit if subprocess.Popen already reaped the child;
        # otherwise delegate to Process.wait() and mirror the result
        # onto the wrapped object's returncode.
        if self.__subproc.returncode is not None:
            return self.__subproc.returncode
        ret = super().wait(timeout)
        self.__subproc.returncode = ret
        return ret

1627 

1628 

1629# ===================================================================== 

1630# --- system processes related functions 

1631# ===================================================================== 

1632 

1633 

def pids() -> list[int]:
    """Return a sorted list of the PIDs currently running."""
    global _LOWEST_PID
    all_pids = sorted(_psplatform.pids())
    # Remember the smallest PID in a module-level cache.
    _LOWEST_PID = all_pids[0]
    return all_pids

1640 

1641 

def pid_exists(pid: int) -> bool:
    """Return True if the given PID exists in the current process
    list. This is faster than doing "pid in psutil.pids()" and should
    be preferred.
    """
    if pid < 0:
        # Negative PIDs can never exist.
        return False
    if pid == 0 and POSIX:
        # On POSIX we use os.kill() to determine PID existence, but
        # according to "man 2 kill" PID 0 has a special meaning: it
        # refers to "every process in the process group of the
        # calling process", which is not what we want here. Fall
        # back to scanning the full PID list instead.
        return pid in pids()
    return _psplatform.pid_exists(pid)

1658 

1659 

# Cache of Process instances used by process_iter(): {pid: Process}.
_pmap = {}
# PIDs flagged as reused; process_iter() drops their cached entries
# and rebuilds fresh Process instances for them.
_pids_reused = set()

1662 

1663 

def process_iter(
    attrs: list[str] | None = None, ad_value: Any = None
) -> Iterator[Process]:
    """Return a generator yielding a Process instance for all
    running processes.

    Every new Process instance is only created once and then cached
    into an internal table which is updated every time this is used.
    Cache can optionally be cleared via `process_iter.cache_clear()`.

    The sorting order in which processes are yielded is based on
    their PIDs.

    *attrs* and *ad_value* have the same meaning as in
    Process.as_dict(). If *attrs* is specified, as_dict() is called and
    the results are cached so that subsequent method calls (e.g.
    p.name()) return cached values.

    If *attrs* is an empty list it will retrieve all process info
    (slow).
    """
    global _pmap

    def add(pid):
        # Create and cache a new Process instance for *pid*.
        proc = Process(pid)
        pmap[proc.pid] = proc
        return proc

    def remove(pid):
        pmap.pop(pid, None)

    # Work on a copy so concurrent iterations don't step on each
    # other; the module-level cache is swapped in at the end.
    pmap = _pmap.copy()
    a = set(pids())
    b = set(pmap)
    new_pids = a - b
    gone_pids = b - a
    for pid in gone_pids:
        remove(pid)
    # Drop cached instances whose PID was flagged as reused, so a
    # fresh Process gets built for the new owner of that PID.
    while _pids_reused:
        pid = _pids_reused.pop()
        debug(f"refreshing Process instance for reused PID {pid}")
        remove(pid)
    try:
        # dict.fromkeys(new_pids) maps each new PID to None, so after
        # sorting each entry is either (pid, Process) from the cache
        # or (pid, None) for a process seen for the first time.
        ls = sorted(list(pmap.items()) + list(dict.fromkeys(new_pids).items()))
        for pid, proc in ls:
            try:
                if proc is None:  # new process
                    proc = add(pid)
                proc._prefetch = {}  # clear cache
                if attrs is not None:
                    proc._prefetch = proc.as_dict(
                        attrs=attrs, ad_value=ad_value
                    )
                yield proc
            except NoSuchProcess:
                remove(pid)
    finally:
        # Publish the updated cache even if the caller stops
        # iterating early or an exception escapes.
        _pmap = pmap

1722 

1723 

# Public helper attached to the function itself to wipe the cached
# Process instances.
process_iter.cache_clear = lambda: _pmap.clear()  # noqa: PLW0108
process_iter.cache_clear.__doc__ = "Clear process_iter() internal cache."

1726 

1727 

def wait_procs(
    procs: list[Process],
    timeout: float | None = None,
    callback: Callable[[Process], None] | None = None,
) -> tuple[list[Process], list[Process]]:
    """Convenience function which waits for a list of processes to
    terminate.

    Return a (gone, alive) tuple indicating which processes
    are gone and which ones are still alive.

    The gone ones will have a new *returncode* attribute indicating
    process exit status (may be None).

    *callback* is a function which gets called every time a process
    terminates (a Process instance is passed as callback argument).

    Function will return as soon as all processes terminate or when
    *timeout* occurs.
    Differently from Process.wait() it will not raise TimeoutExpired if
    *timeout* occurs.

    Typical use case is:

     - send SIGTERM to a list of processes
     - give them some time to terminate
     - send SIGKILL to those ones which are still alive

    Example:

    >>> def on_terminate(proc):
    ...     print("process {} terminated".format(proc))
    ...
    >>> for p in procs:
    ...    p.terminate()
    ...
    >>> gone, alive = wait_procs(procs, timeout=3, callback=on_terminate)
    >>> for p in alive:
    ...     p.kill()
    """

    def check_gone(proc, timeout):
        # Wait up to *timeout* for *proc*; if it terminated, record
        # its exit code, move it to *gone* and fire the callback.
        try:
            returncode = proc.wait(timeout=timeout)
        except (TimeoutExpired, subprocess.TimeoutExpired):
            pass
        else:
            if returncode is not None or not proc.is_running():
                # Set new Process instance attribute.
                proc.returncode = returncode
                gone.add(proc)
                if callback is not None:
                    callback(proc)

    if timeout is not None and not timeout >= 0:
        msg = f"timeout must be a positive integer, got {timeout}"
        raise ValueError(msg)
    if callback is not None and not callable(callback):
        msg = f"callback {callback!r} is not a callable"
        raise TypeError(msg)

    gone = set()
    alive = set(procs)
    if timeout is not None:
        # Absolute deadline used to shrink the remaining budget on
        # every pass.
        deadline = _timer() + timeout

    while alive:
        if timeout is not None and timeout <= 0:
            break
        for proc in alive:
            # Make sure that every complete iteration (all processes)
            # will last max 1 sec.
            # We do this because we don't want to wait too long on a
            # single process: in case it terminates too late other
            # processes may disappear in the meantime and their PID
            # reused.
            max_timeout = 1.0 / len(alive)
            if timeout is not None:
                timeout = min((deadline - _timer()), max_timeout)
                if timeout <= 0:
                    break
                check_gone(proc, timeout)
            else:
                check_gone(proc, max_timeout)
        alive = alive - gone  # noqa: PLR6104

    if alive:
        # Last attempt over processes survived so far.
        # timeout == 0 won't make this function wait any further.
        for proc in alive:
            check_gone(proc, 0)
        alive = alive - gone  # noqa: PLR6104

    return (list(gone), list(alive))

1822 

1823 

1824# ===================================================================== 

1825# --- CPU related functions 

1826# ===================================================================== 

1827 

1828 

def cpu_count(logical: bool = True) -> int | None:
    """Return the number of logical CPUs in the system (same as
    os.cpu_count()).

    If *logical* is False return the number of physical cores only
    (e.g. hyper thread CPUs are excluded).

    Return None if undetermined.

    The return value is cached after first call.
    If desired cache can be cleared like this:

    >>> psutil.cpu_count.cache_clear()
    """
    if logical:
        num = _psplatform.cpu_count_logical()
    else:
        num = _psplatform.cpu_count_cores()
    # Treat nonsensical platform answers (0 or negative) as
    # "undetermined".
    if num is not None and num < 1:
        num = None
    return num

1850 

1851 

def cpu_times(percpu: bool = False) -> scputimes | list[scputimes]:
    """Return system-wide CPU times as a named tuple.
    Every CPU time represents the seconds the CPU has spent in the
    given mode. The named tuple's fields availability varies depending
    on the platform:

     - user
     - system
     - idle
     - nice (UNIX)
     - iowait (Linux)
     - irq (Linux, FreeBSD)
     - softirq (Linux)
     - steal (Linux)
     - guest (Linux)
     - guest_nice (Linux)

    When *percpu* is True return a list of named tuples, one per CPU:
    the first element of the list refers to the first CPU, the second
    element to the second CPU, and so on. The order of the list is
    consistent across calls.
    """
    if percpu:
        return _psplatform.per_cpu_times()
    return _psplatform.cpu_times()

1878 

1879 

# Prime the per-thread caches consulted by cpu_percent() when it is
# called with interval=None: keys are thread idents, values are the
# most recent cpu_times() samples.
try:
    _last_cpu_times = {threading.current_thread().ident: cpu_times()}
except Exception:  # noqa: BLE001
    # Don't want to crash at import time.
    _last_cpu_times = {}

# Same as above but for cpu_percent(percpu=True).
try:
    _last_per_cpu_times = {
        threading.current_thread().ident: cpu_times(percpu=True)
    }
except Exception:  # noqa: BLE001
    # Don't want to crash at import time.
    _last_per_cpu_times = {}

1893 

1894 

def _cpu_tot_time(times):
    """Return the total CPU time (idle time included) for a
    cpu_times() ntuple.
    """
    total = sum(times)
    if LINUX:
        # Linux already folds guest times into "user"/"nice", so they
        # must not be counted twice. Htop behaves the same way.
        # References:
        # https://github.com/giampaolo/psutil/pull/940
        # http://unix.stackexchange.com/questions/178045
        # https://github.com/torvalds/linux/blob/447976ef4/kernel/sched/cputime.c#L158
        total -= times.guest
        total -= times.guest_nice
    return total

1910 

1911 

def _cpu_busy_time(times):
    """Return the busy (non-idle) CPU time for a cpu_times() ntuple,
    obtained by stripping all idle components from the total.
    """
    # Start from the grand total and remove the idle part.
    busy = _cpu_tot_time(times) - times.idle
    # Linux: "iowait" is time the CPU spends doing nothing while
    # waiting for IO to complete; it is *not* accounted in "idle"
    # there, so remove it separately. Htop does the same.
    # References:
    # https://github.com/torvalds/linux/blob/447976ef4/kernel/sched/cputime.c#L244
    return busy - getattr(times, "iowait", 0)

1925 

1926 

def _cpu_times_deltas(t1, t2):
    """Return the per-field difference (t2 - t1) between two
    cpu_times() ntuples as a new scputimes ntuple, clamping any
    negative delta to zero.
    """
    assert t1._fields == t2._fields, (t1, t2)
    # CPU times are supposed to be monotonic (time cannot go
    # backwards), yet on Windows and Linux they occasionally decrease;
    # treat such regressions as "no change", like top does:
    # https://github.com/giampaolo/psutil/issues/392
    # https://github.com/giampaolo/psutil/issues/645
    # https://github.com/giampaolo/psutil/issues/1210
    # https://gitlab.com/procps-ng/procps/blob/v3.3.12/top/top.c#L5063
    deltas = [
        max(0, getattr(t2, name) - getattr(t1, name))
        for name in _ntp.scputimes._fields
    ]
    return _ntp.scputimes(*deltas)

1946 

1947 

def cpu_percent(
    interval: float | None = None, percpu: bool = False
) -> float | list[float]:
    """Current system-wide CPU utilization as a percentage (a float).

    With *interval* > 0.0 the call blocks, comparing the system CPU
    times sampled before and after the interval.

    With *interval* 0.0 or None it returns immediately, comparing the
    CPU times elapsed since the previous call (or since module
    import). The very first such call therefore returns a meaningless
    0.0 which should be ignored; for accuracy it is recommended to
    leave at least 0.1 seconds between calls.

    With *percpu* True a list of per-CPU percentages is returned
    instead: the first element refers to the first CPU, the second to
    the second one, and so on. The ordering of the list is consistent
    across calls.

    Examples:

    >>> # blocking, system-wide
    >>> psutil.cpu_percent(interval=1)
    2.0
    >>>
    >>> # blocking, per-cpu
    >>> psutil.cpu_percent(interval=1, percpu=True)
    [2.0, 1.0]
    >>>
    >>> # non-blocking (percentage since last call)
    >>> psutil.cpu_percent(interval=None)
    2.9
    >>>
    """
    if interval is not None and interval < 0:
        msg = f"interval is not positive (got {interval})"
        raise ValueError(msg)
    tid = threading.current_thread().ident
    blocking = interval is not None and interval > 0.0

    def one_percent(before, after):
        # Turn two samples into a single busy percentage.
        deltas = _cpu_times_deltas(before, after)
        total = _cpu_tot_time(deltas)
        busy = _cpu_busy_time(deltas)
        try:
            pct = (busy / total) * 100
        except ZeroDivisionError:
            return 0.0
        return round(pct, 1)

    if percpu:
        # per-cpu usage
        if blocking:
            prev = cpu_times(percpu=True)
            time.sleep(interval)
        else:
            prev = _last_per_cpu_times.get(tid) or cpu_times(percpu=True)
        _last_per_cpu_times[tid] = cpu_times(percpu=True)
        return [
            one_percent(before, after)
            for before, after in zip(prev, _last_per_cpu_times[tid])
        ]

    # system-wide usage
    if blocking:
        prev = cpu_times()
        time.sleep(interval)
    else:
        prev = _last_cpu_times.get(tid) or cpu_times()
    _last_cpu_times[tid] = cpu_times()
    return one_percent(prev, _last_cpu_times[tid])

2024 

2025 

# cpu_times_percent() keeps its own bookkeeping, separate from
# cpu_percent()'s, so that both functions can be used within the same
# program without interfering with each other.
_last_cpu_times_2 = dict(_last_cpu_times)
_last_per_cpu_times_2 = dict(_last_per_cpu_times)

2030 

2031 

def cpu_times_percent(
    interval: float | None = None, percpu: bool = False
) -> scputimes | list[scputimes]:
    """Same as cpu_percent() but the utilization percentage is broken
    down per CPU-time field, mirroring what cpu_times() returns.
    On Linux, for instance:

    >>> cpu_times_percent()
    cpupercent(user=4.8, nice=0.0, system=4.8, idle=90.5, iowait=0.0,
               irq=0.0, softirq=0.0, steal=0.0, guest=0.0, guest_nice=0.0)
    >>>

    The *interval* and *percpu* arguments have the same meaning as in
    cpu_percent().
    """
    if interval is not None and interval < 0:
        msg = f"interval is not positive (got {interval!r})"
        raise ValueError(msg)
    tid = threading.current_thread().ident
    blocking = interval is not None and interval > 0.0

    def percentages(before, after):
        deltas = _cpu_times_deltas(before, after)
        total = _cpu_tot_time(deltas)
        # Multiplier turning each field delta into a percentage.
        # max() guards against division by zero: if total is 0 then
        # every field delta is 0 as well, so every percentage comes
        # out 0 (CPU times are integers, hence total cannot be a
        # fraction).
        scale = 100.0 / max(1, total)
        values = []
        for delta in deltas:
            pct = round(delta * scale, 1)
            # keep each value within the [0.0, 100.0] range
            values.append(min(max(0.0, pct), 100.0))
        return _ntp.scputimes(*values)

    if percpu:
        # per-cpu usage
        if blocking:
            prev = cpu_times(percpu=True)
            time.sleep(interval)
        else:
            prev = _last_per_cpu_times_2.get(tid) or cpu_times(percpu=True)
        _last_per_cpu_times_2[tid] = cpu_times(percpu=True)
        return [
            percentages(before, after)
            for before, after in zip(prev, _last_per_cpu_times_2[tid])
        ]

    # system-wide usage
    if blocking:
        prev = cpu_times()
        time.sleep(interval)
    else:
        prev = _last_cpu_times_2.get(tid) or cpu_times()
    _last_cpu_times_2[tid] = cpu_times()
    return percentages(prev, _last_cpu_times_2[tid])

2091 

2092 

def cpu_stats() -> scpustats:
    """Return CPU statistics as a named tuple."""
    return _psplatform.cpu_stats()

2096 

2097 

if hasattr(_psplatform, "cpu_freq"):

    def cpu_freq(percpu: bool = False) -> scpufreq | list[scpufreq] | None:
        """CPU frequency as a named tuple with current, min and max
        frequency expressed in Mhz.

        When *percpu* is True and the system supports per-cpu
        frequency retrieval (Linux only) a list of frequencies is
        returned, one per CPU; otherwise the list holds a single
        element.
        """
        freqs = _psplatform.cpu_freq()
        if percpu:
            return freqs
        ncpus = float(len(freqs))
        if ncpus == 0:
            return None
        if ncpus == 1:
            return freqs[0]
        # More than one CPU: average the values across all of them.
        curr_sum = 0.0
        min_sum = 0.0
        max_sum = 0.0
        any_none = False
        for entry in freqs:
            curr_sum += entry.current
            # On Linux, when /proc/cpuinfo is the data source, min and
            # max are set to None; propagate that as None in the
            # aggregated result.
            if LINUX and entry.min is None:
                any_none = True
                continue
            min_sum += entry.min
            max_sum += entry.max

        avg_current = curr_sum / ncpus

        if any_none:
            avg_min = avg_max = None
        else:
            avg_min = min_sum / ncpus
            avg_max = max_sum / ncpus

        return _ntp.scpufreq(avg_current, avg_min, avg_max)

    __all__.append("cpu_freq")

2141 

2142 

def getloadavg() -> tuple[float, float, float]:
    """Average system load over the last 1, 5 and 15 minutes, as a
    tuple. On Windows this is emulated through a Windows API that
    spawns a background thread which keeps running and refreshes the
    result every 5 seconds, mimicking the UNIX behavior.
    """
    if hasattr(os, "getloadavg"):
        return os.getloadavg()
    # No native support (Windows): defer to the emulated version.
    return _psplatform.getloadavg()

2153 

2154 

2155# ===================================================================== 

2156# --- system memory related functions 

2157# ===================================================================== 

2158 

2159 

def virtual_memory() -> svmem:
    """Statistics about system memory usage, as a named tuple whose
    fields are expressed in bytes:

     - total:
       total physical memory available.

     - available:
       memory that can be handed to processes instantly without the
       system going into swap. It is computed by summing different
       memory values depending on the platform, and is meant for
       monitoring actual memory usage in a cross-platform fashion.

     - percent:
       usage percentage, computed as (total - available) / total * 100

     - used:
       memory used; computed differently depending on the platform
       and intended for informational purposes only:
        macOS: active + wired
        BSD: active + wired + cached
        Linux: total - free

     - free:
       memory not being used at all (zeroed) that is readily
       available; note this does not reflect the memory actually
       available to applications (use 'available' for that)

    Platform-specific fields:

     - active (UNIX):
       memory currently in use or very recently used, and so in RAM.

     - inactive (UNIX):
       memory that is marked as not used.

     - buffers (BSD, Linux):
       cache for things like file system metadata.

     - cached (BSD, macOS):
       cache for various things.

     - wired (macOS, BSD):
       memory marked to always stay in RAM; it is never moved to disk.

     - shared (BSD):
       memory that may be simultaneously accessed by multiple
       processes.

    The sum of 'used' and 'available' does not necessarily equal
    total. On Windows 'available' and 'free' are the same.
    """
    global _TOTAL_PHYMEM
    mem = _psplatform.virtual_memory()
    # Cache total physical memory for later use in
    # Process.memory_percent().
    _TOTAL_PHYMEM = mem.total
    return mem

2217 

2218 

def swap_memory() -> sswap:
    """System swap memory statistics, as a named tuple with the
    following fields:

     - total: total swap memory in bytes
     - used: used swap memory in bytes
     - free: free swap memory in bytes
     - percent: the percentage usage
     - sin: no. of bytes the system has swapped in from disk (cumulative)
     - sout: no. of bytes the system has swapped out from disk (cumulative)

    On Windows 'sin' and 'sout' are meaningless and always set to 0.
    """
    return _psplatform.swap_memory()

2233 

2234 

2235# ===================================================================== 

2236# --- disks/partitions related functions 

2237# ===================================================================== 

2238 

2239 

def disk_usage(path: str) -> sdiskusage:
    """Disk usage statistics about the given *path*, as a named tuple
    with total, used and free space expressed in bytes, plus the
    usage percentage.
    """
    return _psplatform.disk_usage(path)

2246 

2247 

def disk_partitions(all: bool = False) -> list[sdiskpart]:
    """Mounted partitions, as a list of (device, mountpoint, fstype,
    opts) named tuples. 'opts' is a raw comma-separated string of
    mount options, which may vary depending on the platform.

    When the *all* parameter is False only physical devices are
    returned and everything else is ignored.
    """
    return _psplatform.disk_partitions(all)

2258 

2259 

def disk_io_counters(
    perdisk: bool = False, nowrap: bool = True
) -> sdiskio | dict[str, sdiskio] | None:
    """Return system disk I/O statistics as a named tuple including
    the following fields:

     - read_count: number of reads
     - write_count: number of writes
     - read_bytes: number of bytes read
     - write_bytes: number of bytes written
     - read_time: time spent reading from disk (in ms)
     - write_time: time spent writing to disk (in ms)

    Platform specific:

     - busy_time: (Linux, FreeBSD) time spent doing actual I/Os (in ms)
     - read_merged_count (Linux): number of merged reads
     - write_merged_count (Linux): number of merged writes

    If *perdisk* is True return the same information for every
    physical disk installed on the system as a dictionary
    with partition names as the keys and the named tuple
    described above as the values.

    If *nowrap* is True it detects and adjust the numbers which overflow
    and wrap (restart from 0) and add "old value" to "new value" so that
    the returned numbers will always be increasing or remain the same,
    but never decrease.
    "disk_io_counters.cache_clear()" can be used to invalidate the
    cache.

    If no disks are found return an empty dict (if *perdisk*) or None.

    On recent Windows versions 'diskperf -y' command may need to be
    executed first otherwise this function won't find any disk.
    """
    # Only the Linux implementation takes a "perdisk" argument.
    kwargs = dict(perdisk=perdisk) if LINUX else {}
    rawdict = _psplatform.disk_io_counters(**kwargs)
    if not rawdict:
        # NOTE: the return annotation includes None for this case,
        # consistent with net_io_counters().
        return {} if perdisk else None
    if nowrap:
        rawdict = _wrap_numbers(rawdict, 'psutil.disk_io_counters')
    if perdisk:
        for disk, fields in rawdict.items():
            rawdict[disk] = _ntp.sdiskio(*fields)
        return rawdict
    else:
        # Sum each field across all disks.
        return _ntp.sdiskio(*(sum(x) for x in zip(*rawdict.values())))

2306 

2307 

# Expose a way to reset the "nowrap" bookkeeping that _wrap_numbers()
# keeps for this function.
disk_io_counters.cache_clear = functools.partial(
    _wrap_numbers.cache_clear, 'psutil.disk_io_counters'
)
disk_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"

2312 

2313 

2314# ===================================================================== 

2315# --- network related functions 

2316# ===================================================================== 

2317 

2318 

def net_io_counters(
    pernic: bool = False, nowrap: bool = True
) -> snetio | dict[str, snetio] | None:
    """Network I/O statistics, as a named tuple with the following
    fields:

     - bytes_sent: number of bytes sent
     - bytes_recv: number of bytes received
     - packets_sent: number of packets sent
     - packets_recv: number of packets received
     - errin: total number of errors while receiving
     - errout: total number of errors while sending
     - dropin: total number of incoming packets which were dropped
     - dropout: total number of outgoing packets which were dropped
       (always 0 on macOS and BSD)

    With *pernic* set to True the same information is reported for
    every network interface installed on the system, as a dictionary
    mapping interface names to the named tuple described above.

    With *nowrap* set to True numbers which overflow and wrap around
    (restart from 0) are detected and adjusted by adding the "old
    value" to the "new value", so the returned numbers will always be
    increasing or remain the same, but never decrease.
    "net_io_counters.cache_clear()" can be used to invalidate the
    cache.
    """
    counters = _psplatform.net_io_counters()
    if not counters:
        return {} if pernic else None
    if nowrap:
        counters = _wrap_numbers(counters, 'psutil.net_io_counters')
    if pernic:
        for nic, fields in counters.items():
            counters[nic] = _ntp.snetio(*fields)
        return counters
    # Sum each field across all interfaces.
    totals = [sum(column) for column in zip(*counters.values())]
    return _ntp.snetio(*totals)

2358 

2359 

# Expose a way to reset the "nowrap" bookkeeping that _wrap_numbers()
# keeps for this function.
net_io_counters.cache_clear = functools.partial(
    _wrap_numbers.cache_clear, 'psutil.net_io_counters'
)
net_io_counters.cache_clear.__doc__ = "Clears nowrap argument cache"

2364 

2365 

def net_connections(kind: str = 'inet') -> list[sconn]:
    """System-wide socket connections, as a list of
    (fd, family, type, laddr, raddr, status, pid) named tuples.
    With limited privileges 'fd' and 'pid' may be set to -1 and None
    respectively.
    The *kind* parameter filters for connections matching the
    following criteria:

    +------------+----------------------------------------------------+
    | Kind Value | Connections using                                  |
    +------------+----------------------------------------------------+
    | inet       | IPv4 and IPv6                                      |
    | inet4      | IPv4                                               |
    | inet6      | IPv6                                               |
    | tcp        | TCP                                                |
    | tcp4       | TCP over IPv4                                      |
    | tcp6       | TCP over IPv6                                      |
    | udp        | UDP                                                |
    | udp4       | UDP over IPv4                                      |
    | udp6       | UDP over IPv6                                      |
    | unix       | UNIX socket (both UDP and TCP protocols)           |
    | all        | the sum of all the possible families and protocols |
    +------------+----------------------------------------------------+

    On macOS this function requires root privileges.
    """
    # Validate *kind* up front so a bogus value fails loudly.
    _check_conn_kind(kind)
    return _psplatform.net_connections(kind)

2394 

2395 

def net_if_addrs() -> dict[str, list[snicaddr]]:
    """Return the addresses associated to each NIC (network interface
    card) installed on the system as a dictionary whose keys are the
    NIC names and value is a list of named tuples for each address
    assigned to the NIC. Each named tuple includes 5 fields:

     - family: can be either socket.AF_INET, socket.AF_INET6 or
               psutil.AF_LINK, which refers to a MAC address.
     - address: is the primary address and it is always set.
     - netmask: and 'broadcast' and 'ptp' may be None.
     - ptp: stands for "point to point" and references the
            destination address on a point to point interface
            (typically a VPN).
     - broadcast: and *ptp* are mutually exclusive.

    Note: you can have more than one address of the same family
    associated with each interface.
    """
    rawlist = _psplatform.net_if_addrs()
    rawlist.sort(key=lambda x: x[1])  # sort by family
    ret = collections.defaultdict(list)
    for name, fam, addr, mask, broadcast, ptp in rawlist:
        try:
            fam = socket.AddressFamily(fam)
        except ValueError:
            if WINDOWS and fam == -1:
                fam = _psplatform.AF_LINK
            elif (
                hasattr(_psplatform, "AF_LINK") and fam == _psplatform.AF_LINK
            ):
                # Linux defines AF_LINK as an alias for AF_PACKET.
                # We re-set the family here so that repr(family)
                # will show AF_LINK rather than AF_PACKET
                fam = _psplatform.AF_LINK

        if fam == _psplatform.AF_LINK:
            # The underlying C function may return an incomplete MAC
            # address in which case we fill it with null bytes, see:
            # https://github.com/giampaolo/psutil/issues/786
            separator = ":" if POSIX else "-"
            while addr.count(separator) < 5:
                addr += f"{separator}00"

        nt = _ntp.snicaddr(fam, addr, mask, broadcast, ptp)

        # On Windows broadcast is None, so we determine it via
        # ipaddress module.
        if WINDOWS and fam in {socket.AF_INET, socket.AF_INET6}:
            try:
                broadcast = _common.broadcast_addr(nt)
            except Exception as err:  # noqa: BLE001
                debug(err)
            else:
                if broadcast is not None:
                    # BUGFIX: namedtuple._replace() returns a *new*
                    # tuple; the result was previously discarded, so
                    # the computed broadcast address never made it
                    # into the returned data. Assign it back.
                    nt = nt._replace(broadcast=broadcast)

        ret[name].append(nt)

    return dict(ret)

2455 

2456 

def net_if_stats() -> dict[str, snicstats]:
    """Information about each NIC (network interface card) installed
    on the system, as a dictionary mapping NIC names to named tuples
    with the following fields:

     - isup: whether the interface is up (bool)
     - duplex: one of NIC_DUPLEX_FULL, NIC_DUPLEX_HALF or
               NIC_DUPLEX_UNKNOWN
     - speed: the NIC speed expressed in mega bits (MB); set to 0
              when it can't be determined (e.g. 'localhost')
     - mtu: the maximum transmission unit expressed in bytes.
    """
    return _psplatform.net_if_stats()

2470 

2471 

2472# ===================================================================== 

2473# --- sensors 

2474# ===================================================================== 

2475 

2476 

# Linux, macOS
if hasattr(_psplatform, "sensors_temperatures"):

    def sensors_temperatures(
        fahrenheit: bool = False,
    ) -> dict[str, list[shwtemp]]:
        """Hardware temperatures. Each entry is a named tuple
        representing a certain hardware sensor (it may be a CPU, an
        hard disk or something else, depending on the OS and its
        configuration).
        All temperatures are expressed in celsius unless *fahrenheit*
        is set to True.
        """

        def to_unit(value):
            # None passes through untouched; convert only on request.
            if value is not None:
                return (float(value) * 9 / 5) + 32 if fahrenheit else value

        result = collections.defaultdict(list)
        for name, entries in _psplatform.sensors_temperatures().items():
            for label, current, high, critical in entries:
                current = to_unit(current)
                high = to_unit(high)
                critical = to_unit(critical)

                # When only one of high/critical is available, mirror
                # it onto the missing one.
                if high and not critical:
                    critical = high
                elif critical and not high:
                    high = critical

                result[name].append(
                    _ntp.shwtemp(label, current, high, critical)
                )

        return dict(result)

    __all__.append("sensors_temperatures")

2515 

2516 

# Linux
if hasattr(_psplatform, "sensors_fans"):

    def sensors_fans() -> dict[str, list[sfan]]:
        """Fan speeds. Each entry is a named tuple representing a
        certain hardware sensor; all speeds are expressed in RPM
        (rounds per minute).
        """
        return _psplatform.sensors_fans()

    __all__.append("sensors_fans")

2528 

2529 

# Linux, Windows, FreeBSD, macOS
if hasattr(_psplatform, "sensors_battery"):

    def sensors_battery() -> sbattery | None:
        """Return battery information. If no battery is installed
        returns None.

        - percent: battery power left as a percentage.
        - secsleft: a rough approximation of how many seconds are left
                    before the battery runs out of power. May be
                    POWER_TIME_UNKNOWN or POWER_TIME_UNLIMITED.
        - power_plugged: True if the AC power cable is connected.
        """
        return _psplatform.sensors_battery()

    __all__.append("sensors_battery")

2546 

2547 

2548# ===================================================================== 

2549# --- other system related functions 

2550# ===================================================================== 

2551 

2552 

def boot_time() -> float:
    """The system boot time, expressed in seconds since the epoch
    (seconds since January 1, 1970, at midnight UTC). The value is
    based on the system clock, so it may be affected by changes such
    as manual adjustments or time synchronization (e.g. NTP).
    """
    return _psplatform.boot_time()

2561 

2562 

def users() -> list[suser]:
    """Users currently connected on the system, as a list of named
    tuples with the following fields.

     - user: the name of the user
     - terminal: the tty or pseudo-tty associated with the user, if any.
     - host: the host name associated with the entry, if any.
     - started: the creation time as a floating point number expressed
       in seconds since the epoch.
    """
    return _psplatform.users()

2574 

2575 

2576# ===================================================================== 

2577# --- Windows services 

2578# ===================================================================== 

2579 

2580 

if WINDOWS:

    def win_service_iter() -> Iterator[WindowsService]:
        """A generator yielding a WindowsService instance for every
        Windows service installed on the system.
        """
        return _psplatform.win_service_iter()

    def win_service_get(name) -> WindowsService:
        """Return the Windows service called *name*.
        Raise NoSuchProcess if no service with such name exists.
        """
        return _psplatform.win_service_get(name)

2594 

2595 

2596# ===================================================================== 

2597# --- malloc / heap 

2598# ===================================================================== 

2599 

2600 

# Linux + glibc, Windows, macOS, FreeBSD, NetBSD
if hasattr(_psplatform, "heap_info"):

    def heap_info() -> pheap:
        """Low-level heap statistics from the C heap allocator
        (glibc).

        - `heap_used`: the total number of bytes allocated via
          malloc/free. These are typically allocations smaller than
          MMAP_THRESHOLD.

        - `mmap_used`: the total number of bytes allocated via
          `mmap()` or via large ``malloc()`` allocations.

        - `heap_count` (Windows only): number of private heaps
          created via `HeapCreate()`.
        """
        return _ntp.pheap(*_psplatform.heap_info())

    def heap_trim() -> None:
        """Ask the underlying allocator to free any unused memory it
        is holding in the heap (typically small `malloc()`
        allocations).

        Modern allocators rarely comply, so in practice this is not a
        general-purpose memory-reduction tool and won't meaningfully
        shrink RSS in real programs. Its primary value is in **leak
        detection tools**: calling `heap_trim()` before taking
        measurements reduces allocator noise and gives a cleaner
        baseline, so that changes in `heap_used` come from the code
        under test rather than from internal allocator caching or
        fragmentation. Its effectiveness depends on allocator
        behavior and fragmentation patterns.
        """
        _psplatform.heap_trim()

    __all__.append("heap_info")
    __all__.append("heap_trim")

2640 

2641 

2642# ===================================================================== 

2643 

2644 

def _set_debug(value):
    """Turn the PSUTIL_DEBUG option on or off; when on, debugging
    messages are printed to stderr.
    """
    import psutil._common

    enabled = bool(value)
    psutil._common.PSUTIL_DEBUG = enabled
    # Keep the C extension's debug flag in sync with the Python side.
    _psplatform.cext.set_debug(enabled)

2653 

2654 

# Remove this name from the module namespace: it was imported above for
# internal use only and is not part of the public API.
del memoize_when_activated