Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/psutil-8.0.0-py3.11-linux-x86_64.egg/psutil/_pslinux.py: 19%

Shortcuts on this page

r m x   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

1218 statements  

1# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. 

2# Use of this source code is governed by a BSD-style license that can be 

3# found in the LICENSE file. 

4 

5"""Linux platform implementation.""" 

6 

7import base64 

8import collections 

9import enum 

10import errno 

11import functools 

12import glob 

13import os 

14import re 

15import resource 

16import socket 

17import struct 

18import sys 

19import warnings 

20from collections import defaultdict 

21 

22from . import _ntuples as ntp 

23from . import _psposix 

24from . import _psutil_linux as cext 

25from ._common import ENCODING 

26from ._common import AccessDenied 

27from ._common import NoSuchProcess 

28from ._common import ZombieProcess 

29from ._common import bcat 

30from ._common import cat 

31from ._common import debug 

32from ._common import decode 

33from ._common import get_procfs_path 

34from ._common import isfile_strict 

35from ._common import memoize_when_activated 

36from ._common import open_binary 

37from ._common import open_text 

38from ._common import parse_environ_block 

39from ._common import path_exists_strict 

40from ._common import socktype_to_enum 

41from ._common import supports_ipv6 

42from ._common import usage_percent 

43from ._enums import BatteryTime 

44from ._enums import ConnectionStatus 

45from ._enums import NicDuplex 

46from ._enums import ProcessIOPriority 

47from ._enums import ProcessStatus 

48 

__extra__all__ = ['PROCFS_PATH']


# =====================================================================
# --- globals
# =====================================================================


POWER_SUPPLY_PATH = "/sys/class/power_supply"
# Probe the running kernel (via our own PID) for optional /proc files.
HAS_PROC_SMAPS = os.path.exists(f"/proc/{os.getpid()}/smaps")
HAS_PROC_SMAPS_ROLLUP = os.path.exists(f"/proc/{os.getpid()}/smaps_rollup")
# Optional C-extension features (availability decided at build time).
HAS_PROC_IO_PRIORITY = hasattr(cext, "proc_ioprio_get")
HAS_CPU_AFFINITY = hasattr(cext, "proc_cpu_affinity_get")

# Number of clock ticks per second
CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
PAGESIZE = cext.getpagesize()
LITTLE_ENDIAN = sys.byteorder == 'little'
# Sentinel meaning "no value supplied" (distinct from a valid None).
UNSET = object()

# "man iostat" states that sectors are equivalent with blocks and have
# a size of 512 bytes. Despite this value can be queried at runtime
# via /sys/block/{DISK}/queue/hw_sector_size and results may vary
# between 1k, 2k, or 4k... 512 appears to be a magic constant used
# throughout Linux source code:
# * https://stackoverflow.com/a/38136179/376587
# * https://lists.gt.net/linux/kernel/2241060
# * https://github.com/giampaolo/psutil/issues/1305
# * https://github.com/torvalds/linux/blob/
#     4f671fe2f9523a1ea206f63fe60a7c7b3a56d5c7/include/linux/bio.h#L99
# * https://lkml.org/lkml/2015/8/17/234
DISK_SECTOR_SIZE = 512

# On Linux AF_LINK is an alias for AF_PACKET (raw link-layer sockets).
AddressFamily = enum.IntEnum(
    'AddressFamily', {'AF_LINK': int(socket.AF_PACKET)}
)
AF_LINK = AddressFamily.AF_LINK


# Maps the one-char process state field of /proc/[pid]/stat to our enum.
# See:
# https://github.com/torvalds/linux/blame/master/fs/proc/array.c
# ...and (TASK_* constants):
# https://github.com/torvalds/linux/blob/master/include/linux/sched.h
PROC_STATUSES = {
    "R": ProcessStatus.STATUS_RUNNING,
    "S": ProcessStatus.STATUS_SLEEPING,
    "D": ProcessStatus.STATUS_DISK_SLEEP,
    "T": ProcessStatus.STATUS_STOPPED,
    "t": ProcessStatus.STATUS_TRACING_STOP,
    "Z": ProcessStatus.STATUS_ZOMBIE,
    "X": ProcessStatus.STATUS_DEAD,
    "x": ProcessStatus.STATUS_DEAD,
    "K": ProcessStatus.STATUS_WAKE_KILL,
    "W": ProcessStatus.STATUS_WAKING,
    "I": ProcessStatus.STATUS_IDLE,
    "P": ProcessStatus.STATUS_PARKED,
}

# Maps the hex "st" column of /proc/net/tcp* to our enum.
# https://github.com/torvalds/linux/blob/master/include/net/tcp_states.h
TCP_STATUSES = {
    "01": ConnectionStatus.CONN_ESTABLISHED,
    "02": ConnectionStatus.CONN_SYN_SENT,
    "03": ConnectionStatus.CONN_SYN_RECV,
    "04": ConnectionStatus.CONN_FIN_WAIT1,
    "05": ConnectionStatus.CONN_FIN_WAIT2,
    "06": ConnectionStatus.CONN_TIME_WAIT,
    "07": ConnectionStatus.CONN_CLOSE,
    "08": ConnectionStatus.CONN_CLOSE_WAIT,
    "09": ConnectionStatus.CONN_LAST_ACK,
    "0A": ConnectionStatus.CONN_LISTEN,
    "0B": ConnectionStatus.CONN_CLOSING,
}

121 

122 

123# ===================================================================== 

124# --- utils 

125# ===================================================================== 

126 

127 

def readlink(path):
    """Wrapper around os.readlink()."""
    assert isinstance(path, str), path
    resolved = os.readlink(path)
    # readlink() might return paths containing null bytes ('\x00')
    # resulting in "TypeError: must be encoded string without NULL
    # bytes, not str" errors when the string is passed to other
    # fs-related functions (os.*, open(), ...).
    # Apparently everything after '\x00' is garbage (we can have
    # ' (deleted)', 'new' and possibly others), see:
    # https://github.com/giampaolo/psutil/issues/717
    resolved, _, _ = resolved.partition('\x00')
    # Certain paths have ' (deleted)' appended. Usually this is
    # bogus as the file actually exists. Even if it doesn't we
    # don't care.
    suffix = ' (deleted)'
    if resolved.endswith(suffix) and not path_exists_strict(resolved):
        resolved = resolved[: -len(suffix)]
    return resolved

146 

147 

def file_flags_to_mode(flags):
    """Convert file's open() flags into a readable string.
    Used by Process.open_files().
    """
    # Only the access-mode bits matter for the base letter.
    accmode = flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)
    mode = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'}[accmode]
    if flags & os.O_APPEND:
        # O_APPEND turns write access into append access.
        mode = mode.replace('w', 'a', 1)
    # Normalize: read-write without append is conventionally "r+".
    mode = mode.replace('w+', 'r+')
    # possible values: r, w, a, r+, a+
    return mode

159 

160 

def is_storage_device(name, include_virtual=True):
    """Return True if the given name refers to a root device (e.g.
    "sda", "nvme0n1") as opposed to a logical partition (e.g. "sda1",
    "nvme0n1p1"). If name is a virtual device (e.g. "loop1", "ram")
    return True, unless *include_virtual* is False.

    The previous implementation hard-coded ``including_virtual = True``
    right before testing it, leaving the "physical devices only" branch
    unreachable; it is now a keyword parameter whose default preserves
    the old behavior.
    """
    # Re-adapted from iostat source code, see:
    # https://github.com/sysstat/sysstat/blob/97912938cd476/common.c#L208
    # Some devices may have a slash in their name (e.g. cciss/c0d0...),
    # which sysfs encodes as '!'.
    name = name.replace('/', '!')
    if include_virtual:
        # Any device with a /sys/block entry, virtual ones included.
        path = f"/sys/block/{name}"
    else:
        # Only hardware-backed devices expose a "device" subdirectory.
        path = f"/sys/block/{name}/device"
    return os.access(path, os.F_OK)

177 

178 

179# ===================================================================== 

180# --- system memory 

181# ===================================================================== 

182 

183 

def calculate_avail_vmem(mems):
    """Fallback for kernels < 3.14 where /proc/meminfo does not provide
    "MemAvailable", see:
    https://blog.famzah.net/2014/09/24/.

    This code reimplements the algorithm outlined here:
    https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/
        commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773

    We use this function also when "MemAvailable" returns 0 (possibly a
    kernel bug, see: https://github.com/giampaolo/psutil/issues/1915).
    In that case this routine matches "free" CLI tool result ("available"
    column).

    XXX: on recent kernels this calculation may differ by ~1.5% compared
    to "MemAvailable:", as it's calculated slightly differently.
    It is still way more realistic than doing (free + cached) though.
    See:
    * https://gitlab.com/procps-ng/procps/issues/42
    * https://github.com/famzah/linux-memavailable-procfs/issues/2

    *mems* maps raw /proc/meminfo field names (bytes, with trailing
    colon, e.g. b'MemFree:') to values in bytes. Returns an int of
    estimated available bytes.
    """
    # Note about "fallback" value. According to:
    # https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/commit/?id=34e431b0ae398
    # ...long ago "available" memory was calculated as (free + cached),
    # We use fallback when one of these is missing from /proc/meminfo:
    # "Active(file)": introduced in 2.6.28 / Dec 2008
    # "Inactive(file)": introduced in 2.6.28 / Dec 2008
    # "SReclaimable": introduced in 2.6.19 / Nov 2006
    # /proc/zoneinfo: introduced in 2.6.13 / Aug 2005
    free = mems[b'MemFree:']
    fallback = free + mems.get(b"Cached:", 0)
    try:
        lru_active_file = mems[b'Active(file):']
        lru_inactive_file = mems[b'Inactive(file):']
        slab_reclaimable = mems[b'SReclaimable:']
    except KeyError as err:
        debug(
            f"{err.args[0]} is missing from /proc/meminfo; using an"
            " approximation for calculating available memory"
        )
        return fallback
    try:
        f = open_binary(f"{get_procfs_path()}/zoneinfo")
    except OSError:
        return fallback  # kernel 2.6.13

    # Sum the "low" watermark (expressed in pages) across all zones.
    watermark_low = 0
    with f:
        for line in f:
            line = line.strip()
            if line.startswith(b'low'):
                watermark_low += int(line.split()[1])
    watermark_low *= PAGESIZE

    # Mirror the kernel's estimate: free pages above the watermark,
    # plus the reclaimable halves of the page cache and slab.
    avail = free - watermark_low
    pagecache = lru_active_file + lru_inactive_file
    pagecache -= min(pagecache / 2, watermark_low)
    avail += pagecache
    avail += slab_reclaimable - min(slab_reclaimable / 2.0, watermark_low)
    return int(avail)

244 

245 

def virtual_memory():
    """Report virtual memory stats.
    This implementation mimics procps-ng-3.3.12, aka "free" CLI tool:
    https://gitlab.com/procps-ng/procps/blob/
        24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L778-791
    The returned values are supposed to match both "free" and "vmstat -s"
    CLI tools.

    Returns an ntp.svmem named tuple; fields which cannot be determined
    on this kernel are set to 0 and reported via a RuntimeWarning.
    """
    missing_fields = []
    mems = {}
    # Parse /proc/meminfo; values are reported in kB, convert to bytes.
    with open_binary(f"{get_procfs_path()}/meminfo") as f:
        for line in f:
            fields = line.split()
            mems[fields[0]] = int(fields[1]) * 1024

    # /proc doc states that the available fields in /proc/meminfo vary
    # by architecture and compile options, but these 3 values are also
    # returned by sysinfo(2); as such we assume they are always there.
    total = mems[b'MemTotal:']
    free = mems[b'MemFree:']
    try:
        buffers = mems[b'Buffers:']
    except KeyError:
        # https://github.com/giampaolo/psutil/issues/1010
        buffers = 0
        missing_fields.append('buffers')
    try:
        cached = mems[b"Cached:"]
    except KeyError:
        cached = 0
        missing_fields.append('cached')
    else:
        # "free" cmdline utility sums reclaimable to cached.
        # Older versions of procps used to add slab memory instead.
        # This got changed in:
        # https://gitlab.com/procps-ng/procps/-/commit/05d751c4f
        cached += mems.get(b"SReclaimable:", 0)  # since kernel 2.6.19

    try:
        shared = mems[b'Shmem:']  # since kernel 2.6.32
    except KeyError:
        try:
            shared = mems[b'MemShared:']  # kernels 2.4
        except KeyError:
            shared = 0
            missing_fields.append('shared')

    try:
        active = mems[b"Active:"]
    except KeyError:
        active = 0
        missing_fields.append('active')

    try:
        inactive = mems[b"Inactive:"]
    except KeyError:
        try:
            # Very old (2.4-era) kernels split "inactive" in three.
            inactive = (
                mems[b"Inact_dirty:"]
                + mems[b"Inact_clean:"]
                + mems[b"Inact_laundry:"]
            )
        except KeyError:
            inactive = 0
            missing_fields.append('inactive')

    try:
        slab = mems[b"Slab:"]
    except KeyError:
        slab = 0

    # - starting from 4.4.0 we match free's "available" column.
    #   Before 4.4.0 we calculated it as (free + buffers + cached)
    #   which matched htop.
    # - free and htop available memory differs as per:
    #   http://askubuntu.com/a/369589
    #   http://unix.stackexchange.com/a/65852/168884
    # - MemAvailable has been introduced in kernel 3.14
    try:
        avail = mems[b'MemAvailable:']
    except KeyError:
        avail = calculate_avail_vmem(mems)
    else:
        if avail == 0:
            # Yes, it can happen (probably a kernel bug):
            # https://github.com/giampaolo/psutil/issues/1915
            # In this case "free" CLI tool makes an estimate. We do the same,
            # and it matches "free" CLI tool.
            avail = calculate_avail_vmem(mems)

    if avail < 0:
        avail = 0
        missing_fields.append('available')
    elif avail > total:
        # If avail is greater than total or our calculation overflows,
        # that's symptomatic of running within a LCX container where such
        # values will be dramatically distorted over those of the host.
        # https://gitlab.com/procps-ng/procps/blob/24fd2605c51fcc/proc/sysinfo.c#L764
        avail = free

    used = total - avail

    percent = usage_percent((total - avail), total, round_=1)

    # Warn about missing metrics which are set to 0.
    if missing_fields:
        msg = "{} memory stats couldn't be determined and {} set to 0".format(
            ", ".join(missing_fields),
            "was" if len(missing_fields) == 1 else "were",
        )
        warnings.warn(msg, RuntimeWarning, stacklevel=2)

    return ntp.svmem(
        total,
        avail,
        percent,
        used,
        free,
        active,
        inactive,
        buffers,
        cached,
        shared,
        slab,
    )

371 

372 

def swap_memory():
    """Return swap memory metrics as an ntp.sswap named tuple:
    (total, used, free, percent, sin, sout), values in bytes.
    """
    mems = {}
    # /proc/meminfo values are in kB; convert to bytes.
    with open_binary(f"{get_procfs_path()}/meminfo") as f:
        for line in f:
            fields = line.split()
            mems[fields[0]] = int(fields[1]) * 1024
    # We prefer /proc/meminfo over sysinfo() syscall so that
    # psutil.PROCFS_PATH can be used in order to allow retrieval
    # for linux containers, see:
    # https://github.com/giampaolo/psutil/issues/1015
    try:
        total = mems[b'SwapTotal:']
        free = mems[b'SwapFree:']
    except KeyError:
        _, _, _, _, total, free, unit_multiplier = cext.linux_sysinfo()
        total *= unit_multiplier
        free *= unit_multiplier

    used = total - free
    percent = usage_percent(used, total, round_=1)
    # get pgin/pgouts
    try:
        f = open_binary(f"{get_procfs_path()}/vmstat")
    except OSError as err:
        # see https://github.com/giampaolo/psutil/issues/722
        msg = (
            "'sin' and 'sout' swap memory stats couldn't "
            f"be determined and were set to 0 ({err})"
        )
        warnings.warn(msg, RuntimeWarning, stacklevel=2)
        sin = sout = 0
    else:
        with f:
            sin = sout = None
            for line in f:
                # values are expressed in 4 kilo bytes, we want
                # bytes instead
                if line.startswith(b'pswpin'):
                    sin = int(line.split(b' ')[1]) * 4 * 1024
                elif line.startswith(b'pswpout'):
                    sout = int(line.split(b' ')[1]) * 4 * 1024
                if sin is not None and sout is not None:
                    break
            else:
                # NOTE: this "else" belongs to the for loop (no break),
                # i.e. one or both counters were never found.
                # we might get here when dealing with exotic Linux
                # flavors, see:
                # https://github.com/giampaolo/psutil/issues/313
                msg = "'sin' and 'sout' swap memory stats couldn't "
                msg += "be determined and were set to 0"
                warnings.warn(msg, RuntimeWarning, stacklevel=2)
                sin = sout = 0
    return ntp.sswap(total, used, free, percent, sin, sout)

426 

427 

# malloc / heap functions; require glibc
if hasattr(cext, "heap_info"):
    # Only exposed when the C extension was built against glibc's
    # malloc introspection / trimming APIs.
    heap_info = cext.heap_info
    heap_trim = cext.heap_trim

432 

433 

434# ===================================================================== 

435# --- CPU 

436# ===================================================================== 

437 

438 

def cpu_times():
    """Return a named tuple representing system-wide CPU times.

    Times come from the first line of /proc/stat and are converted
    from clock ticks (jiffies) to seconds.
    """

    def lsget(lst, idx, field_name):
        # Fields past "idle" were added across kernel versions; on very
        # old kernels they are absent, so default them to 0.
        try:
            return lst[idx]
        except IndexError:
            debug(f"can't get {field_name} CPU time; set it to 0")
            return 0

    procfs_path = get_procfs_path()
    with open_binary(f"{procfs_path}/stat") as f:
        values = f.readline().split()
    nfields = len(ntp.scputimes._fields)
    raw = [float(x) / CLOCK_TICKS for x in values[1 : nfields + 1]]
    # /proc/stat column order is: user, nice, system, idle, iowait,
    # irq, softirq, steal, guest, guest_nice -- the same order as
    # scputimes' fields. BUGFIX: the first four values were previously
    # passed as (user, system, idle, nice), which assigned system time
    # to "nice", idle time to "system" and nice time to "idle".
    user, nice, system, idle = raw[:4]
    return ntp.scputimes(
        user,
        nice,
        system,
        idle,
        lsget(raw, 4, "iowait"),  # Linux >= 2.5.41
        lsget(raw, 5, "irq"),  # Linux >= 2.6.0
        lsget(raw, 6, "softirq"),  # Linux >= 2.6.0
        lsget(raw, 7, "steal"),  # Linux >= 2.6.11
        lsget(raw, 8, "guest"),  # Linux >= 2.6.24
        lsget(raw, 9, "guest_nice"),  # Linux >= 2.6.33
    )

467 

468 

def per_cpu_times():
    """Return a list of named tuples representing the CPU times
    for every CPU available on the system.
    """
    procfs_path = get_procfs_path()
    cpus = []
    nfields = len(ntp.scputimes._fields)
    with open_binary(f"{procfs_path}/stat") as f:
        # get rid of the first line which refers to system wide CPU stats
        f.readline()
        for line in f:
            if line.startswith(b'cpu'):
                values = line.split()
                raw = [float(x) / CLOCK_TICKS for x in values[1 : nfields + 1]]
                # /proc/stat columns are already in scputimes field
                # order (user, nice, system, idle, iowait, ...).
                # BUGFIX: the first four values were previously passed
                # as (user, system, idle, nice), scrambling the
                # nice/system/idle fields.
                entry = ntp.scputimes(*raw)
                cpus.append(entry)
    return cpus

487 

488 

def cpu_count_logical():
    """Return the number of logical CPUs in the system."""
    try:
        return os.sysconf("SC_NPROCESSORS_ONLN")
    except ValueError:
        # sysconf unsupported: count "processor" entries in
        # /proc/cpuinfo as a second fallback.
        with open_binary(f"{get_procfs_path()}/cpuinfo") as f:
            num = sum(
                1 for line in f if line.lower().startswith(b'processor')
            )

        # unknown format (e.g. amrel/sparc architectures), see:
        # https://github.com/giampaolo/psutil/issues/200
        # try to parse /proc/stat as a last resort
        if num == 0:
            cpu_re = re.compile(r'cpu\d')
            with open_text(f"{get_procfs_path()}/stat") as f:
                for line in f:
                    if cpu_re.match(line.split(' ')[0]):
                        num += 1

        # mimic os.cpu_count()
        return num if num != 0 else None

516 

517 

def cpu_count_cores():
    """Return the number of CPU cores in the system."""
    # Method #1: sysfs topology. Each file lists the logical CPUs
    # sharing one physical core; the number of distinct lists equals
    # the number of cores.
    # These 2 files are the same but */core_cpus_list is newer while
    # */thread_siblings_list is deprecated and may disappear in the future.
    # https://www.kernel.org/doc/Documentation/admin-guide/cputopology.rst
    # https://github.com/giampaolo/psutil/pull/1727#issuecomment-707624964
    # https://lkml.org/lkml/2019/2/26/41
    new_glob = "/sys/devices/system/cpu/cpu[0-9]*/topology/core_cpus_list"
    old_glob = (
        "/sys/devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list"
    )
    siblings = set()
    for path in glob.glob(new_glob) or glob.glob(old_glob):
        with open_binary(path) as f:
            siblings.add(f.read().strip())
    if siblings:
        return len(siblings)

    # Method #2: parse /proc/cpuinfo, recording "cpu cores" per
    # distinct "physical id" section (sections are blank-line
    # separated) and summing across sockets.
    cores_per_socket = {}
    section = {}
    with open_binary(f"{get_procfs_path()}/cpuinfo") as f:
        for line in f:
            line = line.strip().lower()
            if not line:
                # blank line == end of one processor's section
                if b'physical id' in section and b'cpu cores' in section:
                    cores_per_socket[section[b'physical id']] = section[
                        b'cpu cores'
                    ]
                section = {}
            elif line.startswith((b'physical id', b'cpu cores')):
                key, value = line.split(b':', 1)
                section[key.strip()] = int(value)

    return sum(cores_per_socket.values()) or None  # mimic os.cpu_count()

558 

559 

def cpu_stats():
    """Return various CPU stats as a named tuple."""
    ctx_switches = interrupts = soft_interrupts = None
    with open_binary(f"{get_procfs_path()}/stat") as f:
        for line in f:
            if line.startswith(b'ctxt'):
                ctx_switches = int(line.split()[1])
            elif line.startswith(b'intr'):
                interrupts = int(line.split()[1])
            elif line.startswith(b'softirq'):
                soft_interrupts = int(line.split()[1])
            # Stop scanning once all three counters have been found.
            if None not in (ctx_switches, interrupts, soft_interrupts):
                break
    # Number of syscalls is not exposed by Linux.
    syscalls = 0
    return ntp.scpustats(ctx_switches, interrupts, soft_interrupts, syscalls)

581 

582 

def _cpu_get_cpuinfo_freq():
    """Return current CPU frequency from cpuinfo if available."""
    freqs = []
    with open_binary(f"{get_procfs_path()}/cpuinfo") as f:
        for line in f:
            # One "cpu MHz : 1234.567" line per logical CPU.
            if line.lower().startswith(b'cpu mhz'):
                freqs.append(float(line.split(b':', 1)[1]))
    return freqs

591 

592 

# cpu_freq() has two implementations; the one to use is chosen once at
# import time depending on whether cpufreq scaling data is under /sys.
if os.path.exists("/sys/devices/system/cpu/cpufreq/policy0") or os.path.exists(
    "/sys/devices/system/cpu/cpu0/cpufreq"
):

    def cpu_freq():
        """Return frequency metrics for all CPUs.
        Contrarily to other OSes, Linux updates these values in
        real-time.

        Returns a list of ntp.scpufreq(current, min, max) tuples
        (values in MHz), one entry per CPU.
        """
        cpuinfo_freqs = _cpu_get_cpuinfo_freq()
        paths = glob.glob(
            "/sys/devices/system/cpu/cpufreq/policy[0-9]*"
        ) or glob.glob("/sys/devices/system/cpu/cpu[0-9]*/cpufreq")
        # Sort by the CPU number embedded in the path so entries line
        # up with the /proc/cpuinfo list.
        paths.sort(key=lambda x: int(re.search(r"[0-9]+", x).group()))
        ret = []
        pjoin = os.path.join
        for i, path in enumerate(paths):
            if len(paths) == len(cpuinfo_freqs):
                # take cached value from cpuinfo if available, see:
                # https://github.com/giampaolo/psutil/issues/1851
                curr = cpuinfo_freqs[i] * 1000
            else:
                curr = bcat(pjoin(path, "scaling_cur_freq"), fallback=None)
            if curr is None:
                # Likely an old RedHat, see:
                # https://github.com/giampaolo/psutil/issues/1071
                curr = bcat(pjoin(path, "cpuinfo_cur_freq"), fallback=None)
                if curr is None:
                    online_path = f"/sys/devices/system/cpu/cpu{i}/online"
                    # if cpu core is offline, set to all zeroes
                    if cat(online_path, fallback=None) == "0\n":
                        ret.append(ntp.scpufreq(0.0, 0.0, 0.0))
                        continue
                    msg = "can't find current frequency file"
                    raise NotImplementedError(msg)
            # sysfs reports kHz; convert to MHz.
            curr = int(curr) / 1000
            max_ = int(bcat(pjoin(path, "scaling_max_freq"))) / 1000
            min_ = int(bcat(pjoin(path, "scaling_min_freq"))) / 1000
            ret.append(ntp.scpufreq(curr, min_, max_))
        return ret

else:

    def cpu_freq():
        """Alternate implementation using /proc/cpuinfo.
        min and max frequencies are not available and are set to None.
        """
        return [ntp.scpufreq(x, 0.0, 0.0) for x in _cpu_get_cpuinfo_freq()]

641 

642 

643# ===================================================================== 

644# --- network 

645# ===================================================================== 

646 

647 

# Per-NIC address info is implemented entirely in the C extension.
net_if_addrs = cext.net_if_addrs


class _Ipv6UnsupportedError(Exception):
    """Internal: raised by NetConnections.decode_address() when an IPv6
    address is encountered but the system lacks IPv6 support."""

    pass

653 

654 

class NetConnections:
    """A wrapper on top of /proc/net/* files, retrieving per-process
    and system-wide open connections (TCP, UDP, UNIX) similarly to
    "netstat -an".

    Note: in case of UNIX sockets we're only able to determine the
    local endpoint/path, not the one it's connected to.
    According to [1] it would be possible but not easily.

    [1] http://serverfault.com/a/417946
    """

    def __init__(self):
        # The string represents the basename of the corresponding
        # /proc/net/{proto_name} file.
        tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM)
        tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
        udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM)
        udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
        unix = ("unix", socket.AF_UNIX, None)
        # Maps retrieve()'s "kind" argument to the set of
        # (proc file, family, type) triplets to parse.
        self.tmap = {
            "all": (tcp4, tcp6, udp4, udp6, unix),
            "tcp": (tcp4, tcp6),
            "tcp4": (tcp4,),
            "tcp6": (tcp6,),
            "udp": (udp4, udp6),
            "udp4": (udp4,),
            "udp6": (udp6,),
            "unix": (unix,),
            "inet": (tcp4, tcp6, udp4, udp6),
            "inet4": (tcp4, udp4),
            "inet6": (tcp6, udp6),
        }
        # Re-resolved on every retrieve() call so that changes to
        # psutil.PROCFS_PATH are honored.
        self._procfs_path = None

    def get_proc_inodes(self, pid):
        """Return {inode: [(pid, fd), ...]} for one process, resolved
        from its /proc/[pid]/fd/* symlinks.
        """
        inodes = defaultdict(list)
        for fd in os.listdir(f"{self._procfs_path}/{pid}/fd"):
            try:
                inode = readlink(f"{self._procfs_path}/{pid}/fd/{fd}")
            except (FileNotFoundError, ProcessLookupError):
                # ENOENT == file which is gone in the meantime;
                # os.stat(f"/proc/{self.pid}") will be done later
                # to force NSP (if it's the case)
                continue
            except OSError as err:
                if err.errno == errno.EINVAL:
                    # not a link
                    continue
                if err.errno == errno.ENAMETOOLONG:
                    # file name too long
                    debug(err)
                    continue
                raise
            else:
                if inode.startswith('socket:['):
                    # the process is using a socket
                    inode = inode[8:][:-1]
                    inodes[inode].append((pid, int(fd)))
        return inodes

    def get_all_inodes(self):
        """Return the merged inode map for every PID on the system,
        silently skipping processes we cannot inspect.
        """
        inodes = {}
        for pid in pids():
            try:
                inodes.update(self.get_proc_inodes(pid))
            except (FileNotFoundError, ProcessLookupError, PermissionError):
                # os.listdir() is gonna raise a lot of access denied
                # exceptions in case of unprivileged user; that's fine
                # as we'll just end up returning a connection with PID
                # and fd set to None anyway.
                # Both netstat -an and lsof does the same so it's
                # unlikely we can do any better.
                # ENOENT just means a PID disappeared on us.
                continue
        return inodes

    @staticmethod
    def decode_address(addr, family):
        """Accept an "ip:port" address as displayed in /proc/net/*
        and convert it into a human readable form, like:

        "0500000A:0016" -> ("10.0.0.5", 22)
        "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)

        The IP address portion is a little or big endian four-byte
        hexadecimal number; that is, the least significant byte is listed
        first, so we need to reverse the order of the bytes to convert it
        to an IP address.
        The port is represented as a two-byte hexadecimal number.

        Reference:
        http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
        """
        ip, port = addr.split(':')
        port = int(port, 16)
        # this usually refers to a local socket in listen mode with
        # no end-points connected
        if not port:
            return ()
        ip = ip.encode('ascii')
        if family == socket.AF_INET:
            # see: https://github.com/giampaolo/psutil/issues/201
            if LITTLE_ENDIAN:
                ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
            else:
                ip = socket.inet_ntop(family, base64.b16decode(ip))
        else:  # IPv6
            ip = base64.b16decode(ip)
            try:
                # see: https://github.com/giampaolo/psutil/issues/201
                if LITTLE_ENDIAN:
                    ip = socket.inet_ntop(
                        socket.AF_INET6,
                        struct.pack('>4I', *struct.unpack('<4I', ip)),
                    )
                else:
                    ip = socket.inet_ntop(
                        socket.AF_INET6,
                        struct.pack('<4I', *struct.unpack('<4I', ip)),
                    )
            except ValueError:
                # see: https://github.com/giampaolo/psutil/issues/623
                if not supports_ipv6():
                    raise _Ipv6UnsupportedError from None
                raise
        return ntp.addr(ip, port)

    @staticmethod
    def process_inet(file, family, type_, inodes, filter_pid=None):
        """Parse /proc/net/tcp* and /proc/net/udp* files."""
        if file.endswith('6') and not os.path.exists(file):
            # IPv6 not supported
            return
        with open_text(file) as f:
            f.readline()  # skip the first line
            for lineno, line in enumerate(f, 1):
                try:
                    _, laddr, raddr, status, _, _, _, _, _, inode = (
                        line.split()[:10]
                    )
                except ValueError:
                    msg = (
                        f"error while parsing {file}; malformed line"
                        f" {lineno} {line!r}"
                    )
                    raise RuntimeError(msg) from None
                if inode in inodes:
                    # # We assume inet sockets are unique, so we error
                    # # out if there are multiple references to the
                    # # same inode. We won't do this for UNIX sockets.
                    # if len(inodes[inode]) > 1 and family != socket.AF_UNIX:
                    #     raise ValueError("ambiguous inode with multiple "
                    #                      "PIDs references")
                    pid, fd = inodes[inode][0]
                else:
                    pid, fd = None, -1
                if filter_pid is not None and filter_pid != pid:
                    continue
                else:
                    if type_ == socket.SOCK_STREAM:
                        status = TCP_STATUSES[status]
                    else:
                        status = ConnectionStatus.CONN_NONE
                    try:
                        laddr = NetConnections.decode_address(laddr, family)
                        raddr = NetConnections.decode_address(raddr, family)
                    except _Ipv6UnsupportedError:
                        continue
                    yield (fd, family, type_, laddr, raddr, status, pid)

    @staticmethod
    def process_unix(file, family, inodes, filter_pid=None):
        """Parse /proc/net/unix files."""
        with open_text(file) as f:
            f.readline()  # skip the first line
            for line in f:
                tokens = line.split()
                try:
                    _, _, _, _, type_, _, inode = tokens[0:7]
                except ValueError:
                    if ' ' not in line:
                        # see: https://github.com/giampaolo/psutil/issues/766
                        continue
                    msg = (
                        f"error while parsing {file}; malformed line {line!r}"
                    )
                    raise RuntimeError(msg)  # noqa: B904
                if inode in inodes:  # noqa: SIM108
                    # With UNIX sockets we can have a single inode
                    # referencing many file descriptors.
                    pairs = inodes[inode]
                else:
                    pairs = [(None, -1)]
                for pid, fd in pairs:
                    if filter_pid is not None and filter_pid != pid:
                        continue
                    else:
                        path = tokens[-1] if len(tokens) == 8 else ''
                        type_ = socktype_to_enum(int(type_))
                        # XXX: determining the remote endpoint of a
                        # UNIX socket on Linux is not possible, see:
                        # https://serverfault.com/questions/252723/
                        raddr = ""
                        status = ConnectionStatus.CONN_NONE
                        yield (fd, family, type_, path, raddr, status, pid)

    def retrieve(self, kind, pid=None):
        """Return connections of the given *kind*: system-wide when
        *pid* is None, else only the ones owned by that process.
        """
        self._procfs_path = get_procfs_path()
        if pid is not None:
            inodes = self.get_proc_inodes(pid)
            if not inodes:
                # no connections for this process
                return []
        else:
            inodes = self.get_all_inodes()
        ret = set()
        for proto_name, family, type_ in self.tmap[kind]:
            path = f"{self._procfs_path}/net/{proto_name}"
            if family in {socket.AF_INET, socket.AF_INET6}:
                ls = self.process_inet(
                    path, family, type_, inodes, filter_pid=pid
                )
            else:
                ls = self.process_unix(path, family, inodes, filter_pid=pid)
            for fd, family, type_, laddr, raddr, status, bound_pid in ls:
                if pid:
                    conn = ntp.pconn(fd, family, type_, laddr, raddr, status)
                else:
                    conn = ntp.sconn(
                        fd, family, type_, laddr, raddr, status, bound_pid
                    )
                ret.add(conn)
        return list(ret)

889 

890 

# Module-level singleton shared by net_connections() and the per-process
# connection APIs.
_net_connections = NetConnections()


def net_connections(kind='inet'):
    """Return system-wide open connections."""
    return _net_connections.retrieve(kind)

897 

898 

def net_io_counters():
    """Return network I/O statistics for every network interface
    installed on the system as a dict of raw tuples.
    """
    retdict = {}
    with open_text(f"{get_procfs_path()}/net/dev") as f:
        lines = f.readlines()
    # The first two lines of /proc/net/dev are table headers; each
    # remaining line is "  iface: <8 rx counters> <8 tx counters>".
    for line in lines[2:]:
        colon = line.rfind(':')
        assert colon > 0, repr(line)
        name = line[:colon].strip()
        fields = line[colon + 1 :].strip().split()
        (
            bytes_recv, packets_recv, errin, dropin,
            _fifoin, _framein, _compressedin, _multicastin,  # rx, unused
            bytes_sent, packets_sent, errout, dropout,
            _fifoout, _collisionsout, _carrierout, _compressedout,  # tx, unused
        ) = map(int, fields)
        retdict[name] = (
            bytes_sent,
            bytes_recv,
            packets_sent,
            packets_recv,
            errin,
            errout,
            dropin,
            dropout,
        )
    return retdict

944 

945 

def net_if_stats():
    """Get NIC stats (isup, duplex, speed, mtu)."""
    duplex_map = {
        cext.DUPLEX_FULL: NicDuplex.NIC_DUPLEX_FULL,
        cext.DUPLEX_HALF: NicDuplex.NIC_DUPLEX_HALF,
        cext.DUPLEX_UNKNOWN: NicDuplex.NIC_DUPLEX_UNKNOWN,
    }
    ret = {}
    # NIC names come from the same source as net_io_counters().
    for name in net_io_counters():
        try:
            mtu = cext.net_if_mtu(name)
            flags = cext.net_if_flags(name)
            duplex, speed = cext.net_if_duplex_speed(name)
        except OSError as err:
            # The NIC may have disappeared in the meantime, see:
            # https://github.com/giampaolo/psutil/issues/1279
            if err.errno != errno.ENODEV:
                raise
            debug(err)
            continue
        ret[name] = ntp.snicstats(
            'running' in flags,
            duplex_map[duplex],
            speed,
            mtu,
            ','.join(flags),
        )
    return ret

972 

973 

974# ===================================================================== 

975# --- disks 

976# ===================================================================== 

977 

978 

# Linux needs nothing special here; reuse the shared POSIX implementation.
disk_usage = _psposix.disk_usage

980 

981 

def disk_io_counters(perdisk=False):
    """Return disk I/O statistics for every disk installed on the
    system as a dict of raw tuples.

    With perdisk=False, partition entries are filtered out and only
    base storage devices are reported (see is_storage_device()).
    Stats are read from /proc/diskstats when available, otherwise
    from the per-device "stat" files under /sys/block.
    """

    def read_procfs():
        # OK, this is a bit confusing. The format of /proc/diskstats can
        # have 3 variations.
        # On Linux 2.4 each line has always 15 fields, e.g.:
        # "3 0 8 hda 8 8 8 8 8 8 8 8 8 8 8"
        # On Linux 2.6+ each line *usually* has 14 fields, and the disk
        # name is in another position, like this:
        # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8"
        # ...unless (Linux 2.6) the line refers to a partition instead
        # of a disk, in which case the line has less fields (7):
        # "3 1 hda1 8 8 8 8"
        # 4.18+ has 4 fields added:
        # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8 0 0 0 0"
        # 5.5 has 2 more fields.
        # See:
        # https://www.kernel.org/doc/Documentation/iostats.txt
        # https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
        with open_text(f"{get_procfs_path()}/diskstats") as f:
            lines = f.readlines()
        for line in lines:
            fields = line.split()
            flen = len(fields)
            # fmt: off
            if flen == 15:
                # Linux 2.4
                name = fields[3]
                reads = int(fields[2])
                (reads_merged, rbytes, rtime, writes, writes_merged,
                 wbytes, wtime, _, busy_time, _) = map(int, fields[4:14])
            elif flen == 14 or flen >= 18:
                # Linux 2.6+, line referring to a disk
                name = fields[2]
                (reads, reads_merged, rbytes, rtime, writes, writes_merged,
                 wbytes, wtime, _, busy_time, _) = map(int, fields[3:14])
            elif flen == 7:
                # Linux 2.6+, line referring to a partition; only 4
                # counters are available, the rest default to 0.
                name = fields[2]
                reads, rbytes, writes, wbytes = map(int, fields[3:])
                rtime = wtime = reads_merged = writes_merged = busy_time = 0
            else:
                msg = f"not sure how to interpret line {line!r}"
                raise ValueError(msg)
            yield (name, reads, writes, rbytes, wbytes, rtime, wtime,
                   reads_merged, writes_merged, busy_time)
            # fmt: on

    def read_sysfs():
        # Fallback when /proc/diskstats is missing: walk /sys/block
        # and parse each device's "stat" file (same counters, first
        # 10 fields).
        for block in os.listdir('/sys/block'):
            for root, _, files in os.walk(os.path.join('/sys/block', block)):
                if 'stat' not in files:
                    continue
                with open_text(os.path.join(root, 'stat')) as f:
                    fields = f.read().strip().split()
                name = os.path.basename(root)
                # fmt: off
                (reads, reads_merged, rbytes, rtime, writes, writes_merged,
                 wbytes, wtime, _, busy_time) = map(int, fields[:10])
                yield (name, reads, writes, rbytes, wbytes, rtime,
                       wtime, reads_merged, writes_merged, busy_time)
                # fmt: on

    if os.path.exists(f"{get_procfs_path()}/diskstats"):
        gen = read_procfs()
    elif os.path.exists('/sys/block'):
        gen = read_sysfs()
    else:
        msg = (
            f"{get_procfs_path()}/diskstats nor /sys/block are available on"
            " this system"
        )
        raise NotImplementedError(msg)

    retdict = {}
    for entry in gen:
        # fmt: off
        (name, reads, writes, rbytes, wbytes, rtime, wtime, reads_merged,
         writes_merged, busy_time) = entry
        if not perdisk and not is_storage_device(name):
            # perdisk=False means we want to calculate totals so we skip
            # partitions (e.g. 'sda1', 'nvme0n1p1') and only include
            # base disk devices (e.g. 'sda', 'nvme0n1'). Base disks
            # include a total of all their partitions + some extra size
            # of their own:
            # $ cat /proc/diskstats
            # 259 0 sda 10485760 ...
            # 259 1 sda1 5186039 ...
            # 259 1 sda2 5082039 ...
            # See:
            # https://github.com/giampaolo/psutil/pull/1313
            continue

        # The kernel reports sector counts; convert to bytes.
        rbytes *= DISK_SECTOR_SIZE
        wbytes *= DISK_SECTOR_SIZE
        retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime,
                         reads_merged, writes_merged, busy_time)
        # fmt: on

    return retdict

1085 

1086 

class RootFsDeviceFinder:
    """disk_partitions() may return partitions with device == "/dev/root"
    or "rootfs". This container class uses different strategies to try to
    obtain the real device path. Resources:
    https://bootlin.com/blog/find-root-device/
    https://www.systutorials.com/how-to-find-the-disk-where-root-is-on-in-bash-on-linux/.
    """

    __slots__ = ['major', 'minor']

    def __init__(self):
        # Device number of whatever filesystem is mounted on "/".
        dev = os.stat("/").st_dev
        self.major = os.major(dev)
        self.minor = os.minor(dev)

    def ask_proc_partitions(self):
        # Strategy 1: look the (major, minor) pair up in
        # /proc/partitions (first two lines are headers).
        with open_text(f"{get_procfs_path()}/partitions") as f:
            for line in f.readlines()[2:]:
                fields = line.split()
                if len(fields) < 4:  # just for extra safety
                    continue
                major = int(fields[0]) if fields[0].isdigit() else None
                minor = int(fields[1]) if fields[1].isdigit() else None
                name = fields[3]
                if major == self.major and minor == self.minor:
                    if name:  # just for extra safety
                        return f"/dev/{name}"

    def ask_sys_dev_block(self):
        # Strategy 2: read DEVNAME= from the device's sysfs uevent file.
        path = f"/sys/dev/block/{self.major}:{self.minor}/uevent"
        with open_text(path) as f:
            for line in f:
                if line.startswith("DEVNAME="):
                    name = line.strip().rpartition("DEVNAME=")[2]
                    if name:  # just for extra safety
                        return f"/dev/{name}"

    def ask_sys_class_block(self):
        # Strategy 3: scan /sys/class/block/*/dev for a matching
        # "major:minor" string.
        needle = f"{self.major}:{self.minor}"
        files = glob.iglob("/sys/class/block/*/dev")
        for file in files:
            try:
                f = open_text(file)
            except FileNotFoundError:  # race condition
                continue
            else:
                with f:
                    data = f.read().strip()
                if data == needle:
                    name = os.path.basename(os.path.dirname(file))
                    return f"/dev/{name}"

    def find(self):
        """Try each strategy in order and return the first existing
        "/dev/*" path found, else None.
        """
        # The previous implementation repeated the same try/except plus
        # "is path still None?" pattern three times, including a dead
        # check right after "path = None"; looping over the strategies
        # is equivalent and shorter.
        path = None
        for strategy in (
            self.ask_proc_partitions,
            self.ask_sys_dev_block,
            self.ask_sys_class_block,
        ):
            try:
                path = strategy()
            except OSError as err:
                debug(err)
            if path is not None:
                break
        # We use exists() because the "/dev/*" part of the path is hard
        # coded, so we want to be sure.
        if path is not None and os.path.exists(path):
            return path

1160 

1161 

def disk_partitions(all=False):
    """Return mounted disk partitions as a list of named tuples.

    When *all* is False, entries whose fstype is marked "nodev" in
    /proc/filesystems (except zfs) and entries with no device are
    filtered out.
    """
    fstypes = set()
    procfs_path = get_procfs_path()
    if not all:
        with open_text(f"{procfs_path}/filesystems") as f:
            for line in f:
                line = line.strip()
                if not line.startswith("nodev"):
                    # "line" is already stripped above; no need to
                    # strip it a second time.
                    fstypes.add(line)
                else:
                    # ignore all lines starting with "nodev" except
                    # "nodev zfs"
                    fstype = line.split("\t")[1]
                    if fstype == "zfs":
                        fstypes.add("zfs")

    # See: https://github.com/giampaolo/psutil/issues/1307
    if procfs_path == "/proc" and os.path.isfile('/etc/mtab'):
        mounts_path = os.path.realpath("/etc/mtab")
    else:
        mounts_path = os.path.realpath(f"{procfs_path}/self/mounts")

    retlist = []
    partitions = cext.disk_partitions(mounts_path)
    for partition in partitions:
        device, mountpoint, fstype, opts = partition
        if device == 'none':
            device = ''
        if device in {"/dev/root", "rootfs"}:
            # Resolve the synthetic root-device name to the real one,
            # keeping the original name if resolution fails.
            device = RootFsDeviceFinder().find() or device
        if not all:
            if not device or fstype not in fstypes:
                continue
        ntuple = ntp.sdiskpart(device, mountpoint, fstype, opts)
        retlist.append(ntuple)

    return retlist

1199 

1200 

1201# ===================================================================== 

1202# --- sensors 

1203# ===================================================================== 

1204 

1205 

def sensors_temperatures():
    """Return hardware (CPU and others) temperatures as a dict
    including hardware name, label, current, max and critical
    temperatures.

    Implementation notes:
    - /sys/class/hwmon looks like the most recent interface to
      retrieve this info, and this implementation relies on it
      only (old distros will probably use something else)
    - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
    - /sys/class/thermal/thermal_zone* is another one but it's more
      difficult to parse
    """
    ret = collections.defaultdict(list)
    basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*')
    # CentOS has an intermediate /device directory:
    # https://github.com/giampaolo/psutil/issues/971
    # https://github.com/nicolargo/glances/issues/1060
    basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*'))
    # Deduplicate on the "tempN" prefix (each sensor has several
    # attribute files: _input, _max, _crit, _label, ...).
    basenames = sorted({x.split('_')[0] for x in basenames})

    # Only add the coretemp hwmon entries if they're not already in
    # /sys/class/hwmon/
    # https://github.com/giampaolo/psutil/issues/1708
    # https://github.com/giampaolo/psutil/pull/1648
    basenames2 = glob.glob(
        '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*'
    )
    repl = re.compile(r"/sys/devices/platform/coretemp.*/hwmon/")
    for name in basenames2:
        altname = repl.sub('/sys/class/hwmon/', name)
        if altname not in basenames:
            basenames.append(name)

    for base in basenames:
        try:
            path = base + '_input'
            # Values are exposed in thousandths of a degree (hwmon
            # sysfs convention); convert to degrees Celsius.
            current = float(bcat(path)) / 1000.0
            path = os.path.join(os.path.dirname(base), 'name')
            unit_name = cat(path).strip()
        except (OSError, ValueError):
            # A lot of things can go wrong here, so let's just skip the
            # whole entry. Sure thing is Linux's /sys/class/hwmon really
            # is a stinky broken mess.
            # https://github.com/giampaolo/psutil/issues/1009
            # https://github.com/giampaolo/psutil/issues/1101
            # https://github.com/giampaolo/psutil/issues/1129
            # https://github.com/giampaolo/psutil/issues/1245
            # https://github.com/giampaolo/psutil/issues/1323
            continue

        high = bcat(base + '_max', fallback=None)
        critical = bcat(base + '_crit', fallback=None)
        label = cat(base + '_label', fallback='').strip()

        if high is not None:
            try:
                high = float(high) / 1000.0
            except ValueError:
                high = None
        if critical is not None:
            try:
                critical = float(critical) / 1000.0
            except ValueError:
                critical = None

        ret[unit_name].append((label, current, high, critical))

    # Indication that no sensors were detected in /sys/class/hwmon/;
    # fall back on /sys/class/thermal/thermal_zone*.
    if not basenames:
        basenames = glob.glob('/sys/class/thermal/thermal_zone*')
        basenames = sorted(set(basenames))

        for base in basenames:
            try:
                path = os.path.join(base, 'temp')
                current = float(bcat(path)) / 1000.0
                path = os.path.join(base, 'type')
                unit_name = cat(path).strip()
            except (OSError, ValueError) as err:
                debug(err)
                continue

            # high/critical thresholds are published as numbered
            # "trip points", each with a _type and _temp file.
            trip_paths = glob.glob(base + '/trip_point*')
            trip_points = {
                '_'.join(os.path.basename(p).split('_')[0:3])
                for p in trip_paths
            }
            critical = None
            high = None
            for trip_point in trip_points:
                path = os.path.join(base, trip_point + "_type")
                trip_type = cat(path, fallback='').strip()
                if trip_type == 'critical':
                    critical = bcat(
                        os.path.join(base, trip_point + "_temp"), fallback=None
                    )
                elif trip_type == 'high':
                    high = bcat(
                        os.path.join(base, trip_point + "_temp"), fallback=None
                    )

            if high is not None:
                try:
                    high = float(high) / 1000.0
                except ValueError:
                    high = None
            if critical is not None:
                try:
                    critical = float(critical) / 1000.0
                except ValueError:
                    critical = None

            ret[unit_name].append(('', current, high, critical))

    return dict(ret)

1322 

1323 

def sensors_fans():
    """Return hardware fans info (for CPU and other peripherals) as a
    dict including hardware label and current speed.

    Implementation notes:
    - /sys/class/hwmon looks like the most recent interface to
      retrieve this info, and this implementation relies on it
      only (old distros will probably use something else)
    - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
    """
    ret = collections.defaultdict(list)
    entries = glob.glob('/sys/class/hwmon/hwmon*/fan*_*')
    if not entries:
        # CentOS has an intermediate /device directory:
        # https://github.com/giampaolo/psutil/issues/971
        entries = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*')

    # One sensor per "fanN" prefix; each has several attribute files.
    for base in sorted({x.split("_")[0] for x in entries}):
        try:
            speed = int(bcat(base + '_input'))
        except OSError as err:
            debug(err)
            continue
        unit_name = cat(os.path.join(os.path.dirname(base), 'name')).strip()
        label = cat(base + '_label', fallback='').strip()
        ret[unit_name].append(ntp.sfan(label, speed))

    return dict(ret)

1353 

1354 

def sensors_battery():
    """Return battery information.
    Implementation note: it appears /sys/class/power_supply/BAT0/
    directory structure may vary and provide files with the same
    meaning but under different names, see:
    https://github.com/giampaolo/psutil/issues/966.
    """
    # Sentinel distinguishing "file does not exist" from real content.
    null = object()

    def multi_bcat(*paths):
        """Attempt to read the content of multiple files which may
        not exist. If none of them exist return None.
        """
        for path in paths:
            ret = bcat(path, fallback=null)
            if ret != null:
                try:
                    return int(ret)
                except ValueError:
                    # Non-numeric content: return it stripped as-is.
                    return ret.strip()
        return None

    bats = [
        x
        for x in os.listdir(POWER_SUPPLY_PATH)
        if x.startswith('BAT') or 'battery' in x.lower()
    ]
    if not bats:
        return None
    # Get the first available battery. Usually this is "BAT0", except
    # some rare exceptions:
    # https://github.com/giampaolo/psutil/issues/1238
    root = os.path.join(POWER_SUPPLY_PATH, min(bats))

    # Base metrics. Each has two possible file names depending on the
    # driver (energy_* vs charge_*).
    energy_now = multi_bcat(root + "/energy_now", root + "/charge_now")
    power_now = multi_bcat(root + "/power_now", root + "/current_now")
    energy_full = multi_bcat(root + "/energy_full", root + "/charge_full")
    time_to_empty = multi_bcat(root + "/time_to_empty_now")

    # Percent. If we have energy_full the percentage will be more
    # accurate compared to reading /capacity file (float vs. int).
    if energy_full is not None and energy_now is not None:
        try:
            percent = 100.0 * energy_now / energy_full
        except ZeroDivisionError:
            percent = 0.0
    else:
        percent = float(cat(root + "/capacity", fallback=-1))
        if percent == -1:
            # No way to determine charge level; give up entirely.
            return None

    # Is AC power cable plugged in?
    # Note: AC0 is not always available and sometimes (e.g. CentOS7)
    # it's called "AC".
    power_plugged = None
    online = multi_bcat(
        os.path.join(POWER_SUPPLY_PATH, "AC0/online"),
        os.path.join(POWER_SUPPLY_PATH, "AC/online"),
    )
    if online is not None:
        power_plugged = online == 1
    else:
        # Fall back on the battery's own status string.
        status = cat(root + "/status", fallback="").strip().lower()
        if status == "discharging":
            power_plugged = False
        elif status in {"charging", "full"}:
            power_plugged = True

    # Seconds left. Prefer computing from energy/power draw; fall back
    # on the driver-provided time_to_empty (minutes), else "unknown".
    if power_plugged:
        secsleft = BatteryTime.POWER_TIME_UNLIMITED
    elif energy_now is not None and power_now is not None:
        try:
            secsleft = int(energy_now / abs(power_now) * 3600)
        except ZeroDivisionError:
            secsleft = BatteryTime.POWER_TIME_UNKNOWN
    elif time_to_empty is not None:
        secsleft = int(time_to_empty * 60)
        if secsleft < 0:
            secsleft = BatteryTime.POWER_TIME_UNKNOWN
    else:
        secsleft = BatteryTime.POWER_TIME_UNKNOWN

    return ntp.sbattery(percent, secsleft, power_plugged)

1440 

1441 

1442# ===================================================================== 

1443# --- other system functions 

1444# ===================================================================== 

1445 

1446 

def users():
    """Return currently connected users as a list of named tuples."""
    # An empty tty string is normalized to None.
    return [
        ntp.suser(user, tty or None, hostname, tstamp, pid)
        for user, tty, hostname, tstamp, pid in cext.users()
    ]

1456 

1457 

def boot_time():
    """Return the system boot time expressed in seconds since the epoch."""
    path = f"{get_procfs_path()}/stat"
    with open_binary(path) as f:
        # Boot time is the "btime" line of /proc/stat.
        for line in f:
            if line.startswith(b'btime'):
                return float(line.strip().split()[1])
    msg = f"line 'btime' not found in {path}"
    raise RuntimeError(msg)

1467 

1468 

1469# ===================================================================== 

1470# --- processes 

1471# ===================================================================== 

1472 

1473 

def pids():
    """Returns a list of PIDs currently running on the system."""
    # Every all-digits entry under /proc is a PID directory.
    procfs = get_procfs_path().encode(ENCODING)
    return [int(entry) for entry in os.listdir(procfs) if entry.isdigit()]

1478 

1479 

def pid_exists(pid):
    """Check for the existence of a unix PID. Linux TIDs are not
    supported (always return False).
    """
    if not _psposix.pid_exists(pid):
        return False
    else:
        # Linux's apparently does not distinguish between PIDs and TIDs
        # (thread IDs).
        # listdir("/proc") won't show any TID (only PIDs) but
        # os.stat("/proc/{tid}") will succeed if {tid} exists.
        # os.kill() can also be passed a TID. This is quite confusing.
        # In here we want to enforce this distinction and support PIDs
        # only, see:
        # https://github.com/giampaolo/psutil/issues/687
        try:
            # Note: already checked that this is faster than using a
            # regular expr. Also (a lot) faster than doing
            # 'return pid in pids()'
            path = f"{get_procfs_path()}/{pid}/status"
            with open_binary(path) as f:
                for line in f:
                    if line.startswith(b"Tgid:"):
                        tgid = int(line.split()[1])
                        # If tgid and pid are the same then we're
                        # dealing with a process PID.
                        return tgid == pid
                msg = f"'Tgid' line not found in {path}"
                raise ValueError(msg)
        except (OSError, ValueError):
            # Can't read or parse the status file: fall back on the
            # slower full PID listing.
            return pid in pids()

1511 

1512 

def ppid_map():
    """Obtain a {pid: ppid, ...} dict for all running processes in
    one shot. Used to speed up Process.children().
    """
    procfs_path = get_procfs_path()
    ret = {}
    for pid in pids():
        try:
            with open_binary(f"{procfs_path}/{pid}/stat") as f:
                data = f.read()
        except (FileNotFoundError, ProcessLookupError):
            # Process disappeared while iterating; skip it.
            continue
        except PermissionError as err:
            raise AccessDenied(pid) from err
        # The ppid is the second field after the ")" which closes the
        # (possibly space-containing) process name.
        fields = data[data.rfind(b')') + 2 :].split()
        ret[pid] = int(fields[1])
    return ret

1533 

1534 

def wrap_exceptions(fun):
    """Decorator which translates bare OSError exceptions into
    NoSuchProcess and AccessDenied.
    """

    @functools.wraps(fun)
    def wrapper(self, *args, **kwargs):
        pid, name = self.pid, self._name
        try:
            return fun(self, *args, **kwargs)
        except PermissionError as err:
            raise AccessDenied(pid, name) from err
        except ProcessLookupError as err:
            # Prefer reporting a zombie over a vanished process if the
            # process still shows up with a "Z" state.
            self._raise_if_zombie()
            raise NoSuchProcess(pid, name) from err
        except FileNotFoundError as err:
            self._raise_if_zombie()
            # /proc/PID directory may still exist, but the files within
            # it may not, indicating the process is gone, see:
            # https://github.com/giampaolo/psutil/issues/2418
            if not os.path.exists(f"{self._procfs_path}/{pid}/stat"):
                raise NoSuchProcess(pid, name) from err
            # File genuinely missing for another reason: re-raise as-is.
            raise

    return wrapper

1560 

1561 

1562class Process: 

1563 """Linux process implementation.""" 

1564 

1565 __slots__ = [ 

1566 "_cache", 

1567 "_ctime", 

1568 "_name", 

1569 "_ppid", 

1570 "_procfs_path", 

1571 "pid", 

1572 ] 

1573 

1574 def __init__(self, pid): 

1575 self.pid = pid 

1576 self._name = None 

1577 self._ppid = None 

1578 self._ctime = None 

1579 self._procfs_path = get_procfs_path() 

1580 

1581 def _is_zombie(self): 

1582 # Note: most of the times Linux is able to return info about the 

1583 # process even if it's a zombie, and /proc/{pid} will exist. 

1584 # There are some exceptions though, like exe(), cmdline() and 

1585 # memory_maps(). In these cases /proc/{pid}/{file} exists but 

1586 # it's empty. Instead of returning a "null" value we'll raise an 

1587 # exception. 

1588 try: 

1589 data = bcat(f"{self._procfs_path}/{self.pid}/stat") 

1590 except OSError: 

1591 return False 

1592 else: 

1593 rpar = data.rfind(b')') 

1594 status = data[rpar + 2 : rpar + 3] 

1595 return status == b"Z" 

1596 

1597 def _raise_if_zombie(self): 

1598 if self._is_zombie(): 

1599 raise ZombieProcess(self.pid, self._name, self._ppid) 

1600 

    def _raise_if_not_alive(self):
        """Raise NSP if the process disappeared on us."""
        # For those C function who do not raise NSP, possibly returning
        # incorrect or incomplete result. os.stat() raises OSError if
        # /proc/{pid} is gone; wrap_exceptions translates it.
        os.stat(f"{self._procfs_path}/{self.pid}")

1606 

    def _readlink(self, path, fallback=UNSET):
        """readlink() wrapper tolerating procfs races; returns
        *fallback* (if given) when the link cannot be resolved but the
        process dir still exists, else re-raises.
        """
        # * https://github.com/giampaolo/psutil/issues/503
        # os.readlink('/proc/pid/exe') may raise ESRCH (ProcessLookupError)
        # instead of ENOENT (FileNotFoundError) when it races.
        # * ENOENT may occur also if the path actually exists if PID is
        # a low PID (~0-20 range).
        # * https://github.com/giampaolo/psutil/issues/2514
        try:
            return readlink(path)
        except (FileNotFoundError, ProcessLookupError):
            if os.path.lexists(f"{self._procfs_path}/{self.pid}"):
                # Process dir is still there: zombie, or link simply
                # unreadable; report zombie first.
                self._raise_if_zombie()
                if fallback is not UNSET:
                    return fallback
            raise

1622 

    @wrap_exceptions
    @memoize_when_activated
    def _parse_stat_file(self):
        """Parse /proc/{pid}/stat file and return a dict with various
        process info.
        Using "man proc" as a reference: where "man proc" refers to
        position N always subtract 3 (e.g ppid position 4 in
        'man proc' == position 1 in here).
        The return value is cached in case oneshot() ctx manager is
        in use.
        """
        data = bcat(f"{self._procfs_path}/{self.pid}/stat")
        # Process name is between parentheses. It can contain spaces and
        # other parentheses. This is taken into account by looking for
        # the first occurrence of "(" and the last occurrence of ")".
        rpar = data.rfind(b')')
        name = data[data.find(b'(') + 1 : rpar]
        fields = data[rpar + 2 :].split()

        # Values are kept as raw bytes; callers convert as needed.
        ret = {}
        ret['name'] = name
        ret['status'] = fields[0]  # single state letter, e.g. b"Z"
        ret['ppid'] = fields[1]
        ret['ttynr'] = fields[4]
        ret['minflt'] = fields[7]
        ret['majflt'] = fields[9]
        ret['utime'] = fields[11]
        ret['stime'] = fields[12]
        ret['children_utime'] = fields[13]
        ret['children_stime'] = fields[14]
        ret['create_time'] = fields[19]
        ret['cpu_num'] = fields[36]
        try:
            ret['blkio_ticks'] = fields[39]  # aka 'delayacct_blkio_ticks'
        except IndexError:
            # https://github.com/giampaolo/psutil/issues/2455
            debug("can't get blkio_ticks, set iowait to 0")
            ret['blkio_ticks'] = 0

        return ret

1663 

    @wrap_exceptions
    @memoize_when_activated
    def _read_status_file(self):
        """Read /proc/{pid}/status file and return its raw content.
        The return value is cached in case oneshot() ctx manager is
        in use.
        """
        with open_binary(f"{self._procfs_path}/{self.pid}/status") as f:
            return f.read()

1673 

1674 @wrap_exceptions 

1675 @memoize_when_activated 

1676 def _read_smaps_file(self): 

1677 with open_binary(f"{self._procfs_path}/{self.pid}/smaps") as f: 

1678 return f.read().strip() 

1679 

1680 def oneshot_enter(self): 

1681 self._parse_stat_file.cache_activate(self) 

1682 self._read_status_file.cache_activate(self) 

1683 self._read_smaps_file.cache_activate(self) 

1684 

1685 def oneshot_exit(self): 

1686 self._parse_stat_file.cache_deactivate(self) 

1687 self._read_status_file.cache_deactivate(self) 

1688 self._read_smaps_file.cache_deactivate(self) 

1689 

    @wrap_exceptions
    def name(self):
        """Return the process name (the "comm" field parsed out of
        /proc/{pid}/stat), decoded to str."""
        # XXX - gets changed later and probably needs refactoring
        return decode(self._parse_stat_file()['name'])

1694 

    @wrap_exceptions
    def exe(self):
        """Return the process executable path by resolving the
        /proc/{pid}/exe symlink; "" when the link cannot be resolved
        (see _readlink)."""
        return self._readlink(
            f"{self._procfs_path}/{self.pid}/exe", fallback=""
        )

1700 

    @wrap_exceptions
    def cmdline(self):
        """Return the command line as a list of arguments, parsed from
        /proc/{pid}/cmdline. Returns [] for a process with an empty
        cmdline file (raising ZombieProcess first if it's a zombie)."""
        with open_text(f"{self._procfs_path}/{self.pid}/cmdline") as f:
            data = f.read()
        if not data:
            # may happen in case of zombie process
            self._raise_if_zombie()
            return []
        # 'man proc' states that args are separated by null bytes '\0'
        # and last char is supposed to be a null byte. Nevertheless
        # some processes may change their cmdline after being started
        # (via setproctitle() or similar), they are usually not
        # compliant with this rule and use spaces instead. Google
        # Chrome process is an example. See:
        # https://github.com/giampaolo/psutil/issues/1179
        sep = '\x00' if data.endswith('\x00') else ' '
        if data.endswith(sep):
            data = data[:-1]
        cmdline = data.split(sep)
        # Sometimes last char is a null byte '\0' but the args are
        # separated by spaces, see: https://github.com/giampaolo/psutil/
        # issues/1179#issuecomment-552984549
        if sep == '\x00' and len(cmdline) == 1 and ' ' in data:
            cmdline = data.split(' ')
        return cmdline

1726 

1727 @wrap_exceptions 

1728 def environ(self): 

1729 with open_text(f"{self._procfs_path}/{self.pid}/environ") as f: 

1730 data = f.read() 

1731 return parse_environ_block(data) 

1732 

1733 @wrap_exceptions 

1734 def terminal(self): 

1735 tty_nr = int(self._parse_stat_file()['ttynr']) 

1736 tmap = _psposix.get_terminal_map() 

1737 try: 

1738 return tmap[tty_nr] 

1739 except KeyError: 

1740 return None 

1741 

    # May not be available on old kernels: define io_counters() only if
    # the current kernel exposes /proc/{pid}/io.
    if os.path.exists(f"/proc/{os.getpid()}/io"):

        @wrap_exceptions
        def io_counters(self):
            """Return a pio ntuple with per-process I/O counters parsed
            from /proc/{pid}/io."""
            fname = f"{self._procfs_path}/{self.pid}/io"
            fields = {}
            with open_binary(fname) as f:
                for line in f:
                    # https://github.com/giampaolo/psutil/issues/1004
                    line = line.strip()
                    if line:
                        try:
                            name, value = line.split(b': ')
                        except ValueError:
                            # https://github.com/giampaolo/psutil/issues/1004
                            continue
                        else:
                            fields[name] = int(value)
            if not fields:
                msg = f"{fname} file was empty"
                raise RuntimeError(msg)
            try:
                return ntp.pio(
                    fields[b'syscr'],  # read syscalls
                    fields[b'syscw'],  # write syscalls
                    fields[b'read_bytes'],  # read bytes
                    fields[b'write_bytes'],  # write bytes
                    fields[b'rchar'],  # read chars
                    fields[b'wchar'],  # write chars
                )
            except KeyError as err:
                msg = (
                    f"{err.args[0]!r} field was not found in {fname}; found"
                    f" fields are {fields!r}"
                )
                raise ValueError(msg) from None

1779 

1780 @wrap_exceptions 

1781 def cpu_times(self): 

1782 values = self._parse_stat_file() 

1783 utime = float(values['utime']) / CLOCK_TICKS 

1784 stime = float(values['stime']) / CLOCK_TICKS 

1785 children_utime = float(values['children_utime']) / CLOCK_TICKS 

1786 children_stime = float(values['children_stime']) / CLOCK_TICKS 

1787 iowait = float(values['blkio_ticks']) / CLOCK_TICKS 

1788 return ntp.pcputimes( 

1789 utime, stime, children_utime, children_stime, iowait 

1790 ) 

1791 

    @wrap_exceptions
    def cpu_num(self):
        """What CPU the process is on."""
        # The "processor" field of /proc/{pid}/stat (see
        # _parse_stat_file for the offset convention).
        return int(self._parse_stat_file()['cpu_num'])

1796 

    @wrap_exceptions
    def wait(self, timeout=None):
        """Wait for the process to terminate; delegates to the shared
        POSIX implementation."""
        return _psposix.wait_pid(self.pid, timeout)

1800 

    @wrap_exceptions
    def create_time(self, monotonic=False):
        """Return the process creation time in seconds; with
        monotonic=True the value is relative to system boot, otherwise
        boot time is added to express it since the epoch. The parsed
        value is cached in self._ctime.
        """
        # The 'starttime' field in /proc/[pid]/stat is expressed in
        # jiffies (clock ticks per second), a relative value which
        # represents the number of clock ticks that have passed since
        # the system booted until the process was created. It never
        # changes and is unaffected by system clock updates.
        if self._ctime is None:
            self._ctime = (
                float(self._parse_stat_file()['create_time']) / CLOCK_TICKS
            )
        if monotonic:
            return self._ctime
        # Add the boot time, returning time expressed in seconds since
        # the epoch. This is subject to system clock updates.
        return self._ctime + boot_time()

1817 

    @wrap_exceptions
    def memory_info(self):
        """Return a pmem ntuple (rss, vms, shared, text, data) parsed
        from /proc/{pid}/statm; page counts are converted to bytes via
        PAGESIZE. lib and dirty fields are read but dropped.
        """
        # ============================================================
        # | FIELD  | DESCRIPTION                         | AKA  | TOP  |
        # ============================================================
        # | rss    | resident set size                   |      | RES  |
        # | vms    | total program size                  | size | VIRT |
        # | shared | shared pages (from shared mappings) |      | SHR  |
        # | text   | text ('code')                       | trs  | CODE |
        # | lib    | library (unused in Linux 2.6)       | lrs  |      |
        # | data   | data + stack                        | drs  | DATA |
        # | dirty  | dirty pages (unused in Linux 2.6)   | dt   |      |
        # ============================================================
        with open_binary(f"{self._procfs_path}/{self.pid}/statm") as f:
            vms, rss, shared, text, _lib, data, _dirty = (
                int(x) * PAGESIZE for x in f.readline().split()[:7]
            )
        return ntp.pmem(rss, vms, shared, text, data)

1836 

1837 @wrap_exceptions 

1838 def memory_info_ex( 

1839 self, 

1840 _vmpeak_re=re.compile(br"VmPeak:\s+(\d+)"), 

1841 _vmhwm_re=re.compile(br"VmHWM:\s+(\d+)"), 

1842 _rssanon_re=re.compile(br"RssAnon:\s+(\d+)"), 

1843 _rssfile_re=re.compile(br"RssFile:\s+(\d+)"), 

1844 _rssshmem_re=re.compile(br"RssShmem:\s+(\d+)"), 

1845 _vmswap_re=re.compile(br"VmSwap:\s+(\d+)"), 

1846 _hugetlb_re=re.compile(br"HugetlbPages:\s+(\d+)"), 

1847 ): 

1848 # Read /proc/{pid}/status which provides peak RSS/VMS and a 

1849 # cheaper way to get swap (no smaps parsing needed). 

1850 # RssAnon/RssFile/RssShmem were added in Linux 4.5; 

1851 # VmSwap in 2.6.34; HugetlbPages in 4.4. 

1852 data = self._read_status_file() 

1853 

1854 def parse(regex): 

1855 m = regex.search(data) 

1856 return int(m.group(1)) * 1024 if m else 0 

1857 

1858 return { 

1859 "peak_rss": parse(_vmhwm_re), 

1860 "peak_vms": parse(_vmpeak_re), 

1861 "rss_anon": parse(_rssanon_re), 

1862 "rss_file": parse(_rssfile_re), 

1863 "rss_shmem": parse(_rssshmem_re), 

1864 "swap": parse(_vmswap_re), 

1865 "hugetlb": parse(_hugetlb_re), 

1866 } 

1867 

1868 if HAS_PROC_SMAPS_ROLLUP or HAS_PROC_SMAPS: 

1869 

1870 def _parse_smaps_rollup(self): 

1871 # /proc/pid/smaps_rollup was added to Linux in 2017. Faster 

1872 # than /proc/pid/smaps. It reports higher PSS than */smaps 

1873 # (from 1k up to 200k higher; tested against all processes). 

1874 # IMPORTANT: /proc/pid/smaps_rollup is weird, because it 

1875 # raises ESRCH / ENOENT for many PIDs, even if they're alive 

1876 # (also as root). In that case we'll use /proc/pid/smaps as 

1877 # fallback, which is slower but has a +50% success rate 

1878 # compared to /proc/pid/smaps_rollup. 

1879 uss = pss = swap = 0 

1880 with open_binary( 

1881 f"{self._procfs_path}/{self.pid}/smaps_rollup" 

1882 ) as f: 

1883 for line in f: 

1884 if line.startswith(b"Private_"): 

1885 # Private_Clean, Private_Dirty, Private_Hugetlb 

1886 uss += int(line.split()[1]) * 1024 

1887 elif line.startswith(b"Pss:"): 

1888 pss = int(line.split()[1]) * 1024 

1889 elif line.startswith(b"Swap:"): 

1890 swap = int(line.split()[1]) * 1024 

1891 return (uss, pss, swap) 

1892 

1893 @wrap_exceptions 

1894 def _parse_smaps( 

1895 self, 

1896 # Gets Private_Clean, Private_Dirty, Private_Hugetlb. 

1897 _private_re=re.compile(br"\nPrivate.*:\s+(\d+)"), 

1898 _pss_re=re.compile(br"\nPss\:\s+(\d+)"), 

1899 _swap_re=re.compile(br"\nSwap\:\s+(\d+)"), 

1900 ): 

1901 # /proc/pid/smaps does not exist on kernels < 2.6.14 or if 

1902 # CONFIG_MMU kernel configuration option is not enabled. 

1903 

1904 # Note: using 3 regexes is faster than reading the file 

1905 # line by line. 

1906 # 

1907 # You might be tempted to calculate USS by subtracting 

1908 # the "shared" value from the "resident" value in 

1909 # /proc/<pid>/statm. But at least on Linux, statm's "shared" 

1910 # value actually counts pages backed by files, which has 

1911 # little to do with whether the pages are actually shared. 

1912 # /proc/self/smaps on the other hand appears to give us the 

1913 # correct information. 

1914 smaps_data = self._read_smaps_file() 

1915 # Note: smaps file can be empty for certain processes. 

1916 # The code below will not crash though and will result to 0. 

1917 uss = sum(map(int, _private_re.findall(smaps_data))) * 1024 

1918 pss = sum(map(int, _pss_re.findall(smaps_data))) * 1024 

1919 swap = sum(map(int, _swap_re.findall(smaps_data))) * 1024 

1920 return (uss, pss, swap) 

1921 

1922 @wrap_exceptions 

1923 def memory_footprint(self): 

1924 def fetch(): 

1925 if HAS_PROC_SMAPS_ROLLUP: # faster 

1926 try: 

1927 return self._parse_smaps_rollup() 

1928 except (ProcessLookupError, FileNotFoundError): 

1929 pass 

1930 return self._parse_smaps() 

1931 

1932 uss, pss, swap = fetch() 

1933 return ntp.pfootprint(uss, pss, swap) 

1934 

    if HAS_PROC_SMAPS:

        @wrap_exceptions
        def memory_maps(self):
            """Return process's mapped memory regions as a list of named
            tuples. Fields are explained in 'man proc'; here is an updated
            (Apr 2012) version: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/filesystems/proc.txt?id=b76437579d1344b612cf1851ae610c636cec7db0.

            /proc/{PID}/smaps does not exist on kernels < 2.6.14 or if
            CONFIG_MMU kernel configuration option is not enabled.
            """

            def get_blocks(lines, current_block):
                # Generator yielding one (header_line, fields_dict) pair
                # per mapping. A line whose first token does not end with
                # b':' starts the next mapping's header; any other line is
                # a "Field:   <kB>" entry, converted here to bytes.
                # NOTE(review): `data` is never reset between mappings, so
                # a field absent from one mapping would carry over the
                # previous mapping's value — presumably every mapping
                # exposes the same field set; confirm against kernel
                # output.
                data = {}
                for line in lines:
                    fields = line.split(None, 5)
                    if not fields[0].endswith(b':'):
                        # new block section
                        yield (current_block.pop(), data)
                        current_block.append(line)
                    else:
                        try:
                            data[fields[0]] = int(fields[1]) * 1024
                        except (ValueError, IndexError):
                            if fields[0].startswith(b'VmFlags:'):
                                # see issue #369
                                continue
                            msg = f"don't know how to interpret line {line!r}"
                            raise ValueError(msg) from None
                yield (current_block.pop(), data)

            data = self._read_smaps_file()
            # Note: smaps file can be empty for certain processes or for
            # zombies.
            if not data:
                self._raise_if_zombie()
                return []
            lines = data.split(b'\n')
            ls = []
            first_line = lines.pop(0)
            current_block = [first_line]
            for header, data in get_blocks(lines, current_block):
                hfields = header.split(None, 5)
                try:
                    addr, perms, _offset, _dev, _inode, path = hfields
                except ValueError:
                    # Header without a path field (anonymous mapping).
                    addr, perms, _offset, _dev, _inode, path = hfields + ['']
                if not path:
                    path = '[anon]'
                else:
                    path = decode(path)
                    path = path.strip()
                    # Drop the " (deleted)" suffix the kernel appends,
                    # unless a file by that literal name actually exists.
                    if path.endswith(' (deleted)') and not path_exists_strict(
                        path
                    ):
                        path = path[:-10]
                item = (
                    decode(addr),
                    decode(perms),
                    path,
                    data.get(b'Rss:', 0),
                    data.get(b'Size:', 0),
                    data.get(b'Pss:', 0),
                    data.get(b'Shared_Clean:', 0),
                    data.get(b'Shared_Dirty:', 0),
                    data.get(b'Private_Clean:', 0),
                    data.get(b'Private_Dirty:', 0),
                    data.get(b'Referenced:', 0),
                    data.get(b'Anonymous:', 0),
                    data.get(b'Swap:', 0),
                )
                ls.append(item)
            return ls

2008 

2009 @wrap_exceptions 

2010 def page_faults(self): 

2011 values = self._parse_stat_file() 

2012 return ntp.ppagefaults(int(values['minflt']), int(values['majflt'])) 

2013 

    @wrap_exceptions
    def cwd(self):
        # Resolve the /proc/[pid]/cwd symlink; falls back to an empty
        # string when the link cannot be resolved.
        return self._readlink(
            f"{self._procfs_path}/{self.pid}/cwd", fallback=""
        )

2019 

2020 @wrap_exceptions 

2021 def num_ctx_switches( 

2022 self, _ctxsw_re=re.compile(br'ctxt_switches:\t(\d+)') 

2023 ): 

2024 data = self._read_status_file() 

2025 ctxsw = _ctxsw_re.findall(data) 

2026 if not ctxsw: 

2027 msg = ( 

2028 "'voluntary_ctxt_switches' and" 

2029 " 'nonvoluntary_ctxt_switches'lines were not found in" 

2030 f" {self._procfs_path}/{self.pid}/status; the kernel is" 

2031 " probably older than 2.6.23" 

2032 ) 

2033 raise NotImplementedError(msg) 

2034 return ntp.pctxsw(int(ctxsw[0]), int(ctxsw[1])) 

2035 

2036 @wrap_exceptions 

2037 def num_threads(self, _num_threads_re=re.compile(br'Threads:\t(\d+)')): 

2038 # Using a re is faster than iterating over file line by line. 

2039 data = self._read_status_file() 

2040 return int(_num_threads_re.findall(data)[0]) 

2041 

2042 @wrap_exceptions 

2043 def threads(self): 

2044 thread_ids = os.listdir(f"{self._procfs_path}/{self.pid}/task") 

2045 thread_ids.sort() 

2046 retlist = [] 

2047 hit_enoent = False 

2048 for thread_id in thread_ids: 

2049 fname = f"{self._procfs_path}/{self.pid}/task/{thread_id}/stat" 

2050 try: 

2051 with open_binary(fname) as f: 

2052 st = f.read().strip() 

2053 except (FileNotFoundError, ProcessLookupError): 

2054 # no such file or directory or no such process; 

2055 # it means thread disappeared on us 

2056 hit_enoent = True 

2057 continue 

2058 # ignore the first two values ("pid (exe)") 

2059 st = st[st.find(b')') + 2 :] 

2060 values = st.split(b' ') 

2061 utime = float(values[11]) / CLOCK_TICKS 

2062 stime = float(values[12]) / CLOCK_TICKS 

2063 ntuple = ntp.pthread(int(thread_id), utime, stime) 

2064 retlist.append(ntuple) 

2065 if hit_enoent: 

2066 self._raise_if_not_alive() 

2067 return retlist 

2068 

    @wrap_exceptions
    def nice_get(self):
        # The niceness could also be parsed from field 19 of
        # /proc/[pid]/stat (int(data.split()[18])), but the C extension
        # implementation is used instead.
        return cext.proc_priority_get(self.pid)

2077 

    @wrap_exceptions
    def nice_set(self, value):
        # Set the process niceness via the C extension.
        return cext.proc_priority_set(self.pid, value)

2081 

    # starting from CentOS 6.
    if HAS_CPU_AFFINITY:

        @wrap_exceptions
        def cpu_affinity_get(self):
            # Return the CPUs this process is bound to, via the C
            # extension.
            return cext.proc_cpu_affinity_get(self.pid)

2088 

2089 def _get_eligible_cpus( 

2090 self, _re=re.compile(br"Cpus_allowed_list:\t(\d+)-(\d+)") 

2091 ): 

2092 # See: https://github.com/giampaolo/psutil/issues/956 

2093 data = self._read_status_file() 

2094 match = _re.findall(data) 

2095 if match: 

2096 return list(range(int(match[0][0]), int(match[0][1]) + 1)) 

2097 else: 

2098 return list(range(len(per_cpu_times()))) 

2099 

        @wrap_exceptions
        def cpu_affinity_set(self, cpus):
            # Bind the process to the given CPU numbers. On EINVAL (or a
            # ValueError from the C layer) the requested CPUs are
            # inspected so a more informative ValueError can be raised.
            try:
                cext.proc_cpu_affinity_set(self.pid, cpus)
            except (OSError, ValueError) as err:
                if isinstance(err, ValueError) or err.errno == errno.EINVAL:
                    eligible_cpus = self._get_eligible_cpus()
                    all_cpus = tuple(range(len(per_cpu_times())))
                    for cpu in cpus:
                        if cpu not in all_cpus:
                            # CPU number does not exist on this machine.
                            msg = (
                                f"invalid CPU {cpu!r}; choose between"
                                f" {eligible_cpus!r}"
                            )
                            raise ValueError(msg) from None
                        if cpu not in eligible_cpus:
                            # CPU exists but is not in this process's
                            # Cpus_allowed_list.
                            msg = (
                                f"CPU number {cpu} is not eligible; choose"
                                f" between {eligible_cpus}"
                            )
                            raise ValueError(msg) from err
                    raise

2122 

    # only starting from kernel 2.6.13
    if HAS_PROC_IO_PRIORITY:

        @wrap_exceptions
        def ionice_get(self):
            # Return the I/O priority as a pionice namedtuple; the raw
            # class integer is converted to a ProcessIOPriority enum.
            ioclass, value = cext.proc_ioprio_get(self.pid)
            ioclass = ProcessIOPriority(ioclass)
            return ntp.pionice(ioclass, value)

2131 

2132 @wrap_exceptions 

2133 def ionice_set(self, ioclass, value): 

2134 if value is None: 

2135 value = 0 

2136 if value and ioclass in { 

2137 ProcessIOPriority.IOPRIO_CLASS_IDLE, 

2138 ProcessIOPriority.IOPRIO_CLASS_NONE, 

2139 }: 

2140 msg = f"{ioclass!r} ioclass accepts no value" 

2141 raise ValueError(msg) 

2142 if value < 0 or value > 7: 

2143 msg = "value not in 0-7 range" 

2144 raise ValueError(msg) 

2145 return cext.proc_ioprio_set(self.pid, ioclass, value) 

2146 

    if hasattr(resource, "prlimit"):

        @wrap_exceptions
        def rlimit(self, resource_, limits=None):
            """Get or set process resource limits via resource.prlimit().

            With limits=None the current limits are returned; otherwise
            `limits` must be a (soft, hard) tuple of new limits to set.
            """
            # If pid is 0 prlimit() applies to the calling process and
            # we don't want that. We should never get here though as
            # PID 0 is not supported on Linux.
            if self.pid == 0:
                msg = "can't use prlimit() against PID 0 process"
                raise ValueError(msg)
            try:
                if limits is None:
                    # get
                    return resource.prlimit(self.pid, resource_)
                else:
                    # set
                    if len(limits) != 2:
                        msg = (
                            "second argument must be a (soft, hard) "
                            f"tuple, got {limits!r}"
                        )
                        raise ValueError(msg)
                    resource.prlimit(self.pid, resource_, limits)
            except OSError as err:
                if err.errno == errno.ENOSYS:
                    # I saw this happening on Travis:
                    # https://travis-ci.org/giampaolo/psutil/jobs/51368273
                    self._raise_if_zombie()
                raise

2176 

2177 @wrap_exceptions 

2178 def status(self): 

2179 letter = self._parse_stat_file()['status'] 

2180 letter = letter.decode() 

2181 # XXX is '?' legit? (we're not supposed to return it anyway) 

2182 return PROC_STATUSES.get(letter, '?') 

2183 

    @wrap_exceptions
    def open_files(self):
        """Return the regular files opened by the process as a list of
        popenfile namedtuples (path, fd, position, mode, flags), built
        from the /proc/[pid]/fd and /proc/[pid]/fdinfo directories.
        """
        retlist = []
        files = os.listdir(f"{self._procfs_path}/{self.pid}/fd")
        hit_enoent = False
        for fd in files:
            file = f"{self._procfs_path}/{self.pid}/fd/{fd}"
            try:
                path = readlink(file)
            except (FileNotFoundError, ProcessLookupError):
                # ENOENT == file which is gone in the meantime
                hit_enoent = True
                continue
            except OSError as err:
                if err.errno == errno.EINVAL:
                    # not a link
                    continue
                if err.errno == errno.ENAMETOOLONG:
                    # file name too long
                    debug(err)
                    continue
                raise
            else:
                # If path is not an absolute there's no way to tell
                # whether it's a regular file or not, so we skip it.
                # A regular file is always supposed to be have an
                # absolute path though.
                if path.startswith('/') and isfile_strict(path):
                    # Get file position and flags from fdinfo; the
                    # flags field is octal.
                    file = f"{self._procfs_path}/{self.pid}/fdinfo/{fd}"
                    try:
                        with open_binary(file) as f:
                            pos = int(f.readline().split()[1])
                            flags = int(f.readline().split()[1], 8)
                    except (FileNotFoundError, ProcessLookupError):
                        # fd gone in the meantime; process may
                        # still be alive
                        hit_enoent = True
                    else:
                        mode = file_flags_to_mode(flags)
                        ntuple = ntp.popenfile(
                            path, int(fd), int(pos), mode, flags
                        )
                        retlist.append(ntuple)
        if hit_enoent:
            # At least one fd disappeared while iterating; make sure
            # the process itself still exists.
            self._raise_if_not_alive()
        return retlist

2231 

    @wrap_exceptions
    def net_connections(self, kind='inet'):
        # Delegate to the module-level connections parser, filtered by
        # this PID, then verify the process is still alive (the proc
        # files could otherwise reflect a vanished process).
        ret = _net_connections.retrieve(kind, self.pid)
        self._raise_if_not_alive()
        return ret

2237 

2238 @wrap_exceptions 

2239 def num_fds(self): 

2240 return len(os.listdir(f"{self._procfs_path}/{self.pid}/fd")) 

2241 

2242 @wrap_exceptions 

2243 def ppid(self): 

2244 return int(self._parse_stat_file()['ppid']) 

2245 

2246 @wrap_exceptions 

2247 def uids(self, _uids_re=re.compile(br'Uid:\t(\d+)\t(\d+)\t(\d+)')): 

2248 data = self._read_status_file() 

2249 real, effective, saved = _uids_re.findall(data)[0] 

2250 return ntp.puids(int(real), int(effective), int(saved)) 

2251 

2252 @wrap_exceptions 

2253 def gids(self, _gids_re=re.compile(br'Gid:\t(\d+)\t(\d+)\t(\d+)')): 

2254 data = self._read_status_file() 

2255 real, effective, saved = _gids_re.findall(data)[0] 

2256 return ntp.pgids(int(real), int(effective), int(saved))