Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/psutil-5.9.9-py3.8-linux-x86_64.egg/psutil/_pslinux.py: 24%

1256 statements  

« prev     ^ index     » next       coverage.py v7.4.4, created at 2024-04-02 06:13 +0000

1# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved. 

2# Use of this source code is governed by a BSD-style license that can be 

3# found in the LICENSE file. 

4 

5"""Linux platform implementation.""" 

6 

7from __future__ import division 

8 

9import base64 

10import collections 

11import errno 

12import functools 

13import glob 

14import os 

15import re 

16import socket 

17import struct 

18import sys 

19import warnings 

20from collections import defaultdict 

21from collections import namedtuple 

22 

23from . import _common 

24from . import _psposix 

25from . import _psutil_linux as cext 

26from . import _psutil_posix as cext_posix 

27from ._common import NIC_DUPLEX_FULL 

28from ._common import NIC_DUPLEX_HALF 

29from ._common import NIC_DUPLEX_UNKNOWN 

30from ._common import AccessDenied 

31from ._common import NoSuchProcess 

32from ._common import ZombieProcess 

33from ._common import bcat 

34from ._common import cat 

35from ._common import debug 

36from ._common import decode 

37from ._common import get_procfs_path 

38from ._common import isfile_strict 

39from ._common import memoize 

40from ._common import memoize_when_activated 

41from ._common import open_binary 

42from ._common import open_text 

43from ._common import parse_environ_block 

44from ._common import path_exists_strict 

45from ._common import supports_ipv6 

46from ._common import usage_percent 

47from ._compat import PY3 

48from ._compat import FileNotFoundError 

49from ._compat import PermissionError 

50from ._compat import ProcessLookupError 

51from ._compat import b 

52from ._compat import basestring 

53 

54 

55if PY3: 

56 import enum 

57else: 

58 enum = None 

59 

60 

61# fmt: off 

62__extra__all__ = [ 

63 'PROCFS_PATH', 

64 # io prio constants 

65 "IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE", 

66 "IOPRIO_CLASS_IDLE", 

67 # connection status constants 

68 "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1", 

69 "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT", 

70 "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", 

71] 

72# fmt: on 

73 

74 

75# ===================================================================== 

76# --- globals 

77# ===================================================================== 

78 

79 

80POWER_SUPPLY_PATH = "/sys/class/power_supply" 

81HAS_PROC_SMAPS = os.path.exists('/proc/%s/smaps' % os.getpid()) 

82HAS_PROC_SMAPS_ROLLUP = os.path.exists('/proc/%s/smaps_rollup' % os.getpid()) 

83HAS_PROC_IO_PRIORITY = hasattr(cext, "proc_ioprio_get") 

84HAS_CPU_AFFINITY = hasattr(cext, "proc_cpu_affinity_get") 

85 

86# Number of clock ticks per second 

87CLOCK_TICKS = os.sysconf("SC_CLK_TCK") 

88PAGESIZE = cext_posix.getpagesize() 

89BOOT_TIME = None # set later 

90LITTLE_ENDIAN = sys.byteorder == 'little' 

91 

92# "man iostat" states that sectors are equivalent with blocks and have 

93# a size of 512 bytes. Despite this value can be queried at runtime 

94# via /sys/block/{DISK}/queue/hw_sector_size and results may vary 

95# between 1k, 2k, or 4k... 512 appears to be a magic constant used 

96# throughout Linux source code: 

97# * https://stackoverflow.com/a/38136179/376587 

98# * https://lists.gt.net/linux/kernel/2241060 

99# * https://github.com/giampaolo/psutil/issues/1305 

100# * https://github.com/torvalds/linux/blob/ 

101# 4f671fe2f9523a1ea206f63fe60a7c7b3a56d5c7/include/linux/bio.h#L99 

102# * https://lkml.org/lkml/2015/8/17/234 

103DISK_SECTOR_SIZE = 512 

104 

105if enum is None: 

106 AF_LINK = socket.AF_PACKET 

107else: 

108 AddressFamily = enum.IntEnum( 

109 'AddressFamily', {'AF_LINK': int(socket.AF_PACKET)} 

110 ) 

111 AF_LINK = AddressFamily.AF_LINK 

112 

113# ioprio_* constants http://linux.die.net/man/2/ioprio_get 

114if enum is None: 

115 IOPRIO_CLASS_NONE = 0 

116 IOPRIO_CLASS_RT = 1 

117 IOPRIO_CLASS_BE = 2 

118 IOPRIO_CLASS_IDLE = 3 

119else: 

120 

121 class IOPriority(enum.IntEnum): 

122 IOPRIO_CLASS_NONE = 0 

123 IOPRIO_CLASS_RT = 1 

124 IOPRIO_CLASS_BE = 2 

125 IOPRIO_CLASS_IDLE = 3 

126 

127 globals().update(IOPriority.__members__) 

128 

129# See: 

130# https://github.com/torvalds/linux/blame/master/fs/proc/array.c 

131# ...and (TASK_* constants): 

132# https://github.com/torvalds/linux/blob/master/include/linux/sched.h 

133PROC_STATUSES = { 

134 "R": _common.STATUS_RUNNING, 

135 "S": _common.STATUS_SLEEPING, 

136 "D": _common.STATUS_DISK_SLEEP, 

137 "T": _common.STATUS_STOPPED, 

138 "t": _common.STATUS_TRACING_STOP, 

139 "Z": _common.STATUS_ZOMBIE, 

140 "X": _common.STATUS_DEAD, 

141 "x": _common.STATUS_DEAD, 

142 "K": _common.STATUS_WAKE_KILL, 

143 "W": _common.STATUS_WAKING, 

144 "I": _common.STATUS_IDLE, 

145 "P": _common.STATUS_PARKED, 

146} 

147 

148# https://github.com/torvalds/linux/blob/master/include/net/tcp_states.h 

149TCP_STATUSES = { 

150 "01": _common.CONN_ESTABLISHED, 

151 "02": _common.CONN_SYN_SENT, 

152 "03": _common.CONN_SYN_RECV, 

153 "04": _common.CONN_FIN_WAIT1, 

154 "05": _common.CONN_FIN_WAIT2, 

155 "06": _common.CONN_TIME_WAIT, 

156 "07": _common.CONN_CLOSE, 

157 "08": _common.CONN_CLOSE_WAIT, 

158 "09": _common.CONN_LAST_ACK, 

159 "0A": _common.CONN_LISTEN, 

160 "0B": _common.CONN_CLOSING, 

161} 

162 

163 

164# ===================================================================== 

165# --- named tuples 

166# ===================================================================== 

167 

168 

169# fmt: off 

170# psutil.virtual_memory() 

171svmem = namedtuple( 

172 'svmem', ['total', 'available', 'percent', 'used', 'free', 

173 'active', 'inactive', 'buffers', 'cached', 'shared', 'slab']) 

174# psutil.disk_io_counters() 

175sdiskio = namedtuple( 

176 'sdiskio', ['read_count', 'write_count', 

177 'read_bytes', 'write_bytes', 

178 'read_time', 'write_time', 

179 'read_merged_count', 'write_merged_count', 

180 'busy_time']) 

181# psutil.Process().open_files() 

182popenfile = namedtuple( 

183 'popenfile', ['path', 'fd', 'position', 'mode', 'flags']) 

184# psutil.Process().memory_info() 

185pmem = namedtuple('pmem', 'rss vms shared text lib data dirty') 

186# psutil.Process().memory_full_info() 

187pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', 'pss', 'swap')) 

188# psutil.Process().memory_maps(grouped=True) 

189pmmap_grouped = namedtuple( 

190 'pmmap_grouped', 

191 ['path', 'rss', 'size', 'pss', 'shared_clean', 'shared_dirty', 

192 'private_clean', 'private_dirty', 'referenced', 'anonymous', 'swap']) 

193# psutil.Process().memory_maps(grouped=False) 

194pmmap_ext = namedtuple( 

195 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields)) 

196# psutil.Process.io_counters() 

197pio = namedtuple('pio', ['read_count', 'write_count', 

198 'read_bytes', 'write_bytes', 

199 'read_chars', 'write_chars']) 

200# psutil.Process.cpu_times() 

201pcputimes = namedtuple('pcputimes', 

202 ['user', 'system', 'children_user', 'children_system', 

203 'iowait']) 

204# fmt: on 

205 

206 

207# ===================================================================== 

208# --- utils 

209# ===================================================================== 

210 

211 

212def readlink(path): 

213 """Wrapper around os.readlink().""" 

214 assert isinstance(path, basestring), path 

215 path = os.readlink(path) 

216 # readlink() might return paths containing null bytes ('\x00') 

217 # resulting in "TypeError: must be encoded string without NULL 

218 # bytes, not str" errors when the string is passed to other 

219 # fs-related functions (os.*, open(), ...). 

220 # Apparently everything after '\x00' is garbage (we can have 

221 # ' (deleted)', 'new' and possibly others), see: 

222 # https://github.com/giampaolo/psutil/issues/717 

223 path = path.split('\x00')[0] 

224 # Certain paths have ' (deleted)' appended. Usually this is 

225 # bogus as the file actually exists. Even if it doesn't we 

226 # don't care. 

227 if path.endswith(' (deleted)') and not path_exists_strict(path): 

228 path = path[:-10] 

229 return path 

230 

231 

232def file_flags_to_mode(flags): 

233 """Convert file's open() flags into a readable string. 

234 Used by Process.open_files(). 

235 """ 

236 modes_map = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'} 

237 mode = modes_map[flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)] 

238 if flags & os.O_APPEND: 

239 mode = mode.replace('w', 'a', 1) 

240 mode = mode.replace('w+', 'r+') 

241 # possible values: r, w, a, r+, a+ 

242 return mode 

243 

244 

245def is_storage_device(name): 

246 """Return True if the given name refers to a root device (e.g. 

247 "sda", "nvme0n1") as opposed to a logical partition (e.g. "sda1", 

248 "nvme0n1p1"). If name is a virtual device (e.g. "loop1", "ram") 

249 return True. 

250 """ 

251 # Re-adapted from iostat source code, see: 

252 # https://github.com/sysstat/sysstat/blob/ 

253 # 97912938cd476645b267280069e83b1c8dc0e1c7/common.c#L208 

254 # Some devices may have a slash in their name (e.g. cciss/c0d0...). 

255 name = name.replace('/', '!') 

256 including_virtual = True 

257 if including_virtual: 

258 path = "/sys/block/%s" % name 

259 else: 

260 path = "/sys/block/%s/device" % name 

261 return os.access(path, os.F_OK) 

262 

263 

264@memoize 

265def set_scputimes_ntuple(procfs_path): 

266 """Set a namedtuple of variable fields depending on the CPU times 

267 available on this Linux kernel version which may be: 

268 (user, nice, system, idle, iowait, irq, softirq, [steal, [guest, 

269 [guest_nice]]]) 

270 Used by cpu_times() function. 

271 """ 

272 global scputimes 

273 with open_binary('%s/stat' % procfs_path) as f: 

274 values = f.readline().split()[1:] 

275 fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq'] 

276 vlen = len(values) 

277 if vlen >= 8: 

278 # Linux >= 2.6.11 

279 fields.append('steal') 

280 if vlen >= 9: 

281 # Linux >= 2.6.24 

282 fields.append('guest') 

283 if vlen >= 10: 

284 # Linux >= 3.2.0 

285 fields.append('guest_nice') 

286 scputimes = namedtuple('scputimes', fields) 

287 

288 

289try: 

290 set_scputimes_ntuple("/proc") 

291except Exception as err: # noqa: BLE001 

292 # Don't want to crash at import time. 

293 debug("ignoring exception on import: %r" % err) 

294 scputimes = namedtuple('scputimes', 'user system idle')(0.0, 0.0, 0.0) 

295 

296 

297# ===================================================================== 

298# --- prlimit 

299# ===================================================================== 

300 

301# Backport of resource.prlimit() for Python 2. Originally this was done 

302# in C, but CentOS-6 which we use to create manylinux wheels is too old 

303# and does not support prlimit() syscall. As such the resulting wheel 

304# would not include prlimit(), even when installed on newer systems. 

305# This is the only part of psutil using ctypes. 

306 

307prlimit = None 

308try: 

309 from resource import prlimit # python >= 3.4 

310except ImportError: 

311 import ctypes 

312 

313 libc = ctypes.CDLL(None, use_errno=True) 

314 

315 if hasattr(libc, "prlimit"): 

316 

317 def prlimit(pid, resource_, limits=None): 

318 class StructRlimit(ctypes.Structure): 

319 _fields_ = [ 

320 ('rlim_cur', ctypes.c_longlong), 

321 ('rlim_max', ctypes.c_longlong), 

322 ] 

323 

324 current = StructRlimit() 

325 if limits is None: 

326 # get 

327 ret = libc.prlimit(pid, resource_, None, ctypes.byref(current)) 

328 else: 

329 # set 

330 new = StructRlimit() 

331 new.rlim_cur = limits[0] 

332 new.rlim_max = limits[1] 

333 ret = libc.prlimit( 

334 pid, resource_, ctypes.byref(new), ctypes.byref(current) 

335 ) 

336 

337 if ret != 0: 

338 errno_ = ctypes.get_errno() 

339 raise OSError(errno_, os.strerror(errno_)) 

340 return (current.rlim_cur, current.rlim_max) 

341 

342 

343if prlimit is not None: 

344 __extra__all__.extend( 

345 [x for x in dir(cext) if x.startswith('RLIM') and x.isupper()] 

346 ) 

347 

348 

349# ===================================================================== 

350# --- system memory 

351# ===================================================================== 

352 

353 

354def calculate_avail_vmem(mems): 

355 """Fallback for kernels < 3.14 where /proc/meminfo does not provide 

356 "MemAvailable", see: 

357 https://blog.famzah.net/2014/09/24/. 

358 

359 This code reimplements the algorithm outlined here: 

360 https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/ 

361 commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773 

362 

363 We use this function also when "MemAvailable" returns 0 (possibly a 

364 kernel bug, see: https://github.com/giampaolo/psutil/issues/1915). 

365 In that case this routine matches "free" CLI tool result ("available" 

366 column). 

367 

368 XXX: on recent kernels this calculation may differ by ~1.5% compared 

369 to "MemAvailable:", as it's calculated slightly differently. 

370 It is still way more realistic than doing (free + cached) though. 

371 See: 

372 * https://gitlab.com/procps-ng/procps/issues/42 

373 * https://github.com/famzah/linux-memavailable-procfs/issues/2 

374 """ 

375 # Note about "fallback" value. According to: 

376 # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/ 

377 # commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773 

378 # ...long ago "available" memory was calculated as (free + cached), 

379 # We use fallback when one of these is missing from /proc/meminfo: 

380 # "Active(file)": introduced in 2.6.28 / Dec 2008 

381 # "Inactive(file)": introduced in 2.6.28 / Dec 2008 

382 # "SReclaimable": introduced in 2.6.19 / Nov 2006 

383 # /proc/zoneinfo: introduced in 2.6.13 / Aug 2005 

384 free = mems[b'MemFree:'] 

385 fallback = free + mems.get(b"Cached:", 0) 

386 try: 

387 lru_active_file = mems[b'Active(file):'] 

388 lru_inactive_file = mems[b'Inactive(file):'] 

389 slab_reclaimable = mems[b'SReclaimable:'] 

390 except KeyError as err: 

391 debug( 

392 "%s is missing from /proc/meminfo; using an approximation for " 

393 "calculating available memory" 

394 % err.args[0] 

395 ) 

396 return fallback 

397 try: 

398 f = open_binary('%s/zoneinfo' % get_procfs_path()) 

399 except IOError: 

400 return fallback # kernel 2.6.13 

401 

402 watermark_low = 0 

403 with f: 

404 for line in f: 

405 line = line.strip() 

406 if line.startswith(b'low'): 

407 watermark_low += int(line.split()[1]) 

408 watermark_low *= PAGESIZE 

409 

410 avail = free - watermark_low 

411 pagecache = lru_active_file + lru_inactive_file 

412 pagecache -= min(pagecache / 2, watermark_low) 

413 avail += pagecache 

414 avail += slab_reclaimable - min(slab_reclaimable / 2.0, watermark_low) 

415 return int(avail) 

416 

417 

418def virtual_memory(): 

419 """Report virtual memory stats. 

420 This implementation mimics procps-ng-3.3.12, aka "free" CLI tool: 

421 https://gitlab.com/procps-ng/procps/blob/ 

422 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L778-791 

423 The returned values are supposed to match both "free" and "vmstat -s" 

424 CLI tools. 

425 """ 

426 missing_fields = [] 

427 mems = {} 

428 with open_binary('%s/meminfo' % get_procfs_path()) as f: 

429 for line in f: 

430 fields = line.split() 

431 mems[fields[0]] = int(fields[1]) * 1024 

432 

433 # /proc doc states that the available fields in /proc/meminfo vary 

434 # by architecture and compile options, but these 3 values are also 

435 # returned by sysinfo(2); as such we assume they are always there. 

436 total = mems[b'MemTotal:'] 

437 free = mems[b'MemFree:'] 

438 try: 

439 buffers = mems[b'Buffers:'] 

440 except KeyError: 

441 # https://github.com/giampaolo/psutil/issues/1010 

442 buffers = 0 

443 missing_fields.append('buffers') 

444 try: 

445 cached = mems[b"Cached:"] 

446 except KeyError: 

447 cached = 0 

448 missing_fields.append('cached') 

449 else: 

450 # "free" cmdline utility sums reclaimable to cached. 

451 # Older versions of procps used to add slab memory instead. 

452 # This got changed in: 

453 # https://gitlab.com/procps-ng/procps/commit/ 

454 # 05d751c4f076a2f0118b914c5e51cfbb4762ad8e 

455 cached += mems.get(b"SReclaimable:", 0) # since kernel 2.6.19 

456 

457 try: 

458 shared = mems[b'Shmem:'] # since kernel 2.6.32 

459 except KeyError: 

460 try: 

461 shared = mems[b'MemShared:'] # kernels 2.4 

462 except KeyError: 

463 shared = 0 

464 missing_fields.append('shared') 

465 

466 try: 

467 active = mems[b"Active:"] 

468 except KeyError: 

469 active = 0 

470 missing_fields.append('active') 

471 

472 try: 

473 inactive = mems[b"Inactive:"] 

474 except KeyError: 

475 try: 

476 inactive = ( 

477 mems[b"Inact_dirty:"] 

478 + mems[b"Inact_clean:"] 

479 + mems[b"Inact_laundry:"] 

480 ) 

481 except KeyError: 

482 inactive = 0 

483 missing_fields.append('inactive') 

484 

485 try: 

486 slab = mems[b"Slab:"] 

487 except KeyError: 

488 slab = 0 

489 

490 used = total - free - cached - buffers 

491 if used < 0: 

492 # May be symptomatic of running within a LCX container where such 

493 # values will be dramatically distorted over those of the host. 

494 used = total - free 

495 

496 # - starting from 4.4.0 we match free's "available" column. 

497 # Before 4.4.0 we calculated it as (free + buffers + cached) 

498 # which matched htop. 

499 # - free and htop available memory differs as per: 

500 # http://askubuntu.com/a/369589 

501 # http://unix.stackexchange.com/a/65852/168884 

502 # - MemAvailable has been introduced in kernel 3.14 

503 try: 

504 avail = mems[b'MemAvailable:'] 

505 except KeyError: 

506 avail = calculate_avail_vmem(mems) 

507 else: 

508 if avail == 0: 

509 # Yes, it can happen (probably a kernel bug): 

510 # https://github.com/giampaolo/psutil/issues/1915 

511 # In this case "free" CLI tool makes an estimate. We do the same, 

512 # and it matches "free" CLI tool. 

513 avail = calculate_avail_vmem(mems) 

514 

515 if avail < 0: 

516 avail = 0 

517 missing_fields.append('available') 

518 elif avail > total: 

519 # If avail is greater than total or our calculation overflows, 

520 # that's symptomatic of running within a LCX container where such 

521 # values will be dramatically distorted over those of the host. 

522 # https://gitlab.com/procps-ng/procps/blob/ 

523 # 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L764 

524 avail = free 

525 

526 percent = usage_percent((total - avail), total, round_=1) 

527 

528 # Warn about missing metrics which are set to 0. 

529 if missing_fields: 

530 msg = "%s memory stats couldn't be determined and %s set to 0" % ( 

531 ", ".join(missing_fields), 

532 "was" if len(missing_fields) == 1 else "were", 

533 ) 

534 warnings.warn(msg, RuntimeWarning, stacklevel=2) 

535 

536 return svmem( 

537 total, 

538 avail, 

539 percent, 

540 used, 

541 free, 

542 active, 

543 inactive, 

544 buffers, 

545 cached, 

546 shared, 

547 slab, 

548 ) 

549 

550 

551def swap_memory(): 

552 """Return swap memory metrics.""" 

553 mems = {} 

554 with open_binary('%s/meminfo' % get_procfs_path()) as f: 

555 for line in f: 

556 fields = line.split() 

557 mems[fields[0]] = int(fields[1]) * 1024 

558 # We prefer /proc/meminfo over sysinfo() syscall so that 

559 # psutil.PROCFS_PATH can be used in order to allow retrieval 

560 # for linux containers, see: 

561 # https://github.com/giampaolo/psutil/issues/1015 

562 try: 

563 total = mems[b'SwapTotal:'] 

564 free = mems[b'SwapFree:'] 

565 except KeyError: 

566 _, _, _, _, total, free, unit_multiplier = cext.linux_sysinfo() 

567 total *= unit_multiplier 

568 free *= unit_multiplier 

569 

570 used = total - free 

571 percent = usage_percent(used, total, round_=1) 

572 # get pgin/pgouts 

573 try: 

574 f = open_binary("%s/vmstat" % get_procfs_path()) 

575 except IOError as err: 

576 # see https://github.com/giampaolo/psutil/issues/722 

577 msg = ( 

578 "'sin' and 'sout' swap memory stats couldn't " 

579 + "be determined and were set to 0 (%s)" % str(err) 

580 ) 

581 warnings.warn(msg, RuntimeWarning, stacklevel=2) 

582 sin = sout = 0 

583 else: 

584 with f: 

585 sin = sout = None 

586 for line in f: 

587 # values are expressed in 4 kilo bytes, we want 

588 # bytes instead 

589 if line.startswith(b'pswpin'): 

590 sin = int(line.split(b' ')[1]) * 4 * 1024 

591 elif line.startswith(b'pswpout'): 

592 sout = int(line.split(b' ')[1]) * 4 * 1024 

593 if sin is not None and sout is not None: 

594 break 

595 else: 

596 # we might get here when dealing with exotic Linux 

597 # flavors, see: 

598 # https://github.com/giampaolo/psutil/issues/313 

599 msg = "'sin' and 'sout' swap memory stats couldn't " 

600 msg += "be determined and were set to 0" 

601 warnings.warn(msg, RuntimeWarning, stacklevel=2) 

602 sin = sout = 0 

603 return _common.sswap(total, used, free, percent, sin, sout) 

604 

605 

606# ===================================================================== 

607# --- CPU 

608# ===================================================================== 

609 

610 

611def cpu_times(): 

612 """Return a named tuple representing the following system-wide 

613 CPU times: 

614 (user, nice, system, idle, iowait, irq, softirq [steal, [guest, 

615 [guest_nice]]]) 

616 Last 3 fields may not be available on all Linux kernel versions. 

617 """ 

618 procfs_path = get_procfs_path() 

619 set_scputimes_ntuple(procfs_path) 

620 with open_binary('%s/stat' % procfs_path) as f: 

621 values = f.readline().split() 

622 fields = values[1 : len(scputimes._fields) + 1] 

623 fields = [float(x) / CLOCK_TICKS for x in fields] 

624 return scputimes(*fields) 

625 

626 

627def per_cpu_times(): 

628 """Return a list of namedtuple representing the CPU times 

629 for every CPU available on the system. 

630 """ 

631 procfs_path = get_procfs_path() 

632 set_scputimes_ntuple(procfs_path) 

633 cpus = [] 

634 with open_binary('%s/stat' % procfs_path) as f: 

635 # get rid of the first line which refers to system wide CPU stats 

636 f.readline() 

637 for line in f: 

638 if line.startswith(b'cpu'): 

639 values = line.split() 

640 fields = values[1 : len(scputimes._fields) + 1] 

641 fields = [float(x) / CLOCK_TICKS for x in fields] 

642 entry = scputimes(*fields) 

643 cpus.append(entry) 

644 return cpus 

645 

646 

647def cpu_count_logical(): 

648 """Return the number of logical CPUs in the system.""" 

649 try: 

650 return os.sysconf("SC_NPROCESSORS_ONLN") 

651 except ValueError: 

652 # as a second fallback we try to parse /proc/cpuinfo 

653 num = 0 

654 with open_binary('%s/cpuinfo' % get_procfs_path()) as f: 

655 for line in f: 

656 if line.lower().startswith(b'processor'): 

657 num += 1 

658 

659 # unknown format (e.g. amrel/sparc architectures), see: 

660 # https://github.com/giampaolo/psutil/issues/200 

661 # try to parse /proc/stat as a last resort 

662 if num == 0: 

663 search = re.compile(r'cpu\d') 

664 with open_text('%s/stat' % get_procfs_path()) as f: 

665 for line in f: 

666 line = line.split(' ')[0] 

667 if search.match(line): 

668 num += 1 

669 

670 if num == 0: 

671 # mimic os.cpu_count() 

672 return None 

673 return num 

674 

675 

676def cpu_count_cores(): 

677 """Return the number of CPU cores in the system.""" 

678 # Method #1 

679 ls = set() 

680 # These 2 files are the same but */core_cpus_list is newer while 

681 # */thread_siblings_list is deprecated and may disappear in the future. 

682 # https://www.kernel.org/doc/Documentation/admin-guide/cputopology.rst 

683 # https://github.com/giampaolo/psutil/pull/1727#issuecomment-707624964 

684 # https://lkml.org/lkml/2019/2/26/41 

685 p1 = "/sys/devices/system/cpu/cpu[0-9]*/topology/core_cpus_list" 

686 p2 = "/sys/devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list" 

687 for path in glob.glob(p1) or glob.glob(p2): 

688 with open_binary(path) as f: 

689 ls.add(f.read().strip()) 

690 result = len(ls) 

691 if result != 0: 

692 return result 

693 

694 # Method #2 

695 mapping = {} 

696 current_info = {} 

697 with open_binary('%s/cpuinfo' % get_procfs_path()) as f: 

698 for line in f: 

699 line = line.strip().lower() 

700 if not line: 

701 # new section 

702 try: 

703 mapping[current_info[b'physical id']] = current_info[ 

704 b'cpu cores' 

705 ] 

706 except KeyError: 

707 pass 

708 current_info = {} 

709 else: 

710 # ongoing section 

711 if line.startswith((b'physical id', b'cpu cores')): 

712 key, value = line.split(b'\t:', 1) 

713 current_info[key] = int(value) 

714 

715 result = sum(mapping.values()) 

716 return result or None # mimic os.cpu_count() 

717 

718 

719def cpu_stats(): 

720 """Return various CPU stats as a named tuple.""" 

721 with open_binary('%s/stat' % get_procfs_path()) as f: 

722 ctx_switches = None 

723 interrupts = None 

724 soft_interrupts = None 

725 for line in f: 

726 if line.startswith(b'ctxt'): 

727 ctx_switches = int(line.split()[1]) 

728 elif line.startswith(b'intr'): 

729 interrupts = int(line.split()[1]) 

730 elif line.startswith(b'softirq'): 

731 soft_interrupts = int(line.split()[1]) 

732 if ( 

733 ctx_switches is not None 

734 and soft_interrupts is not None 

735 and interrupts is not None 

736 ): 

737 break 

738 syscalls = 0 

739 return _common.scpustats( 

740 ctx_switches, interrupts, soft_interrupts, syscalls 

741 ) 

742 

743 

744def _cpu_get_cpuinfo_freq(): 

745 """Return current CPU frequency from cpuinfo if available.""" 

746 ret = [] 

747 with open_binary('%s/cpuinfo' % get_procfs_path()) as f: 

748 for line in f: 

749 if line.lower().startswith(b'cpu mhz'): 

750 ret.append(float(line.split(b':', 1)[1])) 

751 return ret 

752 

753 

754if os.path.exists("/sys/devices/system/cpu/cpufreq/policy0") or os.path.exists( 

755 "/sys/devices/system/cpu/cpu0/cpufreq" 

756): 

757 

758 def cpu_freq(): 

759 """Return frequency metrics for all CPUs. 

760 Contrarily to other OSes, Linux updates these values in 

761 real-time. 

762 """ 

763 cpuinfo_freqs = _cpu_get_cpuinfo_freq() 

764 paths = glob.glob( 

765 "/sys/devices/system/cpu/cpufreq/policy[0-9]*" 

766 ) or glob.glob("/sys/devices/system/cpu/cpu[0-9]*/cpufreq") 

767 paths.sort(key=lambda x: int(re.search(r"[0-9]+", x).group())) 

768 ret = [] 

769 pjoin = os.path.join 

770 for i, path in enumerate(paths): 

771 if len(paths) == len(cpuinfo_freqs): 

772 # take cached value from cpuinfo if available, see: 

773 # https://github.com/giampaolo/psutil/issues/1851 

774 curr = cpuinfo_freqs[i] * 1000 

775 else: 

776 curr = bcat(pjoin(path, "scaling_cur_freq"), fallback=None) 

777 if curr is None: 

778 # Likely an old RedHat, see: 

779 # https://github.com/giampaolo/psutil/issues/1071 

780 curr = bcat(pjoin(path, "cpuinfo_cur_freq"), fallback=None) 

781 if curr is None: 

782 online_path = ( 

783 "/sys/devices/system/cpu/cpu{}/online".format(i) 

784 ) 

785 # if cpu core is offline, set to all zeroes 

786 if cat(online_path, fallback=None) == "0\n": 

787 ret.append(_common.scpufreq(0.0, 0.0, 0.0)) 

788 continue 

789 msg = "can't find current frequency file" 

790 raise NotImplementedError(msg) 

791 curr = int(curr) / 1000 

792 max_ = int(bcat(pjoin(path, "scaling_max_freq"))) / 1000 

793 min_ = int(bcat(pjoin(path, "scaling_min_freq"))) / 1000 

794 ret.append(_common.scpufreq(curr, min_, max_)) 

795 return ret 

796 

797else: 

798 

799 def cpu_freq(): 

800 """Alternate implementation using /proc/cpuinfo. 

801 min and max frequencies are not available and are set to None. 

802 """ 

803 return [_common.scpufreq(x, 0.0, 0.0) for x in _cpu_get_cpuinfo_freq()] 

804 

805 

806# ===================================================================== 

807# --- network 

808# ===================================================================== 

809 

810 

811net_if_addrs = cext_posix.net_if_addrs 

812 

813 

814class _Ipv6UnsupportedError(Exception): 

815 pass 

816 

817 

818class Connections: 

819 """A wrapper on top of /proc/net/* files, retrieving per-process 

820 and system-wide open connections (TCP, UDP, UNIX) similarly to 

821 "netstat -an". 

822 

823 Note: in case of UNIX sockets we're only able to determine the 

824 local endpoint/path, not the one it's connected to. 

825 According to [1] it would be possible but not easily. 

826 

827 [1] http://serverfault.com/a/417946 

828 """ 

829 

830 def __init__(self): 

831 # The string represents the basename of the corresponding 

832 # /proc/net/{proto_name} file. 

833 tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM) 

834 tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM) 

835 udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM) 

836 udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM) 

837 unix = ("unix", socket.AF_UNIX, None) 

838 self.tmap = { 

839 "all": (tcp4, tcp6, udp4, udp6, unix), 

840 "tcp": (tcp4, tcp6), 

841 "tcp4": (tcp4,), 

842 "tcp6": (tcp6,), 

843 "udp": (udp4, udp6), 

844 "udp4": (udp4,), 

845 "udp6": (udp6,), 

846 "unix": (unix,), 

847 "inet": (tcp4, tcp6, udp4, udp6), 

848 "inet4": (tcp4, udp4), 

849 "inet6": (tcp6, udp6), 

850 } 

851 self._procfs_path = None 

852 

853 def get_proc_inodes(self, pid): 

854 inodes = defaultdict(list) 

855 for fd in os.listdir("%s/%s/fd" % (self._procfs_path, pid)): 

856 try: 

857 inode = readlink("%s/%s/fd/%s" % (self._procfs_path, pid, fd)) 

858 except (FileNotFoundError, ProcessLookupError): 

859 # ENOENT == file which is gone in the meantime; 

860 # os.stat('/proc/%s' % self.pid) will be done later 

861 # to force NSP (if it's the case) 

862 continue 

863 except OSError as err: 

864 if err.errno == errno.EINVAL: 

865 # not a link 

866 continue 

867 if err.errno == errno.ENAMETOOLONG: 

868 # file name too long 

869 debug(err) 

870 continue 

871 raise 

872 else: 

873 if inode.startswith('socket:['): 

874 # the process is using a socket 

875 inode = inode[8:][:-1] 

876 inodes[inode].append((pid, int(fd))) 

877 return inodes 

878 

879 def get_all_inodes(self): 

880 inodes = {} 

881 for pid in pids(): 

882 try: 

883 inodes.update(self.get_proc_inodes(pid)) 

884 except (FileNotFoundError, ProcessLookupError, PermissionError): 

885 # os.listdir() is gonna raise a lot of access denied 

886 # exceptions in case of unprivileged user; that's fine 

887 # as we'll just end up returning a connection with PID 

888 # and fd set to None anyway. 

889 # Both netstat -an and lsof does the same so it's 

890 # unlikely we can do any better. 

891 # ENOENT just means a PID disappeared on us. 

892 continue 

893 return inodes 

894 

895 @staticmethod 

896 def decode_address(addr, family): 

897 """Accept an "ip:port" address as displayed in /proc/net/* 

898 and convert it into a human readable form, like: 

899 

900 "0500000A:0016" -> ("10.0.0.5", 22) 

901 "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521) 

902 

903 The IP address portion is a little or big endian four-byte 

904 hexadecimal number; that is, the least significant byte is listed 

905 first, so we need to reverse the order of the bytes to convert it 

906 to an IP address. 

907 The port is represented as a two-byte hexadecimal number. 

908 

909 Reference: 

910 http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html 

911 """ 

912 ip, port = addr.split(':') 

913 port = int(port, 16) 

914 # this usually refers to a local socket in listen mode with 

915 # no end-points connected 

916 if not port: 

917 return () 

918 if PY3: 

919 ip = ip.encode('ascii') 

920 if family == socket.AF_INET: 

921 # see: https://github.com/giampaolo/psutil/issues/201 

922 if LITTLE_ENDIAN: 

923 ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1]) 

924 else: 

925 ip = socket.inet_ntop(family, base64.b16decode(ip)) 

926 else: # IPv6 

927 ip = base64.b16decode(ip) 

928 try: 

929 # see: https://github.com/giampaolo/psutil/issues/201 

930 if LITTLE_ENDIAN: 

931 ip = socket.inet_ntop( 

932 socket.AF_INET6, 

933 struct.pack('>4I', *struct.unpack('<4I', ip)), 

934 ) 

935 else: 

936 ip = socket.inet_ntop( 

937 socket.AF_INET6, 

938 struct.pack('<4I', *struct.unpack('<4I', ip)), 

939 ) 

940 except ValueError: 

941 # see: https://github.com/giampaolo/psutil/issues/623 

942 if not supports_ipv6(): 

943 raise _Ipv6UnsupportedError 

944 else: 

945 raise 

946 return _common.addr(ip, port) 

947 

948 @staticmethod 

949 def process_inet(file, family, type_, inodes, filter_pid=None): 

950 """Parse /proc/net/tcp* and /proc/net/udp* files.""" 

951 if file.endswith('6') and not os.path.exists(file): 

952 # IPv6 not supported 

953 return 

954 with open_text(file) as f: 

955 f.readline() # skip the first line 

956 for lineno, line in enumerate(f, 1): 

957 try: 

958 _, laddr, raddr, status, _, _, _, _, _, inode = ( 

959 line.split()[:10] 

960 ) 

961 except ValueError: 

962 raise RuntimeError( 

963 "error while parsing %s; malformed line %s %r" 

964 % (file, lineno, line) 

965 ) 

966 if inode in inodes: 

967 # # We assume inet sockets are unique, so we error 

968 # # out if there are multiple references to the 

969 # # same inode. We won't do this for UNIX sockets. 

970 # if len(inodes[inode]) > 1 and family != socket.AF_UNIX: 

971 # raise ValueError("ambiguous inode with multiple " 

972 # "PIDs references") 

973 pid, fd = inodes[inode][0] 

974 else: 

975 pid, fd = None, -1 

976 if filter_pid is not None and filter_pid != pid: 

977 continue 

978 else: 

979 if type_ == socket.SOCK_STREAM: 

980 status = TCP_STATUSES[status] 

981 else: 

982 status = _common.CONN_NONE 

983 try: 

984 laddr = Connections.decode_address(laddr, family) 

985 raddr = Connections.decode_address(raddr, family) 

986 except _Ipv6UnsupportedError: 

987 continue 

988 yield (fd, family, type_, laddr, raddr, status, pid) 

989 

990 @staticmethod 

991 def process_unix(file, family, inodes, filter_pid=None): 

992 """Parse /proc/net/unix files.""" 

993 with open_text(file) as f: 

994 f.readline() # skip the first line 

995 for line in f: 

996 tokens = line.split() 

997 try: 

998 _, _, _, _, type_, _, inode = tokens[0:7] 

999 except ValueError: 

1000 if ' ' not in line: 

1001 # see: https://github.com/giampaolo/psutil/issues/766 

1002 continue 

1003 raise RuntimeError( 

1004 "error while parsing %s; malformed line %r" 

1005 % (file, line) 

1006 ) 

1007 if inode in inodes: # noqa 

1008 # With UNIX sockets we can have a single inode 

1009 # referencing many file descriptors. 

1010 pairs = inodes[inode] 

1011 else: 

1012 pairs = [(None, -1)] 

1013 for pid, fd in pairs: 

1014 if filter_pid is not None and filter_pid != pid: 

1015 continue 

1016 else: 

1017 path = tokens[-1] if len(tokens) == 8 else '' 

1018 type_ = _common.socktype_to_enum(int(type_)) 

1019 # XXX: determining the remote endpoint of a 

1020 # UNIX socket on Linux is not possible, see: 

1021 # https://serverfault.com/questions/252723/ 

1022 raddr = "" 

1023 status = _common.CONN_NONE 

1024 yield (fd, family, type_, path, raddr, status, pid) 

1025 

1026 def retrieve(self, kind, pid=None): 

1027 if kind not in self.tmap: 

1028 raise ValueError( 

1029 "invalid %r kind argument; choose between %s" 

1030 % (kind, ', '.join([repr(x) for x in self.tmap])) 

1031 ) 

1032 self._procfs_path = get_procfs_path() 

1033 if pid is not None: 

1034 inodes = self.get_proc_inodes(pid) 

1035 if not inodes: 

1036 # no connections for this process 

1037 return [] 

1038 else: 

1039 inodes = self.get_all_inodes() 

1040 ret = set() 

1041 for proto_name, family, type_ in self.tmap[kind]: 

1042 path = "%s/net/%s" % (self._procfs_path, proto_name) 

1043 if family in (socket.AF_INET, socket.AF_INET6): 

1044 ls = self.process_inet( 

1045 path, family, type_, inodes, filter_pid=pid 

1046 ) 

1047 else: 

1048 ls = self.process_unix(path, family, inodes, filter_pid=pid) 

1049 for fd, family, type_, laddr, raddr, status, bound_pid in ls: 

1050 if pid: 

1051 conn = _common.pconn( 

1052 fd, family, type_, laddr, raddr, status 

1053 ) 

1054 else: 

1055 conn = _common.sconn( 

1056 fd, family, type_, laddr, raddr, status, bound_pid 

1057 ) 

1058 ret.add(conn) 

1059 return list(ret) 

1060 

1061 

1062_connections = Connections() 

1063 

1064 

1065def net_connections(kind='inet'): 

1066 """Return system-wide open connections.""" 

1067 return _connections.retrieve(kind) 

1068 

1069 

1070def net_io_counters(): 

1071 """Return network I/O statistics for every network interface 

1072 installed on the system as a dict of raw tuples. 

1073 """ 

1074 with open_text("%s/net/dev" % get_procfs_path()) as f: 

1075 lines = f.readlines() 

1076 retdict = {} 

1077 for line in lines[2:]: 

1078 colon = line.rfind(':') 

1079 assert colon > 0, repr(line) 

1080 name = line[:colon].strip() 

1081 fields = line[colon + 1 :].strip().split() 

1082 

1083 # in 

1084 ( 

1085 bytes_recv, 

1086 packets_recv, 

1087 errin, 

1088 dropin, 

1089 fifoin, # unused 

1090 framein, # unused 

1091 compressedin, # unused 

1092 multicastin, # unused 

1093 # out 

1094 bytes_sent, 

1095 packets_sent, 

1096 errout, 

1097 dropout, 

1098 fifoout, # unused 

1099 collisionsout, # unused 

1100 carrierout, # unused 

1101 compressedout, 

1102 ) = map(int, fields) 

1103 

1104 retdict[name] = ( 

1105 bytes_sent, 

1106 bytes_recv, 

1107 packets_sent, 

1108 packets_recv, 

1109 errin, 

1110 errout, 

1111 dropin, 

1112 dropout, 

1113 ) 

1114 return retdict 

1115 

1116 

1117def net_if_stats(): 

1118 """Get NIC stats (isup, duplex, speed, mtu).""" 

1119 duplex_map = { 

1120 cext.DUPLEX_FULL: NIC_DUPLEX_FULL, 

1121 cext.DUPLEX_HALF: NIC_DUPLEX_HALF, 

1122 cext.DUPLEX_UNKNOWN: NIC_DUPLEX_UNKNOWN, 

1123 } 

1124 names = net_io_counters().keys() 

1125 ret = {} 

1126 for name in names: 

1127 try: 

1128 mtu = cext_posix.net_if_mtu(name) 

1129 flags = cext_posix.net_if_flags(name) 

1130 duplex, speed = cext.net_if_duplex_speed(name) 

1131 except OSError as err: 

1132 # https://github.com/giampaolo/psutil/issues/1279 

1133 if err.errno != errno.ENODEV: 

1134 raise 

1135 else: 

1136 debug(err) 

1137 else: 

1138 output_flags = ','.join(flags) 

1139 isup = 'running' in flags 

1140 ret[name] = _common.snicstats( 

1141 isup, duplex_map[duplex], speed, mtu, output_flags 

1142 ) 

1143 return ret 

1144 

1145 

1146# ===================================================================== 

1147# --- disks 

1148# ===================================================================== 

1149 

1150 

1151disk_usage = _psposix.disk_usage 

1152 

1153 

1154def disk_io_counters(perdisk=False): 

1155 """Return disk I/O statistics for every disk installed on the 

1156 system as a dict of raw tuples. 

1157 """ 

1158 

1159 def read_procfs(): 

1160 # OK, this is a bit confusing. The format of /proc/diskstats can 

1161 # have 3 variations. 

1162 # On Linux 2.4 each line has always 15 fields, e.g.: 

1163 # "3 0 8 hda 8 8 8 8 8 8 8 8 8 8 8" 

1164 # On Linux 2.6+ each line *usually* has 14 fields, and the disk 

1165 # name is in another position, like this: 

1166 # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8" 

1167 # ...unless (Linux 2.6) the line refers to a partition instead 

1168 # of a disk, in which case the line has less fields (7): 

1169 # "3 1 hda1 8 8 8 8" 

1170 # 4.18+ has 4 fields added: 

1171 # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8 0 0 0 0" 

1172 # 5.5 has 2 more fields. 

1173 # See: 

1174 # https://www.kernel.org/doc/Documentation/iostats.txt 

1175 # https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats 

1176 with open_text("%s/diskstats" % get_procfs_path()) as f: 

1177 lines = f.readlines() 

1178 for line in lines: 

1179 fields = line.split() 

1180 flen = len(fields) 

1181 # fmt: off 

1182 if flen == 15: 

1183 # Linux 2.4 

1184 name = fields[3] 

1185 reads = int(fields[2]) 

1186 (reads_merged, rbytes, rtime, writes, writes_merged, 

1187 wbytes, wtime, _, busy_time, _) = map(int, fields[4:14]) 

1188 elif flen == 14 or flen >= 18: 

1189 # Linux 2.6+, line referring to a disk 

1190 name = fields[2] 

1191 (reads, reads_merged, rbytes, rtime, writes, writes_merged, 

1192 wbytes, wtime, _, busy_time, _) = map(int, fields[3:14]) 

1193 elif flen == 7: 

1194 # Linux 2.6+, line referring to a partition 

1195 name = fields[2] 

1196 reads, rbytes, writes, wbytes = map(int, fields[3:]) 

1197 rtime = wtime = reads_merged = writes_merged = busy_time = 0 

1198 else: 

1199 raise ValueError("not sure how to interpret line %r" % line) 

1200 yield (name, reads, writes, rbytes, wbytes, rtime, wtime, 

1201 reads_merged, writes_merged, busy_time) 

1202 # fmt: on 

1203 

1204 def read_sysfs(): 

1205 for block in os.listdir('/sys/block'): 

1206 for root, _, files in os.walk(os.path.join('/sys/block', block)): 

1207 if 'stat' not in files: 

1208 continue 

1209 with open_text(os.path.join(root, 'stat')) as f: 

1210 fields = f.read().strip().split() 

1211 name = os.path.basename(root) 

1212 # fmt: off 

1213 (reads, reads_merged, rbytes, rtime, writes, writes_merged, 

1214 wbytes, wtime, _, busy_time) = map(int, fields[:10]) 

1215 yield (name, reads, writes, rbytes, wbytes, rtime, 

1216 wtime, reads_merged, writes_merged, busy_time) 

1217 # fmt: on 

1218 

1219 if os.path.exists('%s/diskstats' % get_procfs_path()): 

1220 gen = read_procfs() 

1221 elif os.path.exists('/sys/block'): 

1222 gen = read_sysfs() 

1223 else: 

1224 raise NotImplementedError( 

1225 "%s/diskstats nor /sys/block filesystem are available on this " 

1226 "system" 

1227 % get_procfs_path() 

1228 ) 

1229 

1230 retdict = {} 

1231 for entry in gen: 

1232 # fmt: off 

1233 (name, reads, writes, rbytes, wbytes, rtime, wtime, reads_merged, 

1234 writes_merged, busy_time) = entry 

1235 if not perdisk and not is_storage_device(name): 

1236 # perdisk=False means we want to calculate totals so we skip 

1237 # partitions (e.g. 'sda1', 'nvme0n1p1') and only include 

1238 # base disk devices (e.g. 'sda', 'nvme0n1'). Base disks 

1239 # include a total of all their partitions + some extra size 

1240 # of their own: 

1241 # $ cat /proc/diskstats 

1242 # 259 0 sda 10485760 ... 

1243 # 259 1 sda1 5186039 ... 

1244 # 259 1 sda2 5082039 ... 

1245 # See: 

1246 # https://github.com/giampaolo/psutil/pull/1313 

1247 continue 

1248 

1249 rbytes *= DISK_SECTOR_SIZE 

1250 wbytes *= DISK_SECTOR_SIZE 

1251 retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime, 

1252 reads_merged, writes_merged, busy_time) 

1253 # fmt: on 

1254 

1255 return retdict 

1256 

1257 

1258class RootFsDeviceFinder: 

1259 """disk_partitions() may return partitions with device == "/dev/root" 

1260 or "rootfs". This container class uses different strategies to try to 

1261 obtain the real device path. Resources: 

1262 https://bootlin.com/blog/find-root-device/ 

1263 https://www.systutorials.com/how-to-find-the-disk-where-root-is-on-in-bash-on-linux/. 

1264 """ 

1265 

1266 __slots__ = ['major', 'minor'] 

1267 

1268 def __init__(self): 

1269 dev = os.stat("/").st_dev 

1270 self.major = os.major(dev) 

1271 self.minor = os.minor(dev) 

1272 

1273 def ask_proc_partitions(self): 

1274 with open_text("%s/partitions" % get_procfs_path()) as f: 

1275 for line in f.readlines()[2:]: 

1276 fields = line.split() 

1277 if len(fields) < 4: # just for extra safety 

1278 continue 

1279 major = int(fields[0]) if fields[0].isdigit() else None 

1280 minor = int(fields[1]) if fields[1].isdigit() else None 

1281 name = fields[3] 

1282 if major == self.major and minor == self.minor: 

1283 if name: # just for extra safety 

1284 return "/dev/%s" % name 

1285 

1286 def ask_sys_dev_block(self): 

1287 path = "/sys/dev/block/%s:%s/uevent" % (self.major, self.minor) 

1288 with open_text(path) as f: 

1289 for line in f: 

1290 if line.startswith("DEVNAME="): 

1291 name = line.strip().rpartition("DEVNAME=")[2] 

1292 if name: # just for extra safety 

1293 return "/dev/%s" % name 

1294 

1295 def ask_sys_class_block(self): 

1296 needle = "%s:%s" % (self.major, self.minor) 

1297 files = glob.iglob("/sys/class/block/*/dev") 

1298 for file in files: 

1299 try: 

1300 f = open_text(file) 

1301 except FileNotFoundError: # race condition 

1302 continue 

1303 else: 

1304 with f: 

1305 data = f.read().strip() 

1306 if data == needle: 

1307 name = os.path.basename(os.path.dirname(file)) 

1308 return "/dev/%s" % name 

1309 

1310 def find(self): 

1311 path = None 

1312 if path is None: 

1313 try: 

1314 path = self.ask_proc_partitions() 

1315 except (IOError, OSError) as err: 

1316 debug(err) 

1317 if path is None: 

1318 try: 

1319 path = self.ask_sys_dev_block() 

1320 except (IOError, OSError) as err: 

1321 debug(err) 

1322 if path is None: 

1323 try: 

1324 path = self.ask_sys_class_block() 

1325 except (IOError, OSError) as err: 

1326 debug(err) 

1327 # We use exists() because the "/dev/*" part of the path is hard 

1328 # coded, so we want to be sure. 

1329 if path is not None and os.path.exists(path): 

1330 return path 

1331 

1332 

1333def disk_partitions(all=False): 

1334 """Return mounted disk partitions as a list of namedtuples.""" 

1335 fstypes = set() 

1336 procfs_path = get_procfs_path() 

1337 if not all: 

1338 with open_text("%s/filesystems" % procfs_path) as f: 

1339 for line in f: 

1340 line = line.strip() 

1341 if not line.startswith("nodev"): 

1342 fstypes.add(line.strip()) 

1343 else: 

1344 # ignore all lines starting with "nodev" except "nodev zfs" 

1345 fstype = line.split("\t")[1] 

1346 if fstype == "zfs": 

1347 fstypes.add("zfs") 

1348 

1349 # See: https://github.com/giampaolo/psutil/issues/1307 

1350 if procfs_path == "/proc" and os.path.isfile('/etc/mtab'): 

1351 mounts_path = os.path.realpath("/etc/mtab") 

1352 else: 

1353 mounts_path = os.path.realpath("%s/self/mounts" % procfs_path) 

1354 

1355 retlist = [] 

1356 partitions = cext.disk_partitions(mounts_path) 

1357 for partition in partitions: 

1358 device, mountpoint, fstype, opts = partition 

1359 if device == 'none': 

1360 device = '' 

1361 if device in ("/dev/root", "rootfs"): 

1362 device = RootFsDeviceFinder().find() or device 

1363 if not all: 

1364 if not device or fstype not in fstypes: 

1365 continue 

1366 maxfile = maxpath = None # set later 

1367 ntuple = _common.sdiskpart( 

1368 device, mountpoint, fstype, opts, maxfile, maxpath 

1369 ) 

1370 retlist.append(ntuple) 

1371 

1372 return retlist 

1373 

1374 

1375# ===================================================================== 

1376# --- sensors 

1377# ===================================================================== 

1378 

1379 

1380def sensors_temperatures(): 

1381 """Return hardware (CPU and others) temperatures as a dict 

1382 including hardware name, label, current, max and critical 

1383 temperatures. 

1384 

1385 Implementation notes: 

1386 - /sys/class/hwmon looks like the most recent interface to 

1387 retrieve this info, and this implementation relies on it 

1388 only (old distros will probably use something else) 

1389 - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon 

1390 - /sys/class/thermal/thermal_zone* is another one but it's more 

1391 difficult to parse 

1392 """ 

1393 ret = collections.defaultdict(list) 

1394 basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*') 

1395 # CentOS has an intermediate /device directory: 

1396 # https://github.com/giampaolo/psutil/issues/971 

1397 # https://github.com/nicolargo/glances/issues/1060 

1398 basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*')) 

1399 basenames = sorted(set([x.split('_')[0] for x in basenames])) 

1400 

1401 # Only add the coretemp hwmon entries if they're not already in 

1402 # /sys/class/hwmon/ 

1403 # https://github.com/giampaolo/psutil/issues/1708 

1404 # https://github.com/giampaolo/psutil/pull/1648 

1405 basenames2 = glob.glob( 

1406 '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*' 

1407 ) 

1408 repl = re.compile('/sys/devices/platform/coretemp.*/hwmon/') 

1409 for name in basenames2: 

1410 altname = repl.sub('/sys/class/hwmon/', name) 

1411 if altname not in basenames: 

1412 basenames.append(name) 

1413 

1414 for base in basenames: 

1415 try: 

1416 path = base + '_input' 

1417 current = float(bcat(path)) / 1000.0 

1418 path = os.path.join(os.path.dirname(base), 'name') 

1419 unit_name = cat(path).strip() 

1420 except (IOError, OSError, ValueError): 

1421 # A lot of things can go wrong here, so let's just skip the 

1422 # whole entry. Sure thing is Linux's /sys/class/hwmon really 

1423 # is a stinky broken mess. 

1424 # https://github.com/giampaolo/psutil/issues/1009 

1425 # https://github.com/giampaolo/psutil/issues/1101 

1426 # https://github.com/giampaolo/psutil/issues/1129 

1427 # https://github.com/giampaolo/psutil/issues/1245 

1428 # https://github.com/giampaolo/psutil/issues/1323 

1429 continue 

1430 

1431 high = bcat(base + '_max', fallback=None) 

1432 critical = bcat(base + '_crit', fallback=None) 

1433 label = cat(base + '_label', fallback='').strip() 

1434 

1435 if high is not None: 

1436 try: 

1437 high = float(high) / 1000.0 

1438 except ValueError: 

1439 high = None 

1440 if critical is not None: 

1441 try: 

1442 critical = float(critical) / 1000.0 

1443 except ValueError: 

1444 critical = None 

1445 

1446 ret[unit_name].append((label, current, high, critical)) 

1447 

1448 # Indication that no sensors were detected in /sys/class/hwmon/ 

1449 if not basenames: 

1450 basenames = glob.glob('/sys/class/thermal/thermal_zone*') 

1451 basenames = sorted(set(basenames)) 

1452 

1453 for base in basenames: 

1454 try: 

1455 path = os.path.join(base, 'temp') 

1456 current = float(bcat(path)) / 1000.0 

1457 path = os.path.join(base, 'type') 

1458 unit_name = cat(path).strip() 

1459 except (IOError, OSError, ValueError) as err: 

1460 debug(err) 

1461 continue 

1462 

1463 trip_paths = glob.glob(base + '/trip_point*') 

1464 trip_points = set([ 

1465 '_'.join(os.path.basename(p).split('_')[0:3]) 

1466 for p in trip_paths 

1467 ]) 

1468 critical = None 

1469 high = None 

1470 for trip_point in trip_points: 

1471 path = os.path.join(base, trip_point + "_type") 

1472 trip_type = cat(path, fallback='').strip() 

1473 if trip_type == 'critical': 

1474 critical = bcat( 

1475 os.path.join(base, trip_point + "_temp"), fallback=None 

1476 ) 

1477 elif trip_type == 'high': 

1478 high = bcat( 

1479 os.path.join(base, trip_point + "_temp"), fallback=None 

1480 ) 

1481 

1482 if high is not None: 

1483 try: 

1484 high = float(high) / 1000.0 

1485 except ValueError: 

1486 high = None 

1487 if critical is not None: 

1488 try: 

1489 critical = float(critical) / 1000.0 

1490 except ValueError: 

1491 critical = None 

1492 

1493 ret[unit_name].append(('', current, high, critical)) 

1494 

1495 return dict(ret) 

1496 

1497 

1498def sensors_fans(): 

1499 """Return hardware fans info (for CPU and other peripherals) as a 

1500 dict including hardware label and current speed. 

1501 

1502 Implementation notes: 

1503 - /sys/class/hwmon looks like the most recent interface to 

1504 retrieve this info, and this implementation relies on it 

1505 only (old distros will probably use something else) 

1506 - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon 

1507 """ 

1508 ret = collections.defaultdict(list) 

1509 basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*') 

1510 if not basenames: 

1511 # CentOS has an intermediate /device directory: 

1512 # https://github.com/giampaolo/psutil/issues/971 

1513 basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*') 

1514 

1515 basenames = sorted(set([x.split('_')[0] for x in basenames])) 

1516 for base in basenames: 

1517 try: 

1518 current = int(bcat(base + '_input')) 

1519 except (IOError, OSError) as err: 

1520 debug(err) 

1521 continue 

1522 unit_name = cat(os.path.join(os.path.dirname(base), 'name')).strip() 

1523 label = cat(base + '_label', fallback='').strip() 

1524 ret[unit_name].append(_common.sfan(label, current)) 

1525 

1526 return dict(ret) 

1527 

1528 

1529def sensors_battery(): 

1530 """Return battery information. 

1531 Implementation note: it appears /sys/class/power_supply/BAT0/ 

1532 directory structure may vary and provide files with the same 

1533 meaning but under different names, see: 

1534 https://github.com/giampaolo/psutil/issues/966. 

1535 """ 

1536 null = object() 

1537 

1538 def multi_bcat(*paths): 

1539 """Attempt to read the content of multiple files which may 

1540 not exist. If none of them exist return None. 

1541 """ 

1542 for path in paths: 

1543 ret = bcat(path, fallback=null) 

1544 if ret != null: 

1545 try: 

1546 return int(ret) 

1547 except ValueError: 

1548 return ret.strip() 

1549 return None 

1550 

1551 bats = [ 

1552 x 

1553 for x in os.listdir(POWER_SUPPLY_PATH) 

1554 if x.startswith('BAT') or 'battery' in x.lower() 

1555 ] 

1556 if not bats: 

1557 return None 

1558 # Get the first available battery. Usually this is "BAT0", except 

1559 # some rare exceptions: 

1560 # https://github.com/giampaolo/psutil/issues/1238 

1561 root = os.path.join(POWER_SUPPLY_PATH, sorted(bats)[0]) 

1562 

1563 # Base metrics. 

1564 energy_now = multi_bcat(root + "/energy_now", root + "/charge_now") 

1565 power_now = multi_bcat(root + "/power_now", root + "/current_now") 

1566 energy_full = multi_bcat(root + "/energy_full", root + "/charge_full") 

1567 time_to_empty = multi_bcat(root + "/time_to_empty_now") 

1568 

1569 # Percent. If we have energy_full the percentage will be more 

1570 # accurate compared to reading /capacity file (float vs. int). 

1571 if energy_full is not None and energy_now is not None: 

1572 try: 

1573 percent = 100.0 * energy_now / energy_full 

1574 except ZeroDivisionError: 

1575 percent = 0.0 

1576 else: 

1577 percent = int(cat(root + "/capacity", fallback=-1)) 

1578 if percent == -1: 

1579 return None 

1580 

1581 # Is AC power cable plugged in? 

1582 # Note: AC0 is not always available and sometimes (e.g. CentOS7) 

1583 # it's called "AC". 

1584 power_plugged = None 

1585 online = multi_bcat( 

1586 os.path.join(POWER_SUPPLY_PATH, "AC0/online"), 

1587 os.path.join(POWER_SUPPLY_PATH, "AC/online"), 

1588 ) 

1589 if online is not None: 

1590 power_plugged = online == 1 

1591 else: 

1592 status = cat(root + "/status", fallback="").strip().lower() 

1593 if status == "discharging": 

1594 power_plugged = False 

1595 elif status in ("charging", "full"): 

1596 power_plugged = True 

1597 

1598 # Seconds left. 

1599 # Note to self: we may also calculate the charging ETA as per: 

1600 # https://github.com/thialfihar/dotfiles/blob/ 

1601 # 013937745fd9050c30146290e8f963d65c0179e6/bin/battery.py#L55 

1602 if power_plugged: 

1603 secsleft = _common.POWER_TIME_UNLIMITED 

1604 elif energy_now is not None and power_now is not None: 

1605 try: 

1606 secsleft = int(energy_now / power_now * 3600) 

1607 except ZeroDivisionError: 

1608 secsleft = _common.POWER_TIME_UNKNOWN 

1609 elif time_to_empty is not None: 

1610 secsleft = int(time_to_empty * 60) 

1611 if secsleft < 0: 

1612 secsleft = _common.POWER_TIME_UNKNOWN 

1613 else: 

1614 secsleft = _common.POWER_TIME_UNKNOWN 

1615 

1616 return _common.sbattery(percent, secsleft, power_plugged) 

1617 

1618 

1619# ===================================================================== 

1620# --- other system functions 

1621# ===================================================================== 

1622 

1623 

1624def users(): 

1625 """Return currently connected users as a list of namedtuples.""" 

1626 retlist = [] 

1627 rawlist = cext.users() 

1628 for item in rawlist: 

1629 user, tty, hostname, tstamp, pid = item 

1630 nt = _common.suser(user, tty or None, hostname, tstamp, pid) 

1631 retlist.append(nt) 

1632 return retlist 

1633 

1634 

1635def boot_time(): 

1636 """Return the system boot time expressed in seconds since the epoch.""" 

1637 global BOOT_TIME 

1638 path = '%s/stat' % get_procfs_path() 

1639 with open_binary(path) as f: 

1640 for line in f: 

1641 if line.startswith(b'btime'): 

1642 ret = float(line.strip().split()[1]) 

1643 BOOT_TIME = ret 

1644 return ret 

1645 raise RuntimeError("line 'btime' not found in %s" % path) 

1646 

1647 

1648# ===================================================================== 

1649# --- processes 

1650# ===================================================================== 

1651 

1652 

1653def pids(): 

1654 """Returns a list of PIDs currently running on the system.""" 

1655 return [int(x) for x in os.listdir(b(get_procfs_path())) if x.isdigit()] 

1656 

1657 

1658def pid_exists(pid): 

1659 """Check for the existence of a unix PID. Linux TIDs are not 

1660 supported (always return False). 

1661 """ 

1662 if not _psposix.pid_exists(pid): 

1663 return False 

1664 else: 

1665 # Linux's apparently does not distinguish between PIDs and TIDs 

1666 # (thread IDs). 

1667 # listdir("/proc") won't show any TID (only PIDs) but 

1668 # os.stat("/proc/{tid}") will succeed if {tid} exists. 

1669 # os.kill() can also be passed a TID. This is quite confusing. 

1670 # In here we want to enforce this distinction and support PIDs 

1671 # only, see: 

1672 # https://github.com/giampaolo/psutil/issues/687 

1673 try: 

1674 # Note: already checked that this is faster than using a 

1675 # regular expr. Also (a lot) faster than doing 

1676 # 'return pid in pids()' 

1677 path = "%s/%s/status" % (get_procfs_path(), pid) 

1678 with open_binary(path) as f: 

1679 for line in f: 

1680 if line.startswith(b"Tgid:"): 

1681 tgid = int(line.split()[1]) 

1682 # If tgid and pid are the same then we're 

1683 # dealing with a process PID. 

1684 return tgid == pid 

1685 raise ValueError("'Tgid' line not found in %s" % path) 

1686 except (EnvironmentError, ValueError): 

1687 return pid in pids() 

1688 

1689 

1690def ppid_map(): 

1691 """Obtain a {pid: ppid, ...} dict for all running processes in 

1692 one shot. Used to speed up Process.children(). 

1693 """ 

1694 ret = {} 

1695 procfs_path = get_procfs_path() 

1696 for pid in pids(): 

1697 try: 

1698 with open_binary("%s/%s/stat" % (procfs_path, pid)) as f: 

1699 data = f.read() 

1700 except (FileNotFoundError, ProcessLookupError): 

1701 # Note: we should be able to access /stat for all processes 

1702 # aka it's unlikely we'll bump into EPERM, which is good. 

1703 pass 

1704 else: 

1705 rpar = data.rfind(b')') 

1706 dset = data[rpar + 2 :].split() 

1707 ppid = int(dset[1]) 

1708 ret[pid] = ppid 

1709 return ret 

1710 

1711 

1712def wrap_exceptions(fun): 

1713 """Decorator which translates bare OSError and IOError exceptions 

1714 into NoSuchProcess and AccessDenied. 

1715 """ 

1716 

1717 @functools.wraps(fun) 

1718 def wrapper(self, *args, **kwargs): 

1719 try: 

1720 return fun(self, *args, **kwargs) 

1721 except PermissionError: 

1722 raise AccessDenied(self.pid, self._name) 

1723 except ProcessLookupError: 

1724 self._raise_if_zombie() 

1725 raise NoSuchProcess(self.pid, self._name) 

1726 except FileNotFoundError: 

1727 self._raise_if_zombie() 

1728 if not os.path.exists("%s/%s" % (self._procfs_path, self.pid)): 

1729 raise NoSuchProcess(self.pid, self._name) 

1730 raise 

1731 

1732 return wrapper 

1733 

1734 

1735class Process: 

1736 """Linux process implementation.""" 

1737 

1738 __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"] 

1739 

1740 def __init__(self, pid): 

1741 self.pid = pid 

1742 self._name = None 

1743 self._ppid = None 

1744 self._procfs_path = get_procfs_path() 

1745 

1746 def _is_zombie(self): 

1747 # Note: most of the times Linux is able to return info about the 

1748 # process even if it's a zombie, and /proc/{pid} will exist. 

1749 # There are some exceptions though, like exe(), cmdline() and 

1750 # memory_maps(). In these cases /proc/{pid}/{file} exists but 

1751 # it's empty. Instead of returning a "null" value we'll raise an 

1752 # exception. 

1753 try: 

1754 data = bcat("%s/%s/stat" % (self._procfs_path, self.pid)) 

1755 except (IOError, OSError): 

1756 return False 

1757 else: 

1758 rpar = data.rfind(b')') 

1759 status = data[rpar + 2 : rpar + 3] 

1760 return status == b"Z" 

1761 

1762 def _raise_if_zombie(self): 

1763 if self._is_zombie(): 

1764 raise ZombieProcess(self.pid, self._name, self._ppid) 

1765 

1766 def _raise_if_not_alive(self): 

1767 """Raise NSP if the process disappeared on us.""" 

1768 # For those C function who do not raise NSP, possibly returning 

1769 # incorrect or incomplete result. 

1770 os.stat('%s/%s' % (self._procfs_path, self.pid)) 

1771 

1772 @wrap_exceptions 

1773 @memoize_when_activated 

1774 def _parse_stat_file(self): 

1775 """Parse /proc/{pid}/stat file and return a dict with various 

1776 process info. 

1777 Using "man proc" as a reference: where "man proc" refers to 

1778 position N always subtract 3 (e.g ppid position 4 in 

1779 'man proc' == position 1 in here). 

1780 The return value is cached in case oneshot() ctx manager is 

1781 in use. 

1782 """ 

1783 data = bcat("%s/%s/stat" % (self._procfs_path, self.pid)) 

1784 # Process name is between parentheses. It can contain spaces and 

1785 # other parentheses. This is taken into account by looking for 

1786 # the first occurrence of "(" and the last occurrence of ")". 

1787 rpar = data.rfind(b')') 

1788 name = data[data.find(b'(') + 1 : rpar] 

1789 fields = data[rpar + 2 :].split() 

1790 

1791 ret = {} 

1792 ret['name'] = name 

1793 ret['status'] = fields[0] 

1794 ret['ppid'] = fields[1] 

1795 ret['ttynr'] = fields[4] 

1796 ret['utime'] = fields[11] 

1797 ret['stime'] = fields[12] 

1798 ret['children_utime'] = fields[13] 

1799 ret['children_stime'] = fields[14] 

1800 ret['create_time'] = fields[19] 

1801 ret['cpu_num'] = fields[36] 

1802 ret['blkio_ticks'] = fields[39] # aka 'delayacct_blkio_ticks' 

1803 

1804 return ret 

1805 

1806 @wrap_exceptions 

1807 @memoize_when_activated 

1808 def _read_status_file(self): 

1809 """Read /proc/{pid}/stat file and return its content. 

1810 The return value is cached in case oneshot() ctx manager is 

1811 in use. 

1812 """ 

1813 with open_binary("%s/%s/status" % (self._procfs_path, self.pid)) as f: 

1814 return f.read() 

1815 

1816 @wrap_exceptions 

1817 @memoize_when_activated 

1818 def _read_smaps_file(self): 

1819 with open_binary("%s/%s/smaps" % (self._procfs_path, self.pid)) as f: 

1820 return f.read().strip() 

1821 

1822 def oneshot_enter(self): 

1823 self._parse_stat_file.cache_activate(self) 

1824 self._read_status_file.cache_activate(self) 

1825 self._read_smaps_file.cache_activate(self) 

1826 

1827 def oneshot_exit(self): 

1828 self._parse_stat_file.cache_deactivate(self) 

1829 self._read_status_file.cache_deactivate(self) 

1830 self._read_smaps_file.cache_deactivate(self) 

1831 

1832 @wrap_exceptions 

1833 def name(self): 

1834 name = self._parse_stat_file()['name'] 

1835 if PY3: 

1836 name = decode(name) 

1837 # XXX - gets changed later and probably needs refactoring 

1838 return name 

1839 

1840 @wrap_exceptions 

1841 def exe(self): 

1842 try: 

1843 return readlink("%s/%s/exe" % (self._procfs_path, self.pid)) 

1844 except (FileNotFoundError, ProcessLookupError): 

1845 self._raise_if_zombie() 

1846 # no such file error; might be raised also if the 

1847 # path actually exists for system processes with 

1848 # low pids (about 0-20) 

1849 if os.path.lexists("%s/%s" % (self._procfs_path, self.pid)): 

1850 return "" 

1851 raise 

1852 

1853 @wrap_exceptions 

1854 def cmdline(self): 

1855 with open_text("%s/%s/cmdline" % (self._procfs_path, self.pid)) as f: 

1856 data = f.read() 

1857 if not data: 

1858 # may happen in case of zombie process 

1859 self._raise_if_zombie() 

1860 return [] 

1861 # 'man proc' states that args are separated by null bytes '\0' 

1862 # and last char is supposed to be a null byte. Nevertheless 

1863 # some processes may change their cmdline after being started 

1864 # (via setproctitle() or similar), they are usually not 

1865 # compliant with this rule and use spaces instead. Google 

1866 # Chrome process is an example. See: 

1867 # https://github.com/giampaolo/psutil/issues/1179 

1868 sep = '\x00' if data.endswith('\x00') else ' ' 

1869 if data.endswith(sep): 

1870 data = data[:-1] 

1871 cmdline = data.split(sep) 

1872 # Sometimes last char is a null byte '\0' but the args are 

1873 # separated by spaces, see: https://github.com/giampaolo/psutil/ 

1874 # issues/1179#issuecomment-552984549 

1875 if sep == '\x00' and len(cmdline) == 1 and ' ' in data: 

1876 cmdline = data.split(' ') 

1877 return cmdline 

1878 

1879 @wrap_exceptions 

1880 def environ(self): 

1881 with open_text("%s/%s/environ" % (self._procfs_path, self.pid)) as f: 

1882 data = f.read() 

1883 return parse_environ_block(data) 

1884 

1885 @wrap_exceptions 

1886 def terminal(self): 

1887 tty_nr = int(self._parse_stat_file()['ttynr']) 

1888 tmap = _psposix.get_terminal_map() 

1889 try: 

1890 return tmap[tty_nr] 

1891 except KeyError: 

1892 return None 

1893 

1894 # May not be available on old kernels. 

1895 if os.path.exists('/proc/%s/io' % os.getpid()): 

1896 

1897 @wrap_exceptions 

1898 def io_counters(self): 

1899 fname = "%s/%s/io" % (self._procfs_path, self.pid) 

1900 fields = {} 

1901 with open_binary(fname) as f: 

1902 for line in f: 

1903 # https://github.com/giampaolo/psutil/issues/1004 

1904 line = line.strip() 

1905 if line: 

1906 try: 

1907 name, value = line.split(b': ') 

1908 except ValueError: 

1909 # https://github.com/giampaolo/psutil/issues/1004 

1910 continue 

1911 else: 

1912 fields[name] = int(value) 

1913 if not fields: 

1914 raise RuntimeError("%s file was empty" % fname) 

1915 try: 

1916 return pio( 

1917 fields[b'syscr'], # read syscalls 

1918 fields[b'syscw'], # write syscalls 

1919 fields[b'read_bytes'], # read bytes 

1920 fields[b'write_bytes'], # write bytes 

1921 fields[b'rchar'], # read chars 

1922 fields[b'wchar'], # write chars 

1923 ) 

1924 except KeyError as err: 

1925 raise ValueError( 

1926 "%r field was not found in %s; found fields are %r" 

1927 % (err.args[0], fname, fields) 

1928 ) 

1929 

1930 @wrap_exceptions 

1931 def cpu_times(self): 

1932 values = self._parse_stat_file() 

1933 utime = float(values['utime']) / CLOCK_TICKS 

1934 stime = float(values['stime']) / CLOCK_TICKS 

1935 children_utime = float(values['children_utime']) / CLOCK_TICKS 

1936 children_stime = float(values['children_stime']) / CLOCK_TICKS 

1937 iowait = float(values['blkio_ticks']) / CLOCK_TICKS 

1938 return pcputimes(utime, stime, children_utime, children_stime, iowait) 

1939 

1940 @wrap_exceptions 

1941 def cpu_num(self): 

1942 """What CPU the process is on.""" 

1943 return int(self._parse_stat_file()['cpu_num']) 

1944 

1945 @wrap_exceptions 

1946 def wait(self, timeout=None): 

1947 return _psposix.wait_pid(self.pid, timeout, self._name) 

1948 

1949 @wrap_exceptions 

1950 def create_time(self): 

1951 ctime = float(self._parse_stat_file()['create_time']) 

1952 # According to documentation, starttime is in field 21 and the 

1953 # unit is jiffies (clock ticks). 

1954 # We first divide it for clock ticks and then add uptime returning 

1955 # seconds since the epoch. 

1956 # Also use cached value if available. 

1957 bt = BOOT_TIME or boot_time() 

1958 return (ctime / CLOCK_TICKS) + bt 

1959 

1960 @wrap_exceptions 

1961 def memory_info(self): 

1962 # ============================================================ 

1963 # | FIELD | DESCRIPTION | AKA | TOP | 

1964 # ============================================================ 

1965 # | rss | resident set size | | RES | 

1966 # | vms | total program size | size | VIRT | 

1967 # | shared | shared pages (from shared mappings) | | SHR | 

1968 # | text | text ('code') | trs | CODE | 

1969 # | lib | library (unused in Linux 2.6) | lrs | | 

1970 # | data | data + stack | drs | DATA | 

1971 # | dirty | dirty pages (unused in Linux 2.6) | dt | | 

1972 # ============================================================ 

1973 with open_binary("%s/%s/statm" % (self._procfs_path, self.pid)) as f: 

1974 vms, rss, shared, text, lib, data, dirty = ( 

1975 int(x) * PAGESIZE for x in f.readline().split()[:7] 

1976 ) 

1977 return pmem(rss, vms, shared, text, lib, data, dirty) 

1978 

1979 if HAS_PROC_SMAPS_ROLLUP or HAS_PROC_SMAPS: 

1980 

1981 def _parse_smaps_rollup(self): 

1982 # /proc/pid/smaps_rollup was added to Linux in 2017. Faster 

1983 # than /proc/pid/smaps. It reports higher PSS than */smaps 

1984 # (from 1k up to 200k higher; tested against all processes). 

1985 # IMPORTANT: /proc/pid/smaps_rollup is weird, because it 

1986 # raises ESRCH / ENOENT for many PIDs, even if they're alive 

1987 # (also as root). In that case we'll use /proc/pid/smaps as 

1988 # fallback, which is slower but has a +50% success rate 

1989 # compared to /proc/pid/smaps_rollup. 

1990 uss = pss = swap = 0 

1991 with open_binary( 

1992 "{}/{}/smaps_rollup".format(self._procfs_path, self.pid) 

1993 ) as f: 

1994 for line in f: 

1995 if line.startswith(b"Private_"): 

1996 # Private_Clean, Private_Dirty, Private_Hugetlb 

1997 uss += int(line.split()[1]) * 1024 

1998 elif line.startswith(b"Pss:"): 

1999 pss = int(line.split()[1]) * 1024 

2000 elif line.startswith(b"Swap:"): 

2001 swap = int(line.split()[1]) * 1024 

2002 return (uss, pss, swap) 

2003 

2004 @wrap_exceptions 

2005 def _parse_smaps( 

2006 self, 

2007 # Gets Private_Clean, Private_Dirty, Private_Hugetlb. 

2008 _private_re=re.compile(br"\nPrivate.*:\s+(\d+)"), 

2009 _pss_re=re.compile(br"\nPss\:\s+(\d+)"), 

2010 _swap_re=re.compile(br"\nSwap\:\s+(\d+)"), 

2011 ): 

2012 # /proc/pid/smaps does not exist on kernels < 2.6.14 or if 

2013 # CONFIG_MMU kernel configuration option is not enabled. 

2014 

2015 # Note: using 3 regexes is faster than reading the file 

2016 # line by line. 

2017 # XXX: on Python 3 the 2 regexes are 30% slower than on 

2018 # Python 2 though. Figure out why. 

2019 # 

2020 # You might be tempted to calculate USS by subtracting 

2021 # the "shared" value from the "resident" value in 

2022 # /proc/<pid>/statm. But at least on Linux, statm's "shared" 

2023 # value actually counts pages backed by files, which has 

2024 # little to do with whether the pages are actually shared. 

2025 # /proc/self/smaps on the other hand appears to give us the 

2026 # correct information. 

2027 smaps_data = self._read_smaps_file() 

2028 # Note: smaps file can be empty for certain processes. 

2029 # The code below will not crash though and will result to 0. 

2030 uss = sum(map(int, _private_re.findall(smaps_data))) * 1024 

2031 pss = sum(map(int, _pss_re.findall(smaps_data))) * 1024 

2032 swap = sum(map(int, _swap_re.findall(smaps_data))) * 1024 

2033 return (uss, pss, swap) 

2034 

2035 @wrap_exceptions 

2036 def memory_full_info(self): 

2037 if HAS_PROC_SMAPS_ROLLUP: # faster 

2038 try: 

2039 uss, pss, swap = self._parse_smaps_rollup() 

2040 except (ProcessLookupError, FileNotFoundError): 

2041 uss, pss, swap = self._parse_smaps() 

2042 else: 

2043 uss, pss, swap = self._parse_smaps() 

2044 basic_mem = self.memory_info() 

2045 return pfullmem(*basic_mem + (uss, pss, swap)) 

2046 

2047 else: 

2048 memory_full_info = memory_info 

2049 

2050 if HAS_PROC_SMAPS: 

2051 

2052 @wrap_exceptions 

2053 def memory_maps(self): 

2054 """Return process's mapped memory regions as a list of named 

2055 tuples. Fields are explained in 'man proc'; here is an updated 

2056 (Apr 2012) version: http://goo.gl/fmebo. 

2057 

2058 /proc/{PID}/smaps does not exist on kernels < 2.6.14 or if 

2059 CONFIG_MMU kernel configuration option is not enabled. 

2060 """ 

2061 

2062 def get_blocks(lines, current_block): 

2063 data = {} 

2064 for line in lines: 

2065 fields = line.split(None, 5) 

2066 if not fields[0].endswith(b':'): 

2067 # new block section 

2068 yield (current_block.pop(), data) 

2069 current_block.append(line) 

2070 else: 

2071 try: 

2072 data[fields[0]] = int(fields[1]) * 1024 

2073 except ValueError: 

2074 if fields[0].startswith(b'VmFlags:'): 

2075 # see issue #369 

2076 continue 

2077 else: 

2078 raise ValueError( 

2079 "don't know how to interpret line %r" 

2080 % line 

2081 ) 

2082 yield (current_block.pop(), data) 

2083 

2084 data = self._read_smaps_file() 

2085 # Note: smaps file can be empty for certain processes or for 

2086 # zombies. 

2087 if not data: 

2088 self._raise_if_zombie() 

2089 return [] 

2090 lines = data.split(b'\n') 

2091 ls = [] 

2092 first_line = lines.pop(0) 

2093 current_block = [first_line] 

2094 for header, data in get_blocks(lines, current_block): 

2095 hfields = header.split(None, 5) 

2096 try: 

2097 addr, perms, offset, dev, inode, path = hfields 

2098 except ValueError: 

2099 addr, perms, offset, dev, inode, path = hfields + [''] 

2100 if not path: 

2101 path = '[anon]' 

2102 else: 

2103 if PY3: 

2104 path = decode(path) 

2105 path = path.strip() 

2106 if path.endswith(' (deleted)') and not path_exists_strict( 

2107 path 

2108 ): 

2109 path = path[:-10] 

2110 item = ( 

2111 decode(addr), 

2112 decode(perms), 

2113 path, 

2114 data.get(b'Rss:', 0), 

2115 data.get(b'Size:', 0), 

2116 data.get(b'Pss:', 0), 

2117 data.get(b'Shared_Clean:', 0), 

2118 data.get(b'Shared_Dirty:', 0), 

2119 data.get(b'Private_Clean:', 0), 

2120 data.get(b'Private_Dirty:', 0), 

2121 data.get(b'Referenced:', 0), 

2122 data.get(b'Anonymous:', 0), 

2123 data.get(b'Swap:', 0), 

2124 ) 

2125 ls.append(item) 

2126 return ls 

2127 

2128 @wrap_exceptions 

2129 def cwd(self): 

2130 return readlink("%s/%s/cwd" % (self._procfs_path, self.pid)) 

2131 

2132 @wrap_exceptions 

2133 def num_ctx_switches( 

2134 self, _ctxsw_re=re.compile(br'ctxt_switches:\t(\d+)') 

2135 ): 

2136 data = self._read_status_file() 

2137 ctxsw = _ctxsw_re.findall(data) 

2138 if not ctxsw: 

2139 raise NotImplementedError( 

2140 "'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches'" 

2141 "lines were not found in %s/%s/status; the kernel is " 

2142 "probably older than 2.6.23" % (self._procfs_path, self.pid) 

2143 ) 

2144 else: 

2145 return _common.pctxsw(int(ctxsw[0]), int(ctxsw[1])) 

2146 

2147 @wrap_exceptions 

2148 def num_threads(self, _num_threads_re=re.compile(br'Threads:\t(\d+)')): 

2149 # Note: on Python 3 using a re is faster than iterating over file 

2150 # line by line. On Python 2 is the exact opposite, and iterating 

2151 # over a file on Python 3 is slower than on Python 2. 

2152 data = self._read_status_file() 

2153 return int(_num_threads_re.findall(data)[0]) 

2154 

2155 @wrap_exceptions 

2156 def threads(self): 

2157 thread_ids = os.listdir("%s/%s/task" % (self._procfs_path, self.pid)) 

2158 thread_ids.sort() 

2159 retlist = [] 

2160 hit_enoent = False 

2161 for thread_id in thread_ids: 

2162 fname = "%s/%s/task/%s/stat" % ( 

2163 self._procfs_path, 

2164 self.pid, 

2165 thread_id, 

2166 ) 

2167 try: 

2168 with open_binary(fname) as f: 

2169 st = f.read().strip() 

2170 except (FileNotFoundError, ProcessLookupError): 

2171 # no such file or directory or no such process; 

2172 # it means thread disappeared on us 

2173 hit_enoent = True 

2174 continue 

2175 # ignore the first two values ("pid (exe)") 

2176 st = st[st.find(b')') + 2 :] 

2177 values = st.split(b' ') 

2178 utime = float(values[11]) / CLOCK_TICKS 

2179 stime = float(values[12]) / CLOCK_TICKS 

2180 ntuple = _common.pthread(int(thread_id), utime, stime) 

2181 retlist.append(ntuple) 

2182 if hit_enoent: 

2183 self._raise_if_not_alive() 

2184 return retlist 

2185 

2186 @wrap_exceptions 

2187 def nice_get(self): 

2188 # with open_text('%s/%s/stat' % (self._procfs_path, self.pid)) as f: 

2189 # data = f.read() 

2190 # return int(data.split()[18]) 

2191 

2192 # Use C implementation 

2193 return cext_posix.getpriority(self.pid) 

2194 

2195 @wrap_exceptions 

2196 def nice_set(self, value): 

2197 return cext_posix.setpriority(self.pid, value) 

2198 

2199 # starting from CentOS 6. 

2200 if HAS_CPU_AFFINITY: 

2201 

2202 @wrap_exceptions 

2203 def cpu_affinity_get(self): 

2204 return cext.proc_cpu_affinity_get(self.pid) 

2205 

2206 def _get_eligible_cpus( 

2207 self, _re=re.compile(br"Cpus_allowed_list:\t(\d+)-(\d+)") 

2208 ): 

2209 # See: https://github.com/giampaolo/psutil/issues/956 

2210 data = self._read_status_file() 

2211 match = _re.findall(data) 

2212 if match: 

2213 return list(range(int(match[0][0]), int(match[0][1]) + 1)) 

2214 else: 

2215 return list(range(len(per_cpu_times()))) 

2216 

2217 @wrap_exceptions 

2218 def cpu_affinity_set(self, cpus): 

2219 try: 

2220 cext.proc_cpu_affinity_set(self.pid, cpus) 

2221 except (OSError, ValueError) as err: 

2222 if isinstance(err, ValueError) or err.errno == errno.EINVAL: 

2223 eligible_cpus = self._get_eligible_cpus() 

2224 all_cpus = tuple(range(len(per_cpu_times()))) 

2225 for cpu in cpus: 

2226 if cpu not in all_cpus: 

2227 raise ValueError( 

2228 "invalid CPU number %r; choose between %s" 

2229 % (cpu, eligible_cpus) 

2230 ) 

2231 if cpu not in eligible_cpus: 

2232 raise ValueError( 

2233 "CPU number %r is not eligible; choose " 

2234 "between %s" % (cpu, eligible_cpus) 

2235 ) 

2236 raise 

2237 

2238 # only starting from kernel 2.6.13 

2239 if HAS_PROC_IO_PRIORITY: 

2240 

2241 @wrap_exceptions 

2242 def ionice_get(self): 

2243 ioclass, value = cext.proc_ioprio_get(self.pid) 

2244 if enum is not None: 

2245 ioclass = IOPriority(ioclass) 

2246 return _common.pionice(ioclass, value) 

2247 

2248 @wrap_exceptions 

2249 def ionice_set(self, ioclass, value): 

2250 if value is None: 

2251 value = 0 

2252 if value and ioclass in (IOPRIO_CLASS_IDLE, IOPRIO_CLASS_NONE): 

2253 raise ValueError("%r ioclass accepts no value" % ioclass) 

2254 if value < 0 or value > 7: 

2255 msg = "value not in 0-7 range" 

2256 raise ValueError(msg) 

2257 return cext.proc_ioprio_set(self.pid, ioclass, value) 

2258 

2259 if prlimit is not None: 

2260 

2261 @wrap_exceptions 

2262 def rlimit(self, resource_, limits=None): 

2263 # If pid is 0 prlimit() applies to the calling process and 

2264 # we don't want that. We should never get here though as 

2265 # PID 0 is not supported on Linux. 

2266 if self.pid == 0: 

2267 msg = "can't use prlimit() against PID 0 process" 

2268 raise ValueError(msg) 

2269 try: 

2270 if limits is None: 

2271 # get 

2272 return prlimit(self.pid, resource_) 

2273 else: 

2274 # set 

2275 if len(limits) != 2: 

2276 msg = ( 

2277 "second argument must be a (soft, hard) " 

2278 + "tuple, got %s" % repr(limits) 

2279 ) 

2280 raise ValueError(msg) 

2281 prlimit(self.pid, resource_, limits) 

2282 except OSError as err: 

2283 if err.errno == errno.ENOSYS: 

2284 # I saw this happening on Travis: 

2285 # https://travis-ci.org/giampaolo/psutil/jobs/51368273 

2286 self._raise_if_zombie() 

2287 raise 

2288 

2289 @wrap_exceptions 

2290 def status(self): 

2291 letter = self._parse_stat_file()['status'] 

2292 if PY3: 

2293 letter = letter.decode() 

2294 # XXX is '?' legit? (we're not supposed to return it anyway) 

2295 return PROC_STATUSES.get(letter, '?') 

2296 

2297 @wrap_exceptions 

2298 def open_files(self): 

2299 retlist = [] 

2300 files = os.listdir("%s/%s/fd" % (self._procfs_path, self.pid)) 

2301 hit_enoent = False 

2302 for fd in files: 

2303 file = "%s/%s/fd/%s" % (self._procfs_path, self.pid, fd) 

2304 try: 

2305 path = readlink(file) 

2306 except (FileNotFoundError, ProcessLookupError): 

2307 # ENOENT == file which is gone in the meantime 

2308 hit_enoent = True 

2309 continue 

2310 except OSError as err: 

2311 if err.errno == errno.EINVAL: 

2312 # not a link 

2313 continue 

2314 if err.errno == errno.ENAMETOOLONG: 

2315 # file name too long 

2316 debug(err) 

2317 continue 

2318 raise 

2319 else: 

2320 # If path is not an absolute there's no way to tell 

2321 # whether it's a regular file or not, so we skip it. 

2322 # A regular file is always supposed to be have an 

2323 # absolute path though. 

2324 if path.startswith('/') and isfile_strict(path): 

2325 # Get file position and flags. 

2326 file = "%s/%s/fdinfo/%s" % ( 

2327 self._procfs_path, 

2328 self.pid, 

2329 fd, 

2330 ) 

2331 try: 

2332 with open_binary(file) as f: 

2333 pos = int(f.readline().split()[1]) 

2334 flags = int(f.readline().split()[1], 8) 

2335 except (FileNotFoundError, ProcessLookupError): 

2336 # fd gone in the meantime; process may 

2337 # still be alive 

2338 hit_enoent = True 

2339 else: 

2340 mode = file_flags_to_mode(flags) 

2341 ntuple = popenfile( 

2342 path, int(fd), int(pos), mode, flags 

2343 ) 

2344 retlist.append(ntuple) 

2345 if hit_enoent: 

2346 self._raise_if_not_alive() 

2347 return retlist 

2348 

2349 @wrap_exceptions 

2350 def connections(self, kind='inet'): 

2351 ret = _connections.retrieve(kind, self.pid) 

2352 self._raise_if_not_alive() 

2353 return ret 

2354 

2355 @wrap_exceptions 

2356 def num_fds(self): 

2357 return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid))) 

2358 

2359 @wrap_exceptions 

2360 def ppid(self): 

2361 return int(self._parse_stat_file()['ppid']) 

2362 

2363 @wrap_exceptions 

2364 def uids(self, _uids_re=re.compile(br'Uid:\t(\d+)\t(\d+)\t(\d+)')): 

2365 data = self._read_status_file() 

2366 real, effective, saved = _uids_re.findall(data)[0] 

2367 return _common.puids(int(real), int(effective), int(saved)) 

2368 

2369 @wrap_exceptions 

2370 def gids(self, _gids_re=re.compile(br'Gid:\t(\d+)\t(\d+)\t(\d+)')): 

2371 data = self._read_status_file() 

2372 real, effective, saved = _gids_re.findall(data)[0] 

2373 return _common.pgids(int(real), int(effective), int(saved))