1# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""Linux platform implementation."""
6
7import base64
8import collections
9import enum
10import errno
11import functools
12import glob
13import os
14import re
15import resource
16import socket
17import struct
18import sys
19import warnings
20from collections import defaultdict
21from collections import namedtuple
22
23from . import _common
24from . import _ntuples as ntp
25from . import _psposix
26from . import _psutil_linux as cext
27from ._common import ENCODING
28from ._common import NIC_DUPLEX_FULL
29from ._common import NIC_DUPLEX_HALF
30from ._common import NIC_DUPLEX_UNKNOWN
31from ._common import AccessDenied
32from ._common import NoSuchProcess
33from ._common import ZombieProcess
34from ._common import bcat
35from ._common import cat
36from ._common import debug
37from ._common import decode
38from ._common import get_procfs_path
39from ._common import isfile_strict
40from ._common import memoize
41from ._common import memoize_when_activated
42from ._common import open_binary
43from ._common import open_text
44from ._common import parse_environ_block
45from ._common import path_exists_strict
46from ._common import supports_ipv6
47from ._common import usage_percent
48
49# fmt: off
50__extra__all__ = [
51 'PROCFS_PATH',
52 # io prio constants
53 "IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
54 "IOPRIO_CLASS_IDLE",
55 # connection status constants
56 "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
57 "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
58 "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING",
59]
60# fmt: on
61
62
63# =====================================================================
64# --- globals
65# =====================================================================
66
67
68POWER_SUPPLY_PATH = "/sys/class/power_supply"
69HAS_PROC_SMAPS = os.path.exists(f"/proc/{os.getpid()}/smaps")
70HAS_PROC_SMAPS_ROLLUP = os.path.exists(f"/proc/{os.getpid()}/smaps_rollup")
71HAS_PROC_IO_PRIORITY = hasattr(cext, "proc_ioprio_get")
72HAS_CPU_AFFINITY = hasattr(cext, "proc_cpu_affinity_get")
73
74# Number of clock ticks per second
75CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
76PAGESIZE = cext.getpagesize()
77LITTLE_ENDIAN = sys.byteorder == 'little'
78UNSET = object()
79
80# "man iostat" states that sectors are equivalent with blocks and have
81# a size of 512 bytes. Despite this value can be queried at runtime
82# via /sys/block/{DISK}/queue/hw_sector_size and results may vary
83# between 1k, 2k, or 4k... 512 appears to be a magic constant used
84# throughout Linux source code:
85# * https://stackoverflow.com/a/38136179/376587
86# * https://lists.gt.net/linux/kernel/2241060
87# * https://github.com/giampaolo/psutil/issues/1305
88# * https://github.com/torvalds/linux/blob/
89# 4f671fe2f9523a1ea206f63fe60a7c7b3a56d5c7/include/linux/bio.h#L99
90# * https://lkml.org/lkml/2015/8/17/234
91DISK_SECTOR_SIZE = 512
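# Worked example (illustrative only): disk_io_counters() below multiplies the
# sector counts read from /proc/diskstats by this constant, so a device
# reporting 2048 sectors read is accounted as 2048 * DISK_SECTOR_SIZE ==
# 1048576 bytes.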
92
93AddressFamily = enum.IntEnum(
94 'AddressFamily', {'AF_LINK': int(socket.AF_PACKET)}
95)
96AF_LINK = AddressFamily.AF_LINK
97
98
99# ioprio_* constants http://linux.die.net/man/2/ioprio_get
100class IOPriority(enum.IntEnum):
101 IOPRIO_CLASS_NONE = 0
102 IOPRIO_CLASS_RT = 1
103 IOPRIO_CLASS_BE = 2
104 IOPRIO_CLASS_IDLE = 3
105
106
107globals().update(IOPriority.__members__)
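# For reference ("man 2 ioprio_get"): the kernel packs the I/O class and the
# class data into a single value, roughly (ioclass << 13) | data, so e.g. a
# best-effort priority of 4 would be (IOPRIO_CLASS_BE << 13) | 4 == 16388.
# The packing/unpacking itself is expected to be handled by the C extension
# (cext.proc_ioprio_get / proc_ioprio_set), not by this module.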
108
109# See:
110# https://github.com/torvalds/linux/blame/master/fs/proc/array.c
111# ...and (TASK_* constants):
112# https://github.com/torvalds/linux/blob/master/include/linux/sched.h
113PROC_STATUSES = {
114 "R": _common.STATUS_RUNNING,
115 "S": _common.STATUS_SLEEPING,
116 "D": _common.STATUS_DISK_SLEEP,
117 "T": _common.STATUS_STOPPED,
118 "t": _common.STATUS_TRACING_STOP,
119 "Z": _common.STATUS_ZOMBIE,
120 "X": _common.STATUS_DEAD,
121 "x": _common.STATUS_DEAD,
122 "K": _common.STATUS_WAKE_KILL,
123 "W": _common.STATUS_WAKING,
124 "I": _common.STATUS_IDLE,
125 "P": _common.STATUS_PARKED,
126}
127
128# https://github.com/torvalds/linux/blob/master/include/net/tcp_states.h
129TCP_STATUSES = {
130 "01": _common.CONN_ESTABLISHED,
131 "02": _common.CONN_SYN_SENT,
132 "03": _common.CONN_SYN_RECV,
133 "04": _common.CONN_FIN_WAIT1,
134 "05": _common.CONN_FIN_WAIT2,
135 "06": _common.CONN_TIME_WAIT,
136 "07": _common.CONN_CLOSE,
137 "08": _common.CONN_CLOSE_WAIT,
138 "09": _common.CONN_LAST_ACK,
139 "0A": _common.CONN_LISTEN,
140 "0B": _common.CONN_CLOSING,
141}
142
143
144# =====================================================================
145# --- utils
146# =====================================================================
147
148
149def readlink(path):
150 """Wrapper around os.readlink()."""
151 assert isinstance(path, str), path
152 path = os.readlink(path)
153 # readlink() might return paths containing null bytes ('\x00')
154 # resulting in "TypeError: must be encoded string without NULL
155 # bytes, not str" errors when the string is passed to other
156 # fs-related functions (os.*, open(), ...).
157 # Apparently everything after '\x00' is garbage (we can have
158 # ' (deleted)', 'new' and possibly others), see:
159 # https://github.com/giampaolo/psutil/issues/717
160 path = path.split('\x00')[0]
161 # Certain paths have ' (deleted)' appended. Usually this is
162 # bogus as the file actually exists. Even if it doesn't we
163 # don't care.
164 if path.endswith(' (deleted)') and not path_exists_strict(path):
165 path = path[:-10]
166 return path
167
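# Hedged example of what readlink() works around: on some kernels
# os.readlink("/proc/<pid>/exe") may return something like
# "/usr/bin/python3\x00 (deleted)" for a replaced binary; the split on
# '\x00' above reduces it to "/usr/bin/python3".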
168
169def file_flags_to_mode(flags):
170 """Convert file's open() flags into a readable string.
171 Used by Process.open_files().
172 """
173 modes_map = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'}
174 mode = modes_map[flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)]
175 if flags & os.O_APPEND:
176 mode = mode.replace('w', 'a', 1)
177 mode = mode.replace('w+', 'r+')
178 # possible values: r, w, a, r+, a+
179 return mode
180
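# Sketch of the resulting mapping (derived from the code above):
#   file_flags_to_mode(os.O_RDONLY)               -> 'r'
#   file_flags_to_mode(os.O_WRONLY)               -> 'w'
#   file_flags_to_mode(os.O_WRONLY | os.O_APPEND) -> 'a'
#   file_flags_to_mode(os.O_RDWR)                 -> 'r+'
#   file_flags_to_mode(os.O_RDWR | os.O_APPEND)   -> 'a+'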
181
182def is_storage_device(name):
183 """Return True if the given name refers to a root device (e.g.
184 "sda", "nvme0n1") as opposed to a logical partition (e.g. "sda1",
185 "nvme0n1p1"). If name is a virtual device (e.g. "loop1", "ram")
186 return True.
187 """
188 # Re-adapted from iostat source code, see:
189 # https://github.com/sysstat/sysstat/blob/
190 # 97912938cd476645b267280069e83b1c8dc0e1c7/common.c#L208
191 # Some devices may have a slash in their name (e.g. cciss/c0d0...).
192 name = name.replace('/', '!')
193 including_virtual = True
194 if including_virtual:
195 path = f"/sys/block/{name}"
196 else:
197 path = f"/sys/block/{name}/device"
198 return os.access(path, os.F_OK)
199
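# For illustration, assuming a conventional sysfs layout:
#   is_storage_device("sda")     -> True   (/sys/block/sda exists)
#   is_storage_device("sda1")    -> False  (partitions have no /sys/block entry)
#   is_storage_device("nvme0n1") -> True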
200
201@memoize
202def _scputimes_ntuple(procfs_path):
203 """Return a namedtuple of variable fields depending on the CPU times
204 available on this Linux kernel version which may be:
205 (user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
206 [guest_nice]]])
207 Used by cpu_times() function.
208 """
209 with open_binary(f"{procfs_path}/stat") as f:
210 values = f.readline().split()[1:]
211 fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
212 vlen = len(values)
213 if vlen >= 8:
214 # Linux >= 2.6.11
215 fields.append('steal')
216 if vlen >= 9:
217 # Linux >= 2.6.24
218 fields.append('guest')
219 if vlen >= 10:
220 # Linux >= 3.2.0
221 fields.append('guest_nice')
222 return namedtuple('scputimes', fields)
223
224
225# Set it into _ntuples.py namespace.
226try:
227 ntp.scputimes = _scputimes_ntuple("/proc")
228except Exception as err: # noqa: BLE001
229 # Don't want to crash at import time.
230 debug(f"ignoring exception on import: {err!r}")
231 ntp.scputimes = namedtuple('scputimes', 'user system idle')(0.0, 0.0, 0.0)
232
# XXX: must also be available at this module level in order to be
234# serialized (tests/test_misc.py::TestMisc::test_serialization).
235scputimes = ntp.scputimes
236
237
238# =====================================================================
239# --- system memory
240# =====================================================================
241
242
243def calculate_avail_vmem(mems):
244 """Fallback for kernels < 3.14 where /proc/meminfo does not provide
245 "MemAvailable", see:
246 https://blog.famzah.net/2014/09/24/.
247
248 This code reimplements the algorithm outlined here:
249 https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/
250 commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
251
252 We use this function also when "MemAvailable" returns 0 (possibly a
253 kernel bug, see: https://github.com/giampaolo/psutil/issues/1915).
254 In that case this routine matches "free" CLI tool result ("available"
255 column).
256
257 XXX: on recent kernels this calculation may differ by ~1.5% compared
258 to "MemAvailable:", as it's calculated slightly differently.
259 It is still way more realistic than doing (free + cached) though.
260 See:
261 * https://gitlab.com/procps-ng/procps/issues/42
262 * https://github.com/famzah/linux-memavailable-procfs/issues/2
263 """
264 # Note about "fallback" value. According to:
265 # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/
266 # commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
    # ...long ago "available" memory was calculated as (free + cached).
    # We use that fallback when one of these fields is missing from
    # /proc/meminfo:
269 # "Active(file)": introduced in 2.6.28 / Dec 2008
270 # "Inactive(file)": introduced in 2.6.28 / Dec 2008
271 # "SReclaimable": introduced in 2.6.19 / Nov 2006
272 # /proc/zoneinfo: introduced in 2.6.13 / Aug 2005
273 free = mems[b'MemFree:']
274 fallback = free + mems.get(b"Cached:", 0)
275 try:
276 lru_active_file = mems[b'Active(file):']
277 lru_inactive_file = mems[b'Inactive(file):']
278 slab_reclaimable = mems[b'SReclaimable:']
279 except KeyError as err:
280 debug(
281 f"{err.args[0]} is missing from /proc/meminfo; using an"
282 " approximation for calculating available memory"
283 )
284 return fallback
285 try:
286 f = open_binary(f"{get_procfs_path()}/zoneinfo")
287 except OSError:
288 return fallback # kernel 2.6.13
289
290 watermark_low = 0
291 with f:
292 for line in f:
293 line = line.strip()
294 if line.startswith(b'low'):
295 watermark_low += int(line.split()[1])
296 watermark_low *= PAGESIZE
297
298 avail = free - watermark_low
299 pagecache = lru_active_file + lru_inactive_file
300 pagecache -= min(pagecache / 2, watermark_low)
301 avail += pagecache
302 avail += slab_reclaimable - min(slab_reclaimable / 2.0, watermark_low)
303 return int(avail)
304
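# Rough outline of the estimate performed above (all values in bytes):
#
#   avail = MemFree - watermark_low
#           + (Active(file) + Inactive(file))
#           - min((Active(file) + Inactive(file)) / 2, watermark_low)
#           + SReclaimable - min(SReclaimable / 2, watermark_low)
#
# where watermark_low is the sum of all the "low" watermarks listed in
# /proc/zoneinfo multiplied by the page size.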
305
306def virtual_memory():
307 """Report virtual memory stats.
308 This implementation mimics procps-ng-3.3.12, aka "free" CLI tool:
309 https://gitlab.com/procps-ng/procps/blob/
310 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L778-791
311 The returned values are supposed to match both "free" and "vmstat -s"
312 CLI tools.
313 """
314 missing_fields = []
315 mems = {}
316 with open_binary(f"{get_procfs_path()}/meminfo") as f:
317 for line in f:
318 fields = line.split()
319 mems[fields[0]] = int(fields[1]) * 1024
320
321 # /proc doc states that the available fields in /proc/meminfo vary
322 # by architecture and compile options, but these 3 values are also
323 # returned by sysinfo(2); as such we assume they are always there.
324 total = mems[b'MemTotal:']
325 free = mems[b'MemFree:']
326 try:
327 buffers = mems[b'Buffers:']
328 except KeyError:
329 # https://github.com/giampaolo/psutil/issues/1010
330 buffers = 0
331 missing_fields.append('buffers')
332 try:
333 cached = mems[b"Cached:"]
334 except KeyError:
335 cached = 0
336 missing_fields.append('cached')
337 else:
338 # "free" cmdline utility sums reclaimable to cached.
339 # Older versions of procps used to add slab memory instead.
340 # This got changed in:
341 # https://gitlab.com/procps-ng/procps/commit/
342 # 05d751c4f076a2f0118b914c5e51cfbb4762ad8e
343 cached += mems.get(b"SReclaimable:", 0) # since kernel 2.6.19
344
345 try:
346 shared = mems[b'Shmem:'] # since kernel 2.6.32
347 except KeyError:
348 try:
349 shared = mems[b'MemShared:'] # kernels 2.4
350 except KeyError:
351 shared = 0
352 missing_fields.append('shared')
353
354 try:
355 active = mems[b"Active:"]
356 except KeyError:
357 active = 0
358 missing_fields.append('active')
359
360 try:
361 inactive = mems[b"Inactive:"]
362 except KeyError:
363 try:
364 inactive = (
365 mems[b"Inact_dirty:"]
366 + mems[b"Inact_clean:"]
367 + mems[b"Inact_laundry:"]
368 )
369 except KeyError:
370 inactive = 0
371 missing_fields.append('inactive')
372
373 try:
374 slab = mems[b"Slab:"]
375 except KeyError:
376 slab = 0
377
    # - starting from psutil 4.4.0 we match free's "available" column.
    #   Before psutil 4.4.0 we calculated it as (free + buffers + cached),
    #   which matched htop.
381 # - free and htop available memory differs as per:
382 # http://askubuntu.com/a/369589
383 # http://unix.stackexchange.com/a/65852/168884
384 # - MemAvailable has been introduced in kernel 3.14
385 try:
386 avail = mems[b'MemAvailable:']
387 except KeyError:
388 avail = calculate_avail_vmem(mems)
389 else:
390 if avail == 0:
391 # Yes, it can happen (probably a kernel bug):
392 # https://github.com/giampaolo/psutil/issues/1915
            # In this case the "free" CLI tool falls back on an estimate;
            # we do the same so the two keep matching.
395 avail = calculate_avail_vmem(mems)
396
397 if avail < 0:
398 avail = 0
399 missing_fields.append('available')
400 elif avail > total:
401 # If avail is greater than total or our calculation overflows,
        # that's symptomatic of running within an LXC container, where such
403 # values will be dramatically distorted over those of the host.
404 # https://gitlab.com/procps-ng/procps/blob/
405 # 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L764
406 avail = free
407
408 used = total - avail
409
410 percent = usage_percent((total - avail), total, round_=1)
411
412 # Warn about missing metrics which are set to 0.
413 if missing_fields:
414 msg = "{} memory stats couldn't be determined and {} set to 0".format(
415 ", ".join(missing_fields),
416 "was" if len(missing_fields) == 1 else "were",
417 )
418 warnings.warn(msg, RuntimeWarning, stacklevel=2)
419
420 return ntp.svmem(
421 total,
422 avail,
423 percent,
424 used,
425 free,
426 active,
427 inactive,
428 buffers,
429 cached,
430 shared,
431 slab,
432 )
433
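# Usage sketch (all field values are machine dependent):
#
#   mem = virtual_memory()
#   mem.available  # bytes obtainable without swapping, per the logic above
#   mem.percent    # usage_percent(total - available, total), 1 decimal digit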
434
435def swap_memory():
436 """Return swap memory metrics."""
437 mems = {}
438 with open_binary(f"{get_procfs_path()}/meminfo") as f:
439 for line in f:
440 fields = line.split()
441 mems[fields[0]] = int(fields[1]) * 1024
    # We prefer /proc/meminfo over the sysinfo() syscall so that
    # psutil.PROCFS_PATH is honored, allowing retrieval inside
    # Linux containers, see:
445 # https://github.com/giampaolo/psutil/issues/1015
446 try:
447 total = mems[b'SwapTotal:']
448 free = mems[b'SwapFree:']
449 except KeyError:
450 _, _, _, _, total, free, unit_multiplier = cext.linux_sysinfo()
451 total *= unit_multiplier
452 free *= unit_multiplier
453
454 used = total - free
455 percent = usage_percent(used, total, round_=1)
456 # get pgin/pgouts
457 try:
458 f = open_binary(f"{get_procfs_path()}/vmstat")
459 except OSError as err:
460 # see https://github.com/giampaolo/psutil/issues/722
461 msg = (
462 "'sin' and 'sout' swap memory stats couldn't "
463 f"be determined and were set to 0 ({err})"
464 )
465 warnings.warn(msg, RuntimeWarning, stacklevel=2)
466 sin = sout = 0
467 else:
468 with f:
469 sin = sout = None
470 for line in f:
                # values are expressed in units of 4 KB; we want
                # bytes instead
473 if line.startswith(b'pswpin'):
474 sin = int(line.split(b' ')[1]) * 4 * 1024
475 elif line.startswith(b'pswpout'):
476 sout = int(line.split(b' ')[1]) * 4 * 1024
477 if sin is not None and sout is not None:
478 break
479 else:
480 # we might get here when dealing with exotic Linux
481 # flavors, see:
482 # https://github.com/giampaolo/psutil/issues/313
483 msg = "'sin' and 'sout' swap memory stats couldn't "
484 msg += "be determined and were set to 0"
485 warnings.warn(msg, RuntimeWarning, stacklevel=2)
486 sin = sout = 0
487 return ntp.sswap(total, used, free, percent, sin, sout)
488
489
490# malloc / heap functions; require glibc
491if hasattr(cext, "heap_info"):
492 heap_info = cext.heap_info
493 heap_trim = cext.heap_trim
494
495
496# =====================================================================
497# --- CPU
498# =====================================================================
499
500
501def cpu_times():
502 """Return a named tuple representing the following system-wide
503 CPU times:
504 (user, nice, system, idle, iowait, irq, softirq [steal, [guest,
505 [guest_nice]]])
506 Last 3 fields may not be available on all Linux kernel versions.
507 """
508 procfs_path = get_procfs_path()
509 with open_binary(f"{procfs_path}/stat") as f:
510 values = f.readline().split()
511 fields = values[1 : len(ntp.scputimes._fields) + 1]
512 fields = [float(x) / CLOCK_TICKS for x in fields]
513 return ntp.scputimes(*fields)
514
515
516def per_cpu_times():
517 """Return a list of namedtuple representing the CPU times
518 for every CPU available on the system.
519 """
520 procfs_path = get_procfs_path()
521 cpus = []
522 with open_binary(f"{procfs_path}/stat") as f:
523 # get rid of the first line which refers to system wide CPU stats
524 f.readline()
525 for line in f:
526 if line.startswith(b'cpu'):
527 values = line.split()
528 fields = values[1 : len(ntp.scputimes._fields) + 1]
529 fields = [float(x) / CLOCK_TICKS for x in fields]
530 entry = ntp.scputimes(*fields)
531 cpus.append(entry)
532 return cpus
533
534
535def cpu_count_logical():
536 """Return the number of logical CPUs in the system."""
537 try:
538 return os.sysconf("SC_NPROCESSORS_ONLN")
539 except ValueError:
540 # as a second fallback we try to parse /proc/cpuinfo
541 num = 0
542 with open_binary(f"{get_procfs_path()}/cpuinfo") as f:
543 for line in f:
544 if line.lower().startswith(b'processor'):
545 num += 1
546
        # unknown format (e.g. armel/sparc architectures), see:
548 # https://github.com/giampaolo/psutil/issues/200
549 # try to parse /proc/stat as a last resort
550 if num == 0:
551 search = re.compile(r'cpu\d')
552 with open_text(f"{get_procfs_path()}/stat") as f:
553 for line in f:
554 line = line.split(' ')[0]
555 if search.match(line):
556 num += 1
557
558 if num == 0:
559 # mimic os.cpu_count()
560 return None
561 return num
562
563
564def cpu_count_cores():
565 """Return the number of CPU cores in the system."""
566 # Method #1
567 ls = set()
568 # These 2 files are the same but */core_cpus_list is newer while
569 # */thread_siblings_list is deprecated and may disappear in the future.
570 # https://www.kernel.org/doc/Documentation/admin-guide/cputopology.rst
571 # https://github.com/giampaolo/psutil/pull/1727#issuecomment-707624964
572 # https://lkml.org/lkml/2019/2/26/41
573 p1 = "/sys/devices/system/cpu/cpu[0-9]*/topology/core_cpus_list"
574 p2 = "/sys/devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list"
575 for path in glob.glob(p1) or glob.glob(p2):
576 with open_binary(path) as f:
577 ls.add(f.read().strip())
578 result = len(ls)
579 if result != 0:
580 return result
581
582 # Method #2
583 mapping = {}
584 current_info = {}
585 with open_binary(f"{get_procfs_path()}/cpuinfo") as f:
586 for line in f:
587 line = line.strip().lower()
588 if not line:
589 # new section
590 try:
591 mapping[current_info[b'physical id']] = current_info[
592 b'cpu cores'
593 ]
594 except KeyError:
595 pass
596 current_info = {}
597 elif line.startswith((b'physical id', b'cpu cores')):
598 # ongoing section
599 key, value = line.split(b'\t:', 1)
600 current_info[key] = int(value)
601
602 result = sum(mapping.values())
603 return result or None # mimic os.cpu_count()
604
605
606def cpu_stats():
607 """Return various CPU stats as a named tuple."""
608 with open_binary(f"{get_procfs_path()}/stat") as f:
609 ctx_switches = None
610 interrupts = None
611 soft_interrupts = None
612 for line in f:
613 if line.startswith(b'ctxt'):
614 ctx_switches = int(line.split()[1])
615 elif line.startswith(b'intr'):
616 interrupts = int(line.split()[1])
617 elif line.startswith(b'softirq'):
618 soft_interrupts = int(line.split()[1])
619 if (
620 ctx_switches is not None
621 and soft_interrupts is not None
622 and interrupts is not None
623 ):
624 break
625 syscalls = 0
626 return ntp.scpustats(ctx_switches, interrupts, soft_interrupts, syscalls)
627
628
629def _cpu_get_cpuinfo_freq():
630 """Return current CPU frequency from cpuinfo if available."""
631 with open_binary(f"{get_procfs_path()}/cpuinfo") as f:
632 return [
633 float(line.split(b':', 1)[1])
634 for line in f
635 if line.lower().startswith(b'cpu mhz')
636 ]
637
638
639if os.path.exists("/sys/devices/system/cpu/cpufreq/policy0") or os.path.exists(
640 "/sys/devices/system/cpu/cpu0/cpufreq"
641):
642
643 def cpu_freq():
644 """Return frequency metrics for all CPUs.
        Contrary to other OSes, Linux updates these values in
646 real-time.
647 """
648 cpuinfo_freqs = _cpu_get_cpuinfo_freq()
649 paths = glob.glob(
650 "/sys/devices/system/cpu/cpufreq/policy[0-9]*"
651 ) or glob.glob("/sys/devices/system/cpu/cpu[0-9]*/cpufreq")
652 paths.sort(key=lambda x: int(re.search(r"[0-9]+", x).group()))
653 ret = []
654 pjoin = os.path.join
655 for i, path in enumerate(paths):
656 if len(paths) == len(cpuinfo_freqs):
657 # take cached value from cpuinfo if available, see:
658 # https://github.com/giampaolo/psutil/issues/1851
659 curr = cpuinfo_freqs[i] * 1000
660 else:
661 curr = bcat(pjoin(path, "scaling_cur_freq"), fallback=None)
662 if curr is None:
663 # Likely an old RedHat, see:
664 # https://github.com/giampaolo/psutil/issues/1071
665 curr = bcat(pjoin(path, "cpuinfo_cur_freq"), fallback=None)
666 if curr is None:
667 online_path = f"/sys/devices/system/cpu/cpu{i}/online"
668 # if cpu core is offline, set to all zeroes
669 if cat(online_path, fallback=None) == "0\n":
670 ret.append(ntp.scpufreq(0.0, 0.0, 0.0))
671 continue
672 msg = "can't find current frequency file"
673 raise NotImplementedError(msg)
674 curr = int(curr) / 1000
675 max_ = int(bcat(pjoin(path, "scaling_max_freq"))) / 1000
676 min_ = int(bcat(pjoin(path, "scaling_min_freq"))) / 1000
677 ret.append(ntp.scpufreq(curr, min_, max_))
678 return ret
679
680else:
681
682 def cpu_freq():
683 """Alternate implementation using /proc/cpuinfo.
        min and max frequencies are not available and are set to 0.0.
685 """
686 return [ntp.scpufreq(x, 0.0, 0.0) for x in _cpu_get_cpuinfo_freq()]
687
688
689# =====================================================================
690# --- network
691# =====================================================================
692
693
694net_if_addrs = cext.net_if_addrs
695
696
697class _Ipv6UnsupportedError(Exception):
698 pass
699
700
701class NetConnections:
702 """A wrapper on top of /proc/net/* files, retrieving per-process
703 and system-wide open connections (TCP, UDP, UNIX) similarly to
704 "netstat -an".
705
706 Note: in case of UNIX sockets we're only able to determine the
707 local endpoint/path, not the one it's connected to.
708 According to [1] it would be possible but not easily.
709
710 [1] http://serverfault.com/a/417946
711 """
712
713 def __init__(self):
714 # The string represents the basename of the corresponding
715 # /proc/net/{proto_name} file.
716 tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM)
717 tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
718 udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM)
719 udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
720 unix = ("unix", socket.AF_UNIX, None)
721 self.tmap = {
722 "all": (tcp4, tcp6, udp4, udp6, unix),
723 "tcp": (tcp4, tcp6),
724 "tcp4": (tcp4,),
725 "tcp6": (tcp6,),
726 "udp": (udp4, udp6),
727 "udp4": (udp4,),
728 "udp6": (udp6,),
729 "unix": (unix,),
730 "inet": (tcp4, tcp6, udp4, udp6),
731 "inet4": (tcp4, udp4),
732 "inet6": (tcp6, udp6),
733 }
734 self._procfs_path = None
735
736 def get_proc_inodes(self, pid):
737 inodes = defaultdict(list)
738 for fd in os.listdir(f"{self._procfs_path}/{pid}/fd"):
739 try:
740 inode = readlink(f"{self._procfs_path}/{pid}/fd/{fd}")
741 except (FileNotFoundError, ProcessLookupError):
742 # ENOENT == file which is gone in the meantime;
743 # os.stat(f"/proc/{self.pid}") will be done later
744 # to force NSP (if it's the case)
745 continue
746 except OSError as err:
747 if err.errno == errno.EINVAL:
748 # not a link
749 continue
750 if err.errno == errno.ENAMETOOLONG:
751 # file name too long
752 debug(err)
753 continue
754 raise
755 else:
756 if inode.startswith('socket:['):
757 # the process is using a socket
758 inode = inode[8:][:-1]
759 inodes[inode].append((pid, int(fd)))
760 return inodes
761
762 def get_all_inodes(self):
763 inodes = {}
764 for pid in pids():
765 try:
766 inodes.update(self.get_proc_inodes(pid))
767 except (FileNotFoundError, ProcessLookupError, PermissionError):
768 # os.listdir() is gonna raise a lot of access denied
769 # exceptions in case of unprivileged user; that's fine
770 # as we'll just end up returning a connection with PID
771 # and fd set to None anyway.
                # Both netstat -an and lsof do the same, so it's
773 # unlikely we can do any better.
774 # ENOENT just means a PID disappeared on us.
775 continue
776 return inodes
777
778 @staticmethod
779 def decode_address(addr, family):
780 """Accept an "ip:port" address as displayed in /proc/net/*
781 and convert it into a human readable form, like:
782
783 "0500000A:0016" -> ("10.0.0.5", 22)
784 "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
785
        The IP address portion is a hex-encoded number expressed in host
        byte order: 4 bytes for IPv4, four 32-bit words for IPv6. On
        little endian machines the byte order must therefore be reversed
        to obtain the network-order address.
        The port is represented as a two-byte hexadecimal number.
791
792 Reference:
793 http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
794 """
795 ip, port = addr.split(':')
796 port = int(port, 16)
797 # this usually refers to a local socket in listen mode with
798 # no end-points connected
799 if not port:
800 return ()
801 ip = ip.encode('ascii')
802 if family == socket.AF_INET:
803 # see: https://github.com/giampaolo/psutil/issues/201
804 if LITTLE_ENDIAN:
805 ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
806 else:
807 ip = socket.inet_ntop(family, base64.b16decode(ip))
808 else: # IPv6
809 ip = base64.b16decode(ip)
810 try:
811 # see: https://github.com/giampaolo/psutil/issues/201
812 if LITTLE_ENDIAN:
813 ip = socket.inet_ntop(
814 socket.AF_INET6,
815 struct.pack('>4I', *struct.unpack('<4I', ip)),
816 )
817 else:
818 ip = socket.inet_ntop(
819 socket.AF_INET6,
820 struct.pack('<4I', *struct.unpack('<4I', ip)),
821 )
822 except ValueError:
823 # see: https://github.com/giampaolo/psutil/issues/623
824 if not supports_ipv6():
825 raise _Ipv6UnsupportedError from None
826 raise
827 return ntp.addr(ip, port)
828
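    # Worked example of the byte reversal performed above (little endian
    # host): "0500000A" is the hex dump of the 4 raw address bytes
    # 05 00 00 0A; reversing them gives 0A 00 00 05, i.e. the network-order
    # address "10.0.0.5". The "0016" part is simply hex for port 22.
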
829 @staticmethod
830 def process_inet(file, family, type_, inodes, filter_pid=None):
831 """Parse /proc/net/tcp* and /proc/net/udp* files."""
832 if file.endswith('6') and not os.path.exists(file):
833 # IPv6 not supported
834 return
835 with open_text(file) as f:
836 f.readline() # skip the first line
837 for lineno, line in enumerate(f, 1):
838 try:
839 _, laddr, raddr, status, _, _, _, _, _, inode = (
840 line.split()[:10]
841 )
842 except ValueError:
843 msg = (
844 f"error while parsing {file}; malformed line"
845 f" {lineno} {line!r}"
846 )
847 raise RuntimeError(msg) from None
848 if inode in inodes:
849 # # We assume inet sockets are unique, so we error
850 # # out if there are multiple references to the
851 # # same inode. We won't do this for UNIX sockets.
852 # if len(inodes[inode]) > 1 and family != socket.AF_UNIX:
853 # raise ValueError("ambiguous inode with multiple "
854 # "PIDs references")
855 pid, fd = inodes[inode][0]
856 else:
857 pid, fd = None, -1
858 if filter_pid is not None and filter_pid != pid:
859 continue
860 else:
861 if type_ == socket.SOCK_STREAM:
862 status = TCP_STATUSES[status]
863 else:
864 status = _common.CONN_NONE
865 try:
866 laddr = NetConnections.decode_address(laddr, family)
867 raddr = NetConnections.decode_address(raddr, family)
868 except _Ipv6UnsupportedError:
869 continue
870 yield (fd, family, type_, laddr, raddr, status, pid)
871
872 @staticmethod
873 def process_unix(file, family, inodes, filter_pid=None):
874 """Parse /proc/net/unix files."""
875 with open_text(file) as f:
876 f.readline() # skip the first line
877 for line in f:
878 tokens = line.split()
879 try:
880 _, _, _, _, type_, _, inode = tokens[0:7]
881 except ValueError:
882 if ' ' not in line:
883 # see: https://github.com/giampaolo/psutil/issues/766
884 continue
885 msg = (
886 f"error while parsing {file}; malformed line {line!r}"
887 )
888 raise RuntimeError(msg) # noqa: B904
889 if inode in inodes: # noqa: SIM108
890 # With UNIX sockets we can have a single inode
891 # referencing many file descriptors.
892 pairs = inodes[inode]
893 else:
894 pairs = [(None, -1)]
895 for pid, fd in pairs:
896 if filter_pid is not None and filter_pid != pid:
897 continue
898 else:
899 path = tokens[-1] if len(tokens) == 8 else ''
900 type_ = _common.socktype_to_enum(int(type_))
901 # XXX: determining the remote endpoint of a
902 # UNIX socket on Linux is not possible, see:
903 # https://serverfault.com/questions/252723/
904 raddr = ""
905 status = _common.CONN_NONE
906 yield (fd, family, type_, path, raddr, status, pid)
907
908 def retrieve(self, kind, pid=None):
909 self._procfs_path = get_procfs_path()
910 if pid is not None:
911 inodes = self.get_proc_inodes(pid)
912 if not inodes:
913 # no connections for this process
914 return []
915 else:
916 inodes = self.get_all_inodes()
917 ret = set()
918 for proto_name, family, type_ in self.tmap[kind]:
919 path = f"{self._procfs_path}/net/{proto_name}"
920 if family in {socket.AF_INET, socket.AF_INET6}:
921 ls = self.process_inet(
922 path, family, type_, inodes, filter_pid=pid
923 )
924 else:
925 ls = self.process_unix(path, family, inodes, filter_pid=pid)
926 for fd, family, type_, laddr, raddr, status, bound_pid in ls:
927 if pid:
928 conn = ntp.pconn(fd, family, type_, laddr, raddr, status)
929 else:
930 conn = ntp.sconn(
931 fd, family, type_, laddr, raddr, status, bound_pid
932 )
933 ret.add(conn)
934 return list(ret)
935
936
937_net_connections = NetConnections()
938
939
940def net_connections(kind='inet'):
941 """Return system-wide open connections."""
942 return _net_connections.retrieve(kind)
943
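# Usage sketch (root privileges are typically needed to map sockets owned by
# other users back to their PID / fd):
#
#   for conn in net_connections(kind="tcp"):
#       # each conn is an sconn namedtuple:
#       # (fd, family, type, laddr, raddr, status, pid);
#       # fd is -1 and pid is None for sockets we could not map to a process
#       ...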
944
945def net_io_counters():
946 """Return network I/O statistics for every network interface
947 installed on the system as a dict of raw tuples.
948 """
949 with open_text(f"{get_procfs_path()}/net/dev") as f:
950 lines = f.readlines()
951 retdict = {}
952 for line in lines[2:]:
953 colon = line.rfind(':')
954 assert colon > 0, repr(line)
955 name = line[:colon].strip()
956 fields = line[colon + 1 :].strip().split()
957
958 (
959 # in
960 bytes_recv,
961 packets_recv,
962 errin,
963 dropin,
964 _fifoin, # unused
965 _framein, # unused
966 _compressedin, # unused
967 _multicastin, # unused
968 # out
969 bytes_sent,
970 packets_sent,
971 errout,
972 dropout,
973 _fifoout, # unused
974 _collisionsout, # unused
975 _carrierout, # unused
976 _compressedout, # unused
977 ) = map(int, fields)
978
979 retdict[name] = (
980 bytes_sent,
981 bytes_recv,
982 packets_sent,
983 packets_recv,
984 errin,
985 errout,
986 dropin,
987 dropout,
988 )
989 return retdict
990
991
992def net_if_stats():
993 """Get NIC stats (isup, duplex, speed, mtu)."""
994 duplex_map = {
995 cext.DUPLEX_FULL: NIC_DUPLEX_FULL,
996 cext.DUPLEX_HALF: NIC_DUPLEX_HALF,
997 cext.DUPLEX_UNKNOWN: NIC_DUPLEX_UNKNOWN,
998 }
999 names = net_io_counters().keys()
1000 ret = {}
1001 for name in names:
1002 try:
1003 mtu = cext.net_if_mtu(name)
1004 flags = cext.net_if_flags(name)
1005 duplex, speed = cext.net_if_duplex_speed(name)
1006 except OSError as err:
1007 # https://github.com/giampaolo/psutil/issues/1279
1008 if err.errno != errno.ENODEV:
1009 raise
1010 debug(err)
1011 else:
1012 output_flags = ','.join(flags)
1013 isup = 'running' in flags
1014 ret[name] = ntp.snicstats(
1015 isup, duplex_map[duplex], speed, mtu, output_flags
1016 )
1017 return ret
1018
1019
1020# =====================================================================
1021# --- disks
1022# =====================================================================
1023
1024
1025disk_usage = _psposix.disk_usage
1026
1027
1028def disk_io_counters(perdisk=False):
1029 """Return disk I/O statistics for every disk installed on the
1030 system as a dict of raw tuples.
1031 """
1032
1033 def read_procfs():
1034 # OK, this is a bit confusing. The format of /proc/diskstats can
1035 # have 3 variations.
        # On Linux 2.4 each line always has 15 fields, e.g.:
1037 # "3 0 8 hda 8 8 8 8 8 8 8 8 8 8 8"
1038 # On Linux 2.6+ each line *usually* has 14 fields, and the disk
1039 # name is in another position, like this:
1040 # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8"
        # ...unless (Linux 2.6) the line refers to a partition instead
        # of a disk, in which case the line has fewer fields (7):
1043 # "3 1 hda1 8 8 8 8"
1044 # 4.18+ has 4 fields added:
1045 # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8 0 0 0 0"
1046 # 5.5 has 2 more fields.
1047 # See:
1048 # https://www.kernel.org/doc/Documentation/iostats.txt
1049 # https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
1050 with open_text(f"{get_procfs_path()}/diskstats") as f:
1051 lines = f.readlines()
1052 for line in lines:
1053 fields = line.split()
1054 flen = len(fields)
1055 # fmt: off
1056 if flen == 15:
1057 # Linux 2.4
1058 name = fields[3]
1059 reads = int(fields[2])
1060 (reads_merged, rbytes, rtime, writes, writes_merged,
1061 wbytes, wtime, _, busy_time, _) = map(int, fields[4:14])
1062 elif flen == 14 or flen >= 18:
1063 # Linux 2.6+, line referring to a disk
1064 name = fields[2]
1065 (reads, reads_merged, rbytes, rtime, writes, writes_merged,
1066 wbytes, wtime, _, busy_time, _) = map(int, fields[3:14])
1067 elif flen == 7:
1068 # Linux 2.6+, line referring to a partition
1069 name = fields[2]
1070 reads, rbytes, writes, wbytes = map(int, fields[3:])
1071 rtime = wtime = reads_merged = writes_merged = busy_time = 0
1072 else:
1073 msg = f"not sure how to interpret line {line!r}"
1074 raise ValueError(msg)
1075 yield (name, reads, writes, rbytes, wbytes, rtime, wtime,
1076 reads_merged, writes_merged, busy_time)
1077 # fmt: on
1078
1079 def read_sysfs():
1080 for block in os.listdir('/sys/block'):
1081 for root, _, files in os.walk(os.path.join('/sys/block', block)):
1082 if 'stat' not in files:
1083 continue
1084 with open_text(os.path.join(root, 'stat')) as f:
1085 fields = f.read().strip().split()
1086 name = os.path.basename(root)
1087 # fmt: off
1088 (reads, reads_merged, rbytes, rtime, writes, writes_merged,
1089 wbytes, wtime, _, busy_time) = map(int, fields[:10])
1090 yield (name, reads, writes, rbytes, wbytes, rtime,
1091 wtime, reads_merged, writes_merged, busy_time)
1092 # fmt: on
1093
1094 if os.path.exists(f"{get_procfs_path()}/diskstats"):
1095 gen = read_procfs()
1096 elif os.path.exists('/sys/block'):
1097 gen = read_sysfs()
1098 else:
1099 msg = (
            f"neither {get_procfs_path()}/diskstats nor /sys/block is"
            " available on this system"
1102 )
1103 raise NotImplementedError(msg)
1104
1105 retdict = {}
1106 for entry in gen:
1107 # fmt: off
1108 (name, reads, writes, rbytes, wbytes, rtime, wtime, reads_merged,
1109 writes_merged, busy_time) = entry
1110 if not perdisk and not is_storage_device(name):
1111 # perdisk=False means we want to calculate totals so we skip
1112 # partitions (e.g. 'sda1', 'nvme0n1p1') and only include
1113 # base disk devices (e.g. 'sda', 'nvme0n1'). Base disks
1114 # include a total of all their partitions + some extra size
1115 # of their own:
1116 # $ cat /proc/diskstats
1117 # 259 0 sda 10485760 ...
1118 # 259 1 sda1 5186039 ...
1119 # 259 1 sda2 5082039 ...
1120 # See:
1121 # https://github.com/giampaolo/psutil/pull/1313
1122 continue
1123
1124 rbytes *= DISK_SECTOR_SIZE
1125 wbytes *= DISK_SECTOR_SIZE
1126 retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime,
1127 reads_merged, writes_merged, busy_time)
1128 # fmt: on
1129
1130 return retdict
1131
1132
1133class RootFsDeviceFinder:
1134 """disk_partitions() may return partitions with device == "/dev/root"
1135 or "rootfs". This container class uses different strategies to try to
1136 obtain the real device path. Resources:
1137 https://bootlin.com/blog/find-root-device/
1138 https://www.systutorials.com/how-to-find-the-disk-where-root-is-on-in-bash-on-linux/.
1139 """
1140
1141 __slots__ = ['major', 'minor']
1142
1143 def __init__(self):
1144 dev = os.stat("/").st_dev
1145 self.major = os.major(dev)
1146 self.minor = os.minor(dev)
1147
1148 def ask_proc_partitions(self):
1149 with open_text(f"{get_procfs_path()}/partitions") as f:
1150 for line in f.readlines()[2:]:
1151 fields = line.split()
1152 if len(fields) < 4: # just for extra safety
1153 continue
1154 major = int(fields[0]) if fields[0].isdigit() else None
1155 minor = int(fields[1]) if fields[1].isdigit() else None
1156 name = fields[3]
1157 if major == self.major and minor == self.minor:
1158 if name: # just for extra safety
1159 return f"/dev/{name}"
1160
1161 def ask_sys_dev_block(self):
1162 path = f"/sys/dev/block/{self.major}:{self.minor}/uevent"
1163 with open_text(path) as f:
1164 for line in f:
1165 if line.startswith("DEVNAME="):
1166 name = line.strip().rpartition("DEVNAME=")[2]
1167 if name: # just for extra safety
1168 return f"/dev/{name}"
1169
1170 def ask_sys_class_block(self):
1171 needle = f"{self.major}:{self.minor}"
1172 files = glob.iglob("/sys/class/block/*/dev")
1173 for file in files:
1174 try:
1175 f = open_text(file)
1176 except FileNotFoundError: # race condition
1177 continue
1178 else:
1179 with f:
1180 data = f.read().strip()
1181 if data == needle:
1182 name = os.path.basename(os.path.dirname(file))
1183 return f"/dev/{name}"
1184
1185 def find(self):
1186 path = None
1187 if path is None:
1188 try:
1189 path = self.ask_proc_partitions()
1190 except OSError as err:
1191 debug(err)
1192 if path is None:
1193 try:
1194 path = self.ask_sys_dev_block()
1195 except OSError as err:
1196 debug(err)
1197 if path is None:
1198 try:
1199 path = self.ask_sys_class_block()
1200 except OSError as err:
1201 debug(err)
1202 # We use exists() because the "/dev/*" part of the path is hard
1203 # coded, so we want to be sure.
1204 if path is not None and os.path.exists(path):
1205 return path
1206
1207
1208def disk_partitions(all=False):
1209 """Return mounted disk partitions as a list of namedtuples."""
1210 fstypes = set()
1211 procfs_path = get_procfs_path()
1212 if not all:
1213 with open_text(f"{procfs_path}/filesystems") as f:
1214 for line in f:
1215 line = line.strip()
1216 if not line.startswith("nodev"):
1217 fstypes.add(line.strip())
1218 else:
1219 # ignore all lines starting with "nodev" except "nodev zfs"
1220 fstype = line.split("\t")[1]
1221 if fstype == "zfs":
1222 fstypes.add("zfs")
1223
1224 # See: https://github.com/giampaolo/psutil/issues/1307
1225 if procfs_path == "/proc" and os.path.isfile('/etc/mtab'):
1226 mounts_path = os.path.realpath("/etc/mtab")
1227 else:
1228 mounts_path = os.path.realpath(f"{procfs_path}/self/mounts")
1229
1230 retlist = []
1231 partitions = cext.disk_partitions(mounts_path)
1232 for partition in partitions:
1233 device, mountpoint, fstype, opts = partition
1234 if device == 'none':
1235 device = ''
1236 if device in {"/dev/root", "rootfs"}:
1237 device = RootFsDeviceFinder().find() or device
1238 if not all:
1239 if not device or fstype not in fstypes:
1240 continue
1241 ntuple = ntp.sdiskpart(device, mountpoint, fstype, opts)
1242 retlist.append(ntuple)
1243
1244 return retlist
1245
1246
1247# =====================================================================
1248# --- sensors
1249# =====================================================================
1250
1251
1252def sensors_temperatures():
1253 """Return hardware (CPU and others) temperatures as a dict
1254 including hardware name, label, current, max and critical
1255 temperatures.
1256
1257 Implementation notes:
1258 - /sys/class/hwmon looks like the most recent interface to
1259 retrieve this info, and this implementation relies on it
1260 only (old distros will probably use something else)
1261 - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
1262 - /sys/class/thermal/thermal_zone* is another one but it's more
1263 difficult to parse
1264 """
1265 ret = collections.defaultdict(list)
1266 basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*')
1267 # CentOS has an intermediate /device directory:
1268 # https://github.com/giampaolo/psutil/issues/971
1269 # https://github.com/nicolargo/glances/issues/1060
1270 basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*'))
1271 basenames = sorted({x.split('_')[0] for x in basenames})
1272
1273 # Only add the coretemp hwmon entries if they're not already in
1274 # /sys/class/hwmon/
1275 # https://github.com/giampaolo/psutil/issues/1708
1276 # https://github.com/giampaolo/psutil/pull/1648
1277 basenames2 = glob.glob(
1278 '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*'
1279 )
1280 repl = re.compile(r"/sys/devices/platform/coretemp.*/hwmon/")
1281 for name in basenames2:
1282 altname = repl.sub('/sys/class/hwmon/', name)
1283 if altname not in basenames:
1284 basenames.append(name)
1285
1286 for base in basenames:
1287 try:
1288 path = base + '_input'
1289 current = float(bcat(path)) / 1000.0
1290 path = os.path.join(os.path.dirname(base), 'name')
1291 unit_name = cat(path).strip()
1292 except (OSError, ValueError):
1293 # A lot of things can go wrong here, so let's just skip the
1294 # whole entry. Sure thing is Linux's /sys/class/hwmon really
1295 # is a stinky broken mess.
1296 # https://github.com/giampaolo/psutil/issues/1009
1297 # https://github.com/giampaolo/psutil/issues/1101
1298 # https://github.com/giampaolo/psutil/issues/1129
1299 # https://github.com/giampaolo/psutil/issues/1245
1300 # https://github.com/giampaolo/psutil/issues/1323
1301 continue
1302
1303 high = bcat(base + '_max', fallback=None)
1304 critical = bcat(base + '_crit', fallback=None)
1305 label = cat(base + '_label', fallback='').strip()
1306
1307 if high is not None:
1308 try:
1309 high = float(high) / 1000.0
1310 except ValueError:
1311 high = None
1312 if critical is not None:
1313 try:
1314 critical = float(critical) / 1000.0
1315 except ValueError:
1316 critical = None
1317
1318 ret[unit_name].append((label, current, high, critical))
1319
1320 # Indication that no sensors were detected in /sys/class/hwmon/
1321 if not basenames:
1322 basenames = glob.glob('/sys/class/thermal/thermal_zone*')
1323 basenames = sorted(set(basenames))
1324
1325 for base in basenames:
1326 try:
1327 path = os.path.join(base, 'temp')
1328 current = float(bcat(path)) / 1000.0
1329 path = os.path.join(base, 'type')
1330 unit_name = cat(path).strip()
1331 except (OSError, ValueError) as err:
1332 debug(err)
1333 continue
1334
1335 trip_paths = glob.glob(base + '/trip_point*')
1336 trip_points = {
1337 '_'.join(os.path.basename(p).split('_')[0:3])
1338 for p in trip_paths
1339 }
1340 critical = None
1341 high = None
1342 for trip_point in trip_points:
1343 path = os.path.join(base, trip_point + "_type")
1344 trip_type = cat(path, fallback='').strip()
1345 if trip_type == 'critical':
1346 critical = bcat(
1347 os.path.join(base, trip_point + "_temp"), fallback=None
1348 )
1349 elif trip_type == 'high':
1350 high = bcat(
1351 os.path.join(base, trip_point + "_temp"), fallback=None
1352 )
1353
1354 if high is not None:
1355 try:
1356 high = float(high) / 1000.0
1357 except ValueError:
1358 high = None
1359 if critical is not None:
1360 try:
1361 critical = float(critical) / 1000.0
1362 except ValueError:
1363 critical = None
1364
1365 ret[unit_name].append(('', current, high, critical))
1366
1367 return dict(ret)
1368
1369
1370def sensors_fans():
1371 """Return hardware fans info (for CPU and other peripherals) as a
1372 dict including hardware label and current speed.
1373
1374 Implementation notes:
1375 - /sys/class/hwmon looks like the most recent interface to
1376 retrieve this info, and this implementation relies on it
1377 only (old distros will probably use something else)
1378 - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
1379 """
1380 ret = collections.defaultdict(list)
1381 basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*')
1382 if not basenames:
1383 # CentOS has an intermediate /device directory:
1384 # https://github.com/giampaolo/psutil/issues/971
1385 basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*')
1386
1387 basenames = sorted({x.split("_")[0] for x in basenames})
1388 for base in basenames:
1389 try:
1390 current = int(bcat(base + '_input'))
1391 except OSError as err:
1392 debug(err)
1393 continue
1394 unit_name = cat(os.path.join(os.path.dirname(base), 'name')).strip()
1395 label = cat(base + '_label', fallback='').strip()
1396 ret[unit_name].append(ntp.sfan(label, current))
1397
1398 return dict(ret)
1399
1400
1401def sensors_battery():
1402 """Return battery information.
1403 Implementation note: it appears /sys/class/power_supply/BAT0/
1404 directory structure may vary and provide files with the same
1405 meaning but under different names, see:
1406 https://github.com/giampaolo/psutil/issues/966.
1407 """
1408 null = object()
1409
1410 def multi_bcat(*paths):
1411 """Attempt to read the content of multiple files which may
1412 not exist. If none of them exist return None.
1413 """
1414 for path in paths:
1415 ret = bcat(path, fallback=null)
1416 if ret != null:
1417 try:
1418 return int(ret)
1419 except ValueError:
1420 return ret.strip()
1421 return None
1422
1423 bats = [
1424 x
1425 for x in os.listdir(POWER_SUPPLY_PATH)
1426 if x.startswith('BAT') or 'battery' in x.lower()
1427 ]
1428 if not bats:
1429 return None
1430 # Get the first available battery. Usually this is "BAT0", except
1431 # some rare exceptions:
1432 # https://github.com/giampaolo/psutil/issues/1238
1433 root = os.path.join(POWER_SUPPLY_PATH, min(bats))
1434
1435 # Base metrics.
1436 energy_now = multi_bcat(root + "/energy_now", root + "/charge_now")
1437 power_now = multi_bcat(root + "/power_now", root + "/current_now")
1438 energy_full = multi_bcat(root + "/energy_full", root + "/charge_full")
1439 time_to_empty = multi_bcat(root + "/time_to_empty_now")
1440
1441 # Percent. If we have energy_full the percentage will be more
1442 # accurate compared to reading /capacity file (float vs. int).
1443 if energy_full is not None and energy_now is not None:
1444 try:
1445 percent = 100.0 * energy_now / energy_full
1446 except ZeroDivisionError:
1447 percent = 0.0
1448 else:
1449 percent = int(cat(root + "/capacity", fallback=-1))
1450 if percent == -1:
1451 return None
1452
1453 # Is AC power cable plugged in?
1454 # Note: AC0 is not always available and sometimes (e.g. CentOS7)
1455 # it's called "AC".
1456 power_plugged = None
1457 online = multi_bcat(
1458 os.path.join(POWER_SUPPLY_PATH, "AC0/online"),
1459 os.path.join(POWER_SUPPLY_PATH, "AC/online"),
1460 )
1461 if online is not None:
1462 power_plugged = online == 1
1463 else:
1464 status = cat(root + "/status", fallback="").strip().lower()
1465 if status == "discharging":
1466 power_plugged = False
1467 elif status in {"charging", "full"}:
1468 power_plugged = True
1469
1470 # Seconds left.
1471 # Note to self: we may also calculate the charging ETA as per:
1472 # https://github.com/thialfihar/dotfiles/blob/
1473 # 013937745fd9050c30146290e8f963d65c0179e6/bin/battery.py#L55
1474 if power_plugged:
1475 secsleft = _common.POWER_TIME_UNLIMITED
1476 elif energy_now is not None and power_now is not None:
1477 try:
1478 secsleft = int(energy_now / abs(power_now) * 3600)
1479 except ZeroDivisionError:
1480 secsleft = _common.POWER_TIME_UNKNOWN
1481 elif time_to_empty is not None:
1482 secsleft = int(time_to_empty * 60)
1483 if secsleft < 0:
1484 secsleft = _common.POWER_TIME_UNKNOWN
1485 else:
1486 secsleft = _common.POWER_TIME_UNKNOWN
1487
1488 return ntp.sbattery(percent, secsleft, power_plugged)
1489
1490
1491# =====================================================================
1492# --- other system functions
1493# =====================================================================
1494
1495
1496def users():
1497 """Return currently connected users as a list of namedtuples."""
1498 retlist = []
1499 rawlist = cext.users()
1500 for item in rawlist:
1501 user, tty, hostname, tstamp, pid = item
1502 nt = ntp.suser(user, tty or None, hostname, tstamp, pid)
1503 retlist.append(nt)
1504 return retlist
1505
1506
1507def boot_time():
1508 """Return the system boot time expressed in seconds since the epoch."""
1509 path = f"{get_procfs_path()}/stat"
1510 with open_binary(path) as f:
1511 for line in f:
1512 if line.startswith(b'btime'):
1513 return float(line.strip().split()[1])
1514 msg = f"line 'btime' not found in {path}"
1515 raise RuntimeError(msg)
1516
1517
1518# =====================================================================
1519# --- processes
1520# =====================================================================
1521
1522
1523def pids():
1524 """Returns a list of PIDs currently running on the system."""
1525 path = get_procfs_path().encode(ENCODING)
1526 return [int(x) for x in os.listdir(path) if x.isdigit()]
1527
1528
1529def pid_exists(pid):
1530 """Check for the existence of a unix PID. Linux TIDs are not
1531 supported (always return False).
1532 """
1533 if not _psposix.pid_exists(pid):
1534 return False
1535 else:
        # Linux apparently does not distinguish between PIDs and TIDs
1537 # (thread IDs).
1538 # listdir("/proc") won't show any TID (only PIDs) but
1539 # os.stat("/proc/{tid}") will succeed if {tid} exists.
1540 # os.kill() can also be passed a TID. This is quite confusing.
1541 # In here we want to enforce this distinction and support PIDs
1542 # only, see:
1543 # https://github.com/giampaolo/psutil/issues/687
1544 try:
1545 # Note: already checked that this is faster than using a
1546 # regular expr. Also (a lot) faster than doing
1547 # 'return pid in pids()'
1548 path = f"{get_procfs_path()}/{pid}/status"
1549 with open_binary(path) as f:
1550 for line in f:
1551 if line.startswith(b"Tgid:"):
1552 tgid = int(line.split()[1])
1553 # If tgid and pid are the same then we're
1554 # dealing with a process PID.
1555 return tgid == pid
1556 msg = f"'Tgid' line not found in {path}"
1557 raise ValueError(msg)
1558 except (OSError, ValueError):
1559 return pid in pids()
1560
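# Illustration of the Tgid check above (PIDs are hypothetical): for a process
# whose main PID is 1000 owning a thread with TID 1001, /proc/1001/status
# exists but reports "Tgid: 1000", so pid_exists(1001) returns False while
# pid_exists(1000) returns True.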
1561
1562def ppid_map():
1563 """Obtain a {pid: ppid, ...} dict for all running processes in
1564 one shot. Used to speed up Process.children().
1565 """
1566 ret = {}
1567 procfs_path = get_procfs_path()
1568 for pid in pids():
1569 try:
1570 with open_binary(f"{procfs_path}/{pid}/stat") as f:
1571 data = f.read()
1572 except (FileNotFoundError, ProcessLookupError):
1573 pass
1574 except PermissionError as err:
1575 raise AccessDenied(pid) from err
1576 else:
1577 rpar = data.rfind(b')')
1578 dset = data[rpar + 2 :].split()
1579 ppid = int(dset[1])
1580 ret[pid] = ppid
1581 return ret
1582
1583
1584def wrap_exceptions(fun):
    """Decorator which translates bare OSError exceptions into
    NoSuchProcess and AccessDenied.
1587 """
1588
1589 @functools.wraps(fun)
1590 def wrapper(self, *args, **kwargs):
1591 pid, name = self.pid, self._name
1592 try:
1593 return fun(self, *args, **kwargs)
1594 except PermissionError as err:
1595 raise AccessDenied(pid, name) from err
1596 except ProcessLookupError as err:
1597 self._raise_if_zombie()
1598 raise NoSuchProcess(pid, name) from err
1599 except FileNotFoundError as err:
1600 self._raise_if_zombie()
1601 # /proc/PID directory may still exist, but the files within
1602 # it may not, indicating the process is gone, see:
1603 # https://github.com/giampaolo/psutil/issues/2418
1604 if not os.path.exists(f"{self._procfs_path}/{pid}/stat"):
1605 raise NoSuchProcess(pid, name) from err
1606 raise
1607
1608 return wrapper
1609
1610
1611class Process:
1612 """Linux process implementation."""
1613
1614 __slots__ = [
1615 "_cache",
1616 "_ctime",
1617 "_name",
1618 "_ppid",
1619 "_procfs_path",
1620 "pid",
1621 ]
1622
1623 def __init__(self, pid):
1624 self.pid = pid
1625 self._name = None
1626 self._ppid = None
1627 self._ctime = None
1628 self._procfs_path = get_procfs_path()
1629
1630 def _is_zombie(self):
        # Note: most of the time Linux is able to return info about the
1632 # process even if it's a zombie, and /proc/{pid} will exist.
1633 # There are some exceptions though, like exe(), cmdline() and
1634 # memory_maps(). In these cases /proc/{pid}/{file} exists but
1635 # it's empty. Instead of returning a "null" value we'll raise an
1636 # exception.
1637 try:
1638 data = bcat(f"{self._procfs_path}/{self.pid}/stat")
1639 except OSError:
1640 return False
1641 else:
1642 rpar = data.rfind(b')')
1643 status = data[rpar + 2 : rpar + 3]
1644 return status == b"Z"
1645
1646 def _raise_if_zombie(self):
1647 if self._is_zombie():
1648 raise ZombieProcess(self.pid, self._name, self._ppid)
1649
1650 def _raise_if_not_alive(self):
1651 """Raise NSP if the process disappeared on us."""
        # For those C functions which do not raise NSP, possibly returning
        # an incorrect or incomplete result.
1654 os.stat(f"{self._procfs_path}/{self.pid}")
1655
1656 def _readlink(self, path, fallback=UNSET):
1657 # * https://github.com/giampaolo/psutil/issues/503
1658 # os.readlink('/proc/pid/exe') may raise ESRCH (ProcessLookupError)
1659 # instead of ENOENT (FileNotFoundError) when it races.
        # * ENOENT may also occur even though the path actually exists,
        #   if PID is a low PID (~0-20 range).
1662 # * https://github.com/giampaolo/psutil/issues/2514
1663 try:
1664 return readlink(path)
1665 except (FileNotFoundError, ProcessLookupError):
1666 if os.path.lexists(f"{self._procfs_path}/{self.pid}"):
1667 self._raise_if_zombie()
1668 if fallback is not UNSET:
1669 return fallback
1670 raise
1671
1672 @wrap_exceptions
1673 @memoize_when_activated
1674 def _parse_stat_file(self):
1675 """Parse /proc/{pid}/stat file and return a dict with various
1676 process info.
        Using "man proc" as a reference: where "man proc" refers to
        position N, always subtract 3 (e.g. ppid is position 4 in
        'man proc' but position 1 here).
1680 The return value is cached in case oneshot() ctx manager is
1681 in use.
1682 """
1683 data = bcat(f"{self._procfs_path}/{self.pid}/stat")
1684 # Process name is between parentheses. It can contain spaces and
1685 # other parentheses. This is taken into account by looking for
1686 # the first occurrence of "(" and the last occurrence of ")".
1687 rpar = data.rfind(b')')
1688 name = data[data.find(b'(') + 1 : rpar]
1689 fields = data[rpar + 2 :].split()
1690
1691 ret = {}
1692 ret['name'] = name
1693 ret['status'] = fields[0]
1694 ret['ppid'] = fields[1]
1695 ret['ttynr'] = fields[4]
1696 ret['utime'] = fields[11]
1697 ret['stime'] = fields[12]
1698 ret['children_utime'] = fields[13]
1699 ret['children_stime'] = fields[14]
1700 ret['create_time'] = fields[19]
1701 ret['cpu_num'] = fields[36]
1702 try:
1703 ret['blkio_ticks'] = fields[39] # aka 'delayacct_blkio_ticks'
1704 except IndexError:
1705 # https://github.com/giampaolo/psutil/issues/2455
1706 debug("can't get blkio_ticks, set iowait to 0")
1707 ret['blkio_ticks'] = 0
1708
1709 return ret
1710
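    # Hypothetical /proc/<pid>/stat content, to illustrate the parsing above:
    #
    #   b"1234 (tmux: server) S 1 1234 1234 0 -1 ..."
    #
    # rfind(b')') locates the *last* closing parenthesis, so 'name' becomes
    # b"tmux: server" even though it contains a space; everything after ") "
    # is split into 'fields', whose index 0 is the status letter ("S" above).
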
1711 @wrap_exceptions
1712 @memoize_when_activated
1713 def _read_status_file(self):
        """Read /proc/{pid}/status file and return its content.
1715 The return value is cached in case oneshot() ctx manager is
1716 in use.
1717 """
1718 with open_binary(f"{self._procfs_path}/{self.pid}/status") as f:
1719 return f.read()
1720
1721 @wrap_exceptions
1722 @memoize_when_activated
1723 def _read_smaps_file(self):
1724 with open_binary(f"{self._procfs_path}/{self.pid}/smaps") as f:
1725 return f.read().strip()
1726
1727 def oneshot_enter(self):
1728 self._parse_stat_file.cache_activate(self)
1729 self._read_status_file.cache_activate(self)
1730 self._read_smaps_file.cache_activate(self)
1731
1732 def oneshot_exit(self):
1733 self._parse_stat_file.cache_deactivate(self)
1734 self._read_status_file.cache_deactivate(self)
1735 self._read_smaps_file.cache_deactivate(self)
1736
1737 @wrap_exceptions
1738 def name(self):
1739 # XXX - gets changed later and probably needs refactoring
1740 return decode(self._parse_stat_file()['name'])
1741
1742 @wrap_exceptions
1743 def exe(self):
1744 return self._readlink(
1745 f"{self._procfs_path}/{self.pid}/exe", fallback=""
1746 )
1747
1748 @wrap_exceptions
1749 def cmdline(self):
1750 with open_text(f"{self._procfs_path}/{self.pid}/cmdline") as f:
1751 data = f.read()
1752 if not data:
1753 # may happen in case of zombie process
1754 self._raise_if_zombie()
1755 return []
        # 'man proc' states that args are separated by null bytes '\0'
        # and that the last char is supposed to be a null byte as well.
        # Nevertheless some processes may change their cmdline after
        # being started (via setproctitle() or similar); they are
        # usually not compliant with this rule and use spaces instead.
        # Google Chrome process is an example. See:
        # https://github.com/giampaolo/psutil/issues/1179
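        # Illustrative example (made up): a process started as
        # "cat -n foo" typically exposes b"cat\x00-n\x00foo\x00" here,
        # which the code below turns into ['cat', '-n', 'foo'].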
1763 sep = '\x00' if data.endswith('\x00') else ' '
1764 if data.endswith(sep):
1765 data = data[:-1]
1766 cmdline = data.split(sep)
1767 # Sometimes last char is a null byte '\0' but the args are
1768 # separated by spaces, see: https://github.com/giampaolo/psutil/
1769 # issues/1179#issuecomment-552984549
1770 if sep == '\x00' and len(cmdline) == 1 and ' ' in data:
1771 cmdline = data.split(' ')
1772 return cmdline
1773
1774 @wrap_exceptions
1775 def environ(self):
1776 with open_text(f"{self._procfs_path}/{self.pid}/environ") as f:
1777 data = f.read()
1778 return parse_environ_block(data)
1779
1780 @wrap_exceptions
1781 def terminal(self):
1782 tty_nr = int(self._parse_stat_file()['ttynr'])
1783 tmap = _psposix.get_terminal_map()
1784 try:
1785 return tmap[tty_nr]
1786 except KeyError:
1787 return None
1788
1789 # May not be available on old kernels.
1790 if os.path.exists(f"/proc/{os.getpid()}/io"):
1791
1792 @wrap_exceptions
1793 def io_counters(self):
1794 fname = f"{self._procfs_path}/{self.pid}/io"
1795 fields = {}
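            # The file consists of "name: value" lines. Illustrative
            # content (made-up values):
            #   rchar: 323934931
            #   wchar: 323929600
            #   syscr: 632687
            #   syscw: 632675
            #   read_bytes: 0
            #   write_bytes: 4096
            #   cancelled_write_bytes: 0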
1796 with open_binary(fname) as f:
1797 for line in f:
1798 # https://github.com/giampaolo/psutil/issues/1004
1799 line = line.strip()
1800 if line:
1801 try:
1802 name, value = line.split(b': ')
1803 except ValueError:
1804 # https://github.com/giampaolo/psutil/issues/1004
1805 continue
1806 else:
1807 fields[name] = int(value)
1808 if not fields:
1809 msg = f"{fname} file was empty"
1810 raise RuntimeError(msg)
1811 try:
1812 return ntp.pio(
1813 fields[b'syscr'], # read syscalls
1814 fields[b'syscw'], # write syscalls
1815 fields[b'read_bytes'], # read bytes
1816 fields[b'write_bytes'], # write bytes
1817 fields[b'rchar'], # read chars
1818 fields[b'wchar'], # write chars
1819 )
1820 except KeyError as err:
1821 msg = (
1822 f"{err.args[0]!r} field was not found in {fname}; found"
1823 f" fields are {fields!r}"
1824 )
1825 raise ValueError(msg) from None
1826
1827 @wrap_exceptions
1828 def cpu_times(self):
1829 values = self._parse_stat_file()
1830 utime = float(values['utime']) / CLOCK_TICKS
1831 stime = float(values['stime']) / CLOCK_TICKS
1832 children_utime = float(values['children_utime']) / CLOCK_TICKS
1833 children_stime = float(values['children_stime']) / CLOCK_TICKS
1834 iowait = float(values['blkio_ticks']) / CLOCK_TICKS
1835 return ntp.pcputimes(
1836 utime, stime, children_utime, children_stime, iowait
1837 )
1838
1839 @wrap_exceptions
1840 def cpu_num(self):
1841 """What CPU the process is on."""
1842 return int(self._parse_stat_file()['cpu_num'])
1843
1844 @wrap_exceptions
1845 def wait(self, timeout=None):
1846 return _psposix.wait_pid(self.pid, timeout, self._name)
1847
1848 @wrap_exceptions
1849 def create_time(self, monotonic=False):
        # The 'starttime' field in /proc/[pid]/stat is expressed in
        # clock ticks (jiffies), a relative value representing the
        # number of clock ticks elapsed between system boot and the
        # process creation. It never changes and is unaffected by
        # system clock updates.
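        # Illustrative arithmetic (made-up numbers): with a 'starttime'
        # of 425628 ticks and CLOCK_TICKS == 100, the process was
        # created 4256.28 seconds after boot; adding boot_time() turns
        # that into an epoch timestamp.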
1855 if self._ctime is None:
1856 self._ctime = (
1857 float(self._parse_stat_file()['create_time']) / CLOCK_TICKS
1858 )
1859 if monotonic:
1860 return self._ctime
1861 # Add the boot time, returning time expressed in seconds since
1862 # the epoch. This is subject to system clock updates.
1863 return self._ctime + boot_time()
1864
1865 @wrap_exceptions
1866 def memory_info(self):
1867 # ============================================================
1868 # | FIELD | DESCRIPTION | AKA | TOP |
1869 # ============================================================
1870 # | rss | resident set size | | RES |
1871 # | vms | total program size | size | VIRT |
1872 # | shared | shared pages (from shared mappings) | | SHR |
1873 # | text | text ('code') | trs | CODE |
1874 # | lib | library (unused in Linux 2.6) | lrs | |
1875 # | data | data + stack | drs | DATA |
1876 # | dirty | dirty pages (unused in Linux 2.6) | dt | |
1877 # ============================================================
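        # Illustrative statm content (made-up values): 7 space-separated
        # integers expressed in pages, e.g.:
        #   8622 1422 1066 295 0 1063 0
        # Each value is multiplied by PAGESIZE to obtain bytes.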
1878 with open_binary(f"{self._procfs_path}/{self.pid}/statm") as f:
1879 vms, rss, shared, text, lib, data, dirty = (
1880 int(x) * PAGESIZE for x in f.readline().split()[:7]
1881 )
1882 return ntp.pmem(rss, vms, shared, text, lib, data, dirty)
1883
1884 if HAS_PROC_SMAPS_ROLLUP or HAS_PROC_SMAPS:
1885
1886 def _parse_smaps_rollup(self):
1887 # /proc/pid/smaps_rollup was added to Linux in 2017. Faster
1888 # than /proc/pid/smaps. It reports higher PSS than */smaps
1889 # (from 1k up to 200k higher; tested against all processes).
            # IMPORTANT: /proc/pid/smaps_rollup is weird, because it
            # raises ESRCH / ENOENT for many PIDs, even if they're alive
            # (also when running as root). In that case we fall back to
            # /proc/pid/smaps, which is slower but has a +50% success
            # rate compared to /proc/pid/smaps_rollup.
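            # Illustrative smaps_rollup excerpt (made-up values; sizes
            # are expressed in kB, hence the "* 1024" below):
            #   Rss:                1280 kB
            #   Pss:                 512 kB
            #   Private_Clean:       128 kB
            #   Private_Dirty:       256 kB
            #   Swap:                  0 kB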
1895 uss = pss = swap = 0
1896 with open_binary(
1897 f"{self._procfs_path}/{self.pid}/smaps_rollup"
1898 ) as f:
1899 for line in f:
1900 if line.startswith(b"Private_"):
1901 # Private_Clean, Private_Dirty, Private_Hugetlb
1902 uss += int(line.split()[1]) * 1024
1903 elif line.startswith(b"Pss:"):
1904 pss = int(line.split()[1]) * 1024
1905 elif line.startswith(b"Swap:"):
1906 swap = int(line.split()[1]) * 1024
1907 return (uss, pss, swap)
1908
1909 @wrap_exceptions
1910 def _parse_smaps(
1911 self,
1912 # Gets Private_Clean, Private_Dirty, Private_Hugetlb.
1913 _private_re=re.compile(br"\nPrivate.*:\s+(\d+)"),
1914 _pss_re=re.compile(br"\nPss\:\s+(\d+)"),
1915 _swap_re=re.compile(br"\nSwap\:\s+(\d+)"),
1916 ):
1917 # /proc/pid/smaps does not exist on kernels < 2.6.14 or if
1918 # CONFIG_MMU kernel configuration option is not enabled.
1919
1920 # Note: using 3 regexes is faster than reading the file
1921 # line by line.
1922 #
1923 # You might be tempted to calculate USS by subtracting
1924 # the "shared" value from the "resident" value in
1925 # /proc/<pid>/statm. But at least on Linux, statm's "shared"
1926 # value actually counts pages backed by files, which has
1927 # little to do with whether the pages are actually shared.
1928 # /proc/self/smaps on the other hand appears to give us the
1929 # correct information.
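            # The 3 regexes above match lines such as (made-up values):
            #   Private_Dirty:       256 kB
            #   Pss:                 512 kB
            #   Swap:                  0 kB
            # Sizes are expressed in kB, hence the "* 1024" below.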
1930 smaps_data = self._read_smaps_file()
            # Note: the smaps file can be empty for certain processes.
            # The code below will not crash though; it will just return 0.
1933 uss = sum(map(int, _private_re.findall(smaps_data))) * 1024
1934 pss = sum(map(int, _pss_re.findall(smaps_data))) * 1024
1935 swap = sum(map(int, _swap_re.findall(smaps_data))) * 1024
1936 return (uss, pss, swap)
1937
1938 @wrap_exceptions
1939 def memory_full_info(self):
1940 if HAS_PROC_SMAPS_ROLLUP: # faster
1941 try:
1942 uss, pss, swap = self._parse_smaps_rollup()
1943 except (ProcessLookupError, FileNotFoundError):
1944 uss, pss, swap = self._parse_smaps()
1945 else:
1946 uss, pss, swap = self._parse_smaps()
1947 basic_mem = self.memory_info()
1948 return ntp.pfullmem(*basic_mem + (uss, pss, swap))
1949
1950 else:
1951 memory_full_info = memory_info
1952
1953 if HAS_PROC_SMAPS:
1954
1955 @wrap_exceptions
1956 def memory_maps(self):
1957 """Return process's mapped memory regions as a list of named
1958 tuples. Fields are explained in 'man proc'; here is an updated
1959 (Apr 2012) version: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/filesystems/proc.txt?id=b76437579d1344b612cf1851ae610c636cec7db0.
1960
1961 /proc/{PID}/smaps does not exist on kernels < 2.6.14 or if
1962 CONFIG_MMU kernel configuration option is not enabled.
1963 """
1964
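            # Illustrative smaps excerpt (made-up values): each mapping
            # starts with a header line followed by "Field: value kB"
            # lines, e.g.:
            #   00400000-0040c000 r-xp 00000000 08:01 123456  /bin/cat
            #   Size:                 48 kB
            #   Rss:                  44 kB
            #   ...
            # get_blocks() below yields one (header, fields-dict) pair
            # per mapping.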
1965 def get_blocks(lines, current_block):
1966 data = {}
1967 for line in lines:
1968 fields = line.split(None, 5)
1969 if not fields[0].endswith(b':'):
1970 # new block section
1971 yield (current_block.pop(), data)
1972 current_block.append(line)
1973 else:
1974 try:
1975 data[fields[0]] = int(fields[1]) * 1024
1976 except (ValueError, IndexError):
1977 if fields[0].startswith(b'VmFlags:'):
1978 # see issue #369
1979 continue
1980 msg = f"don't know how to interpret line {line!r}"
1981 raise ValueError(msg) from None
1982 yield (current_block.pop(), data)
1983
1984 data = self._read_smaps_file()
1985 # Note: smaps file can be empty for certain processes or for
1986 # zombies.
1987 if not data:
1988 self._raise_if_zombie()
1989 return []
1990 lines = data.split(b'\n')
1991 ls = []
1992 first_line = lines.pop(0)
1993 current_block = [first_line]
1994 for header, data in get_blocks(lines, current_block):
1995 hfields = header.split(None, 5)
1996 try:
1997 addr, perms, _offset, _dev, _inode, path = hfields
1998 except ValueError:
1999 addr, perms, _offset, _dev, _inode, path = hfields + ['']
2000 if not path:
2001 path = '[anon]'
2002 else:
2003 path = decode(path)
2004 path = path.strip()
2005 if path.endswith(' (deleted)') and not path_exists_strict(
2006 path
2007 ):
2008 path = path[:-10]
2009 item = (
2010 decode(addr),
2011 decode(perms),
2012 path,
2013 data.get(b'Rss:', 0),
2014 data.get(b'Size:', 0),
2015 data.get(b'Pss:', 0),
2016 data.get(b'Shared_Clean:', 0),
2017 data.get(b'Shared_Dirty:', 0),
2018 data.get(b'Private_Clean:', 0),
2019 data.get(b'Private_Dirty:', 0),
2020 data.get(b'Referenced:', 0),
2021 data.get(b'Anonymous:', 0),
2022 data.get(b'Swap:', 0),
2023 )
2024 ls.append(item)
2025 return ls
2026
2027 @wrap_exceptions
2028 def cwd(self):
2029 return self._readlink(
2030 f"{self._procfs_path}/{self.pid}/cwd", fallback=""
2031 )
2032
2033 @wrap_exceptions
2034 def num_ctx_switches(
2035 self, _ctxsw_re=re.compile(br'ctxt_switches:\t(\d+)')
2036 ):
2037 data = self._read_status_file()
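        # The regex matches both of these status lines (made-up values):
        #   voluntary_ctxt_switches:        150
        #   nonvoluntary_ctxt_switches:     8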
2038 ctxsw = _ctxsw_re.findall(data)
2039 if not ctxsw:
2040 msg = (
2041 "'voluntary_ctxt_switches' and"
                " 'nonvoluntary_ctxt_switches' lines were not found in"
2043 f" {self._procfs_path}/{self.pid}/status; the kernel is"
2044 " probably older than 2.6.23"
2045 )
2046 raise NotImplementedError(msg)
2047 return ntp.pctxsw(int(ctxsw[0]), int(ctxsw[1]))
2048
2049 @wrap_exceptions
2050 def num_threads(self, _num_threads_re=re.compile(br'Threads:\t(\d+)')):
        # Using a regex is faster than iterating over the file line by
        # line.
2052 data = self._read_status_file()
2053 return int(_num_threads_re.findall(data)[0])
2054
2055 @wrap_exceptions
2056 def threads(self):
2057 thread_ids = os.listdir(f"{self._procfs_path}/{self.pid}/task")
2058 thread_ids.sort()
2059 retlist = []
2060 hit_enoent = False
2061 for thread_id in thread_ids:
2062 fname = f"{self._procfs_path}/{self.pid}/task/{thread_id}/stat"
2063 try:
2064 with open_binary(fname) as f:
2065 st = f.read().strip()
2066 except (FileNotFoundError, ProcessLookupError):
2067 # no such file or directory or no such process;
2068 # it means thread disappeared on us
2069 hit_enoent = True
2070 continue
2071 # ignore the first two values ("pid (exe)")
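            # The remainder is parsed like /proc/[pid]/stat in
            # _parse_stat_file(): utime and stime are fields 11 and 12
            # of the post-")" portion, expressed in clock ticks.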
2072 st = st[st.find(b')') + 2 :]
2073 values = st.split(b' ')
2074 utime = float(values[11]) / CLOCK_TICKS
2075 stime = float(values[12]) / CLOCK_TICKS
2076 ntuple = ntp.pthread(int(thread_id), utime, stime)
2077 retlist.append(ntuple)
2078 if hit_enoent:
2079 self._raise_if_not_alive()
2080 return retlist
2081
2082 @wrap_exceptions
2083 def nice_get(self):
2084 # with open_text(f"{self._procfs_path}/{self.pid}/stat") as f:
2085 # data = f.read()
2086 # return int(data.split()[18])
2087
2088 # Use C implementation
2089 return cext.proc_priority_get(self.pid)
2090
2091 @wrap_exceptions
2092 def nice_set(self, value):
2093 return cext.proc_priority_set(self.pid, value)
2094
2095 # starting from CentOS 6.
2096 if HAS_CPU_AFFINITY:
2097
2098 @wrap_exceptions
2099 def cpu_affinity_get(self):
2100 return cext.proc_cpu_affinity_get(self.pid)
2101
2102 def _get_eligible_cpus(
2103 self, _re=re.compile(br"Cpus_allowed_list:\t(\d+)-(\d+)")
2104 ):
2105 # See: https://github.com/giampaolo/psutil/issues/956
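            # The regex above matches a status line such as (made-up
            # range):
            #   Cpus_allowed_list:      0-7
            # Only a single contiguous range is handled; any other
            # format falls back to the full CPU list.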
2106 data = self._read_status_file()
2107 match = _re.findall(data)
2108 if match:
2109 return list(range(int(match[0][0]), int(match[0][1]) + 1))
2110 else:
2111 return list(range(len(per_cpu_times())))
2112
2113 @wrap_exceptions
2114 def cpu_affinity_set(self, cpus):
2115 try:
2116 cext.proc_cpu_affinity_set(self.pid, cpus)
2117 except (OSError, ValueError) as err:
2118 if isinstance(err, ValueError) or err.errno == errno.EINVAL:
2119 eligible_cpus = self._get_eligible_cpus()
2120 all_cpus = tuple(range(len(per_cpu_times())))
2121 for cpu in cpus:
2122 if cpu not in all_cpus:
2123 msg = (
2124 f"invalid CPU {cpu!r}; choose between"
2125 f" {eligible_cpus!r}"
2126 )
2127 raise ValueError(msg) from None
2128 if cpu not in eligible_cpus:
2129 msg = (
2130 f"CPU number {cpu} is not eligible; choose"
2131 f" between {eligible_cpus}"
2132 )
2133 raise ValueError(msg) from err
2134 raise
2135
2136 # only starting from kernel 2.6.13
2137 if HAS_PROC_IO_PRIORITY:
2138
2139 @wrap_exceptions
2140 def ionice_get(self):
2141 ioclass, value = cext.proc_ioprio_get(self.pid)
2142 ioclass = IOPriority(ioclass)
2143 return ntp.pionice(ioclass, value)
2144
2145 @wrap_exceptions
2146 def ionice_set(self, ioclass, value):
2147 if value is None:
2148 value = 0
2149 if value and ioclass in {
2150 IOPriority.IOPRIO_CLASS_IDLE,
2151 IOPriority.IOPRIO_CLASS_NONE,
2152 }:
2153 msg = f"{ioclass!r} ioclass accepts no value"
2154 raise ValueError(msg)
2155 if value < 0 or value > 7:
2156 msg = "value not in 0-7 range"
2157 raise ValueError(msg)
2158 return cext.proc_ioprio_set(self.pid, ioclass, value)
2159
2160 if hasattr(resource, "prlimit"):
2161
2162 @wrap_exceptions
2163 def rlimit(self, resource_, limits=None):
            # If pid is 0, prlimit() applies to the calling process and
            # we don't want that. We should never get here though, as
            # PID 0 is not supported on Linux.
2167 if self.pid == 0:
2168 msg = "can't use prlimit() against PID 0 process"
2169 raise ValueError(msg)
2170 try:
2171 if limits is None:
2172 # get
2173 return resource.prlimit(self.pid, resource_)
2174 else:
2175 # set
2176 if len(limits) != 2:
2177 msg = (
2178 "second argument must be a (soft, hard) "
2179 f"tuple, got {limits!r}"
2180 )
2181 raise ValueError(msg)
2182 resource.prlimit(self.pid, resource_, limits)
2183 except OSError as err:
2184 if err.errno == errno.ENOSYS:
2185 # I saw this happening on Travis:
2186 # https://travis-ci.org/giampaolo/psutil/jobs/51368273
2187 self._raise_if_zombie()
2188 raise
2189
2190 @wrap_exceptions
2191 def status(self):
2192 letter = self._parse_stat_file()['status']
2193 letter = letter.decode()
2194 # XXX is '?' legit? (we're not supposed to return it anyway)
2195 return PROC_STATUSES.get(letter, '?')
2196
2197 @wrap_exceptions
2198 def open_files(self):
2199 retlist = []
2200 files = os.listdir(f"{self._procfs_path}/{self.pid}/fd")
2201 hit_enoent = False
2202 for fd in files:
2203 file = f"{self._procfs_path}/{self.pid}/fd/{fd}"
2204 try:
2205 path = readlink(file)
2206 except (FileNotFoundError, ProcessLookupError):
2207 # ENOENT == file which is gone in the meantime
2208 hit_enoent = True
2209 continue
2210 except OSError as err:
2211 if err.errno == errno.EINVAL:
2212 # not a link
2213 continue
2214 if err.errno == errno.ENAMETOOLONG:
2215 # file name too long
2216 debug(err)
2217 continue
2218 raise
2219 else:
                # If the path is not absolute there's no way to tell
                # whether it's a regular file or not, so we skip it.
                # A regular file is always supposed to have an
                # absolute path though.
2224 if path.startswith('/') and isfile_strict(path):
2225 # Get file position and flags.
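                        # Illustrative fdinfo content (made-up values);
                        # "pos" and "flags" are read from the first two
                        # lines, flags being octal:
                        #   pos:    4096
                        #   flags:  0100002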
2226 file = f"{self._procfs_path}/{self.pid}/fdinfo/{fd}"
2227 try:
2228 with open_binary(file) as f:
2229 pos = int(f.readline().split()[1])
2230 flags = int(f.readline().split()[1], 8)
2231 except (FileNotFoundError, ProcessLookupError):
2232 # fd gone in the meantime; process may
2233 # still be alive
2234 hit_enoent = True
2235 else:
2236 mode = file_flags_to_mode(flags)
2237 ntuple = ntp.popenfile(
2238 path, int(fd), int(pos), mode, flags
2239 )
2240 retlist.append(ntuple)
2241 if hit_enoent:
2242 self._raise_if_not_alive()
2243 return retlist
2244
2245 @wrap_exceptions
2246 def net_connections(self, kind='inet'):
2247 ret = _net_connections.retrieve(kind, self.pid)
2248 self._raise_if_not_alive()
2249 return ret
2250
2251 @wrap_exceptions
2252 def num_fds(self):
2253 return len(os.listdir(f"{self._procfs_path}/{self.pid}/fd"))
2254
2255 @wrap_exceptions
2256 def ppid(self):
2257 return int(self._parse_stat_file()['ppid'])
2258
2259 @wrap_exceptions
2260 def uids(self, _uids_re=re.compile(br'Uid:\t(\d+)\t(\d+)\t(\d+)')):
2261 data = self._read_status_file()
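        # The regex matches a status line such as (made-up IDs):
        #   Uid:    1000    1000    1000    1000
        # i.e. real, effective, saved and filesystem UIDs; only the
        # first three are returned.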
2262 real, effective, saved = _uids_re.findall(data)[0]
2263 return ntp.puids(int(real), int(effective), int(saved))
2264
2265 @wrap_exceptions
2266 def gids(self, _gids_re=re.compile(br'Gid:\t(\d+)\t(\d+)\t(\d+)')):
2267 data = self._read_status_file()
2268 real, effective, saved = _gids_re.findall(data)[0]
2269 return ntp.pgids(int(real), int(effective), int(saved))