1# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
4
5"""Linux platform implementation."""
6
7import base64
8import collections
9import enum
10import errno
11import functools
12import glob
13import os
14import re
15import resource
16import socket
17import struct
18import sys
19import warnings
20from collections import defaultdict
21from collections import namedtuple
22
23from . import _common
24from . import _ntuples as ntp
25from . import _psposix
26from . import _psutil_linux as cext
27from ._common import ENCODING
28from ._common import NIC_DUPLEX_FULL
29from ._common import NIC_DUPLEX_HALF
30from ._common import NIC_DUPLEX_UNKNOWN
31from ._common import AccessDenied
32from ._common import NoSuchProcess
33from ._common import ZombieProcess
34from ._common import bcat
35from ._common import cat
36from ._common import debug
37from ._common import decode
38from ._common import get_procfs_path
39from ._common import isfile_strict
40from ._common import memoize
41from ._common import memoize_when_activated
42from ._common import open_binary
43from ._common import open_text
44from ._common import parse_environ_block
45from ._common import path_exists_strict
46from ._common import supports_ipv6
47from ._common import usage_percent
48
49# fmt: off
50__extra__all__ = [
51 'PROCFS_PATH',
52 # io prio constants
53 "IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
54 "IOPRIO_CLASS_IDLE",
55 # connection status constants
56 "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
57 "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
58 "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING",
59]
60# fmt: on
61
62
63# =====================================================================
64# --- globals
65# =====================================================================
66
67
68POWER_SUPPLY_PATH = "/sys/class/power_supply"
69HAS_PROC_SMAPS = os.path.exists(f"/proc/{os.getpid()}/smaps")
70HAS_PROC_SMAPS_ROLLUP = os.path.exists(f"/proc/{os.getpid()}/smaps_rollup")
71HAS_PROC_IO_PRIORITY = hasattr(cext, "proc_ioprio_get")
72HAS_CPU_AFFINITY = hasattr(cext, "proc_cpu_affinity_get")
73
74# Number of clock ticks per second
75CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
76PAGESIZE = cext.getpagesize()
77LITTLE_ENDIAN = sys.byteorder == 'little'
78UNSET = object()
79
80# "man iostat" states that sectors are equivalent with blocks and have
81# a size of 512 bytes. Despite this value can be queried at runtime
82# via /sys/block/{DISK}/queue/hw_sector_size and results may vary
83# between 1k, 2k, or 4k... 512 appears to be a magic constant used
84# throughout Linux source code:
85# * https://stackoverflow.com/a/38136179/376587
86# * https://lists.gt.net/linux/kernel/2241060
87# * https://github.com/giampaolo/psutil/issues/1305
88# * https://github.com/torvalds/linux/blob/
89# 4f671fe2f9523a1ea206f63fe60a7c7b3a56d5c7/include/linux/bio.h#L99
90# * https://lkml.org/lkml/2015/8/17/234
91DISK_SECTOR_SIZE = 512
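# A quick illustration of how this constant is used further down in
# disk_io_counters() (numbers are made up): a device reporting 2048
# sectors read in /proc/diskstats is accounted as
#
#   2048 * DISK_SECTOR_SIZE  ->  1048576 bytes (1 MiB)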
92
93AddressFamily = enum.IntEnum(
94 'AddressFamily', {'AF_LINK': int(socket.AF_PACKET)}
95)
96AF_LINK = AddressFamily.AF_LINK
97
98
99# ioprio_* constants http://linux.die.net/man/2/ioprio_get
100class IOPriority(enum.IntEnum):
101 IOPRIO_CLASS_NONE = 0
102 IOPRIO_CLASS_RT = 1
103 IOPRIO_CLASS_BE = 2
104 IOPRIO_CLASS_IDLE = 3
105
106
107globals().update(IOPriority.__members__)
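# The call above re-exports the enum members as module-level constants,
# e.g. (illustrative):
#
#   >>> IOPRIO_CLASS_BE is IOPriority.IOPRIO_CLASS_BE
#   True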
108
109# See:
110# https://github.com/torvalds/linux/blame/master/fs/proc/array.c
111# ...and (TASK_* constants):
112# https://github.com/torvalds/linux/blob/master/include/linux/sched.h
113PROC_STATUSES = {
114 "R": _common.STATUS_RUNNING,
115 "S": _common.STATUS_SLEEPING,
116 "D": _common.STATUS_DISK_SLEEP,
117 "T": _common.STATUS_STOPPED,
118 "t": _common.STATUS_TRACING_STOP,
119 "Z": _common.STATUS_ZOMBIE,
120 "X": _common.STATUS_DEAD,
121 "x": _common.STATUS_DEAD,
122 "K": _common.STATUS_WAKE_KILL,
123 "W": _common.STATUS_WAKING,
124 "I": _common.STATUS_IDLE,
125 "P": _common.STATUS_PARKED,
126}
127
128# https://github.com/torvalds/linux/blob/master/include/net/tcp_states.h
129TCP_STATUSES = {
130 "01": _common.CONN_ESTABLISHED,
131 "02": _common.CONN_SYN_SENT,
132 "03": _common.CONN_SYN_RECV,
133 "04": _common.CONN_FIN_WAIT1,
134 "05": _common.CONN_FIN_WAIT2,
135 "06": _common.CONN_TIME_WAIT,
136 "07": _common.CONN_CLOSE,
137 "08": _common.CONN_CLOSE_WAIT,
138 "09": _common.CONN_LAST_ACK,
139 "0A": _common.CONN_LISTEN,
140 "0B": _common.CONN_CLOSING,
141}
142
143
144# =====================================================================
145# --- utils
146# =====================================================================
147
148
149def readlink(path):
150 """Wrapper around os.readlink()."""
151 assert isinstance(path, str), path
152 path = os.readlink(path)
153 # readlink() might return paths containing null bytes ('\x00')
154 # resulting in "TypeError: must be encoded string without NULL
155 # bytes, not str" errors when the string is passed to other
156 # fs-related functions (os.*, open(), ...).
157 # Apparently everything after '\x00' is garbage (we can have
158 # ' (deleted)', 'new' and possibly others), see:
159 # https://github.com/giampaolo/psutil/issues/717
160 path = path.split('\x00')[0]
161 # Certain paths have ' (deleted)' appended. Usually this is
162 # bogus as the file actually exists. Even if it doesn't we
163 # don't care.
164 if path.endswith(' (deleted)') and not path_exists_strict(path):
165 path = path[:-10]
166 return path
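
# Illustrative example of the cleanups above (hypothetical paths): a link
# target read as "/tmp/file (deleted)\x00garbage" is first truncated at
# the null byte and, since "/tmp/file (deleted)" does not actually exist,
# returned as "/tmp/file".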
167
168
169def file_flags_to_mode(flags):
170 """Convert file's open() flags into a readable string.
171 Used by Process.open_files().
172 """
173 modes_map = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'}
174 mode = modes_map[flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)]
175 if flags & os.O_APPEND:
176 mode = mode.replace('w', 'a', 1)
177 mode = mode.replace('w+', 'r+')
178 # possible values: r, w, a, r+, a+
179 return mode
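
# A couple of illustrative conversions (assuming the usual Linux os.O_*
# flag values):
#
#   >>> file_flags_to_mode(os.O_WRONLY | os.O_APPEND)
#   'a'
#   >>> file_flags_to_mode(os.O_RDWR)
#   'r+'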
180
181
182def is_storage_device(name):
183 """Return True if the given name refers to a root device (e.g.
184 "sda", "nvme0n1") as opposed to a logical partition (e.g. "sda1",
185 "nvme0n1p1"). If name is a virtual device (e.g. "loop1", "ram")
186 return True.
187 """
188 # Re-adapted from iostat source code, see:
189 # https://github.com/sysstat/sysstat/blob/
190 # 97912938cd476645b267280069e83b1c8dc0e1c7/common.c#L208
191 # Some devices may have a slash in their name (e.g. cciss/c0d0...).
192 name = name.replace('/', '!')
193 including_virtual = True
194 if including_virtual:
195 path = f"/sys/block/{name}"
196 else:
197 path = f"/sys/block/{name}/device"
198 return os.access(path, os.F_OK)
199
200
201@memoize
202def _scputimes_ntuple(procfs_path):
203 """Return a namedtuple of variable fields depending on the CPU times
204 available on this Linux kernel version which may be:
205 (user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
206 [guest_nice]]])
207 Used by cpu_times() function.
208 """
209 with open_binary(f"{procfs_path}/stat") as f:
210 values = f.readline().split()[1:]
211 fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
212 vlen = len(values)
213 if vlen >= 8:
214 # Linux >= 2.6.11
215 fields.append('steal')
216 if vlen >= 9:
217 # Linux >= 2.6.24
218 fields.append('guest')
219 if vlen >= 10:
220 # Linux >= 3.2.0
221 fields.append('guest_nice')
222 return namedtuple('scputimes', fields)
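
# Illustrative first line of /proc/stat on a kernel >= 3.2.0 (made-up
# numbers):
#
#   cpu  4705 356 584 3699 23 23 0 0 0 0
#
# The leading "cpu" token is discarded, leaving 10 values, so the
# resulting namedtuple carries every field up to "guest_nice".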
223
224
225# Set it into _ntuples.py namespace.
226try:
227 ntp.scputimes = _scputimes_ntuple("/proc")
228except Exception as err: # noqa: BLE001
229 # Don't want to crash at import time.
230 debug(f"ignoring exception on import: {err!r}")
231 ntp.scputimes = namedtuple('scputimes', 'user system idle')(0.0, 0.0, 0.0)
232
233# XXX: must be available also at this module level in order to be
234# serialized (tests/test_misc.py::TestMisc::test_serialization).
235scputimes = ntp.scputimes
236
237
238# =====================================================================
239# --- system memory
240# =====================================================================
241
242
243def calculate_avail_vmem(mems):
244 """Fallback for kernels < 3.14 where /proc/meminfo does not provide
245 "MemAvailable", see:
246 https://blog.famzah.net/2014/09/24/.
247
248 This code reimplements the algorithm outlined here:
249 https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/
250 commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
251
    We use this function also when "MemAvailable" returns 0 (possibly a
    kernel bug, see: https://github.com/giampaolo/psutil/issues/1915).
    In that case this routine matches the "free" CLI tool's result
    ("available" column).
256
257 XXX: on recent kernels this calculation may differ by ~1.5% compared
258 to "MemAvailable:", as it's calculated slightly differently.
259 It is still way more realistic than doing (free + cached) though.
260 See:
261 * https://gitlab.com/procps-ng/procps/issues/42
262 * https://github.com/famzah/linux-memavailable-procfs/issues/2
263 """
264 # Note about "fallback" value. According to:
265 # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/
266 # commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
    # ...long ago "available" memory was calculated as (free + cached).
    # We use the fallback when one of these is missing from /proc/meminfo:
269 # "Active(file)": introduced in 2.6.28 / Dec 2008
270 # "Inactive(file)": introduced in 2.6.28 / Dec 2008
271 # "SReclaimable": introduced in 2.6.19 / Nov 2006
272 # /proc/zoneinfo: introduced in 2.6.13 / Aug 2005
273 free = mems[b'MemFree:']
274 fallback = free + mems.get(b"Cached:", 0)
275 try:
276 lru_active_file = mems[b'Active(file):']
277 lru_inactive_file = mems[b'Inactive(file):']
278 slab_reclaimable = mems[b'SReclaimable:']
279 except KeyError as err:
280 debug(
281 f"{err.args[0]} is missing from /proc/meminfo; using an"
282 " approximation for calculating available memory"
283 )
284 return fallback
285 try:
286 f = open_binary(f"{get_procfs_path()}/zoneinfo")
287 except OSError:
288 return fallback # kernel 2.6.13
289
290 watermark_low = 0
291 with f:
292 for line in f:
293 line = line.strip()
294 if line.startswith(b'low'):
295 watermark_low += int(line.split()[1])
296 watermark_low *= PAGESIZE
297
298 avail = free - watermark_low
299 pagecache = lru_active_file + lru_inactive_file
300 pagecache -= min(pagecache / 2, watermark_low)
301 avail += pagecache
302 avail += slab_reclaimable - min(slab_reclaimable / 2.0, watermark_low)
303 return int(avail)
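
# Rough worked example of the formula above (made-up numbers, in bytes):
# with free=1000, watermark_low=100, page cache (active + inactive file
# LRU) = 600 and slab_reclaimable=200 the result is:
#
#   (1000 - 100) + (600 - min(600 / 2, 100)) + (200 - min(200 / 2.0, 100))
#   = 900 + 500 + 100 = 1500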
304
305
306def virtual_memory():
307 """Report virtual memory stats.
308 This implementation mimics procps-ng-3.3.12, aka "free" CLI tool:
309 https://gitlab.com/procps-ng/procps/blob/
310 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L778-791
311 The returned values are supposed to match both "free" and "vmstat -s"
312 CLI tools.
313 """
314 missing_fields = []
315 mems = {}
316 with open_binary(f"{get_procfs_path()}/meminfo") as f:
317 for line in f:
318 fields = line.split()
319 mems[fields[0]] = int(fields[1]) * 1024
320
321 # /proc doc states that the available fields in /proc/meminfo vary
322 # by architecture and compile options, but these 3 values are also
323 # returned by sysinfo(2); as such we assume they are always there.
324 total = mems[b'MemTotal:']
325 free = mems[b'MemFree:']
326 try:
327 buffers = mems[b'Buffers:']
328 except KeyError:
329 # https://github.com/giampaolo/psutil/issues/1010
330 buffers = 0
331 missing_fields.append('buffers')
332 try:
333 cached = mems[b"Cached:"]
334 except KeyError:
335 cached = 0
336 missing_fields.append('cached')
337 else:
338 # "free" cmdline utility sums reclaimable to cached.
339 # Older versions of procps used to add slab memory instead.
340 # This got changed in:
341 # https://gitlab.com/procps-ng/procps/commit/
342 # 05d751c4f076a2f0118b914c5e51cfbb4762ad8e
343 cached += mems.get(b"SReclaimable:", 0) # since kernel 2.6.19
344
345 try:
346 shared = mems[b'Shmem:'] # since kernel 2.6.32
347 except KeyError:
348 try:
349 shared = mems[b'MemShared:'] # kernels 2.4
350 except KeyError:
351 shared = 0
352 missing_fields.append('shared')
353
354 try:
355 active = mems[b"Active:"]
356 except KeyError:
357 active = 0
358 missing_fields.append('active')
359
360 try:
361 inactive = mems[b"Inactive:"]
362 except KeyError:
363 try:
364 inactive = (
365 mems[b"Inact_dirty:"]
366 + mems[b"Inact_clean:"]
367 + mems[b"Inact_laundry:"]
368 )
369 except KeyError:
370 inactive = 0
371 missing_fields.append('inactive')
372
373 try:
374 slab = mems[b"Slab:"]
375 except KeyError:
376 slab = 0
377
378 # - starting from 4.4.0 we match free's "available" column.
379 # Before 4.4.0 we calculated it as (free + buffers + cached)
380 # which matched htop.
381 # - free and htop available memory differs as per:
382 # http://askubuntu.com/a/369589
383 # http://unix.stackexchange.com/a/65852/168884
384 # - MemAvailable has been introduced in kernel 3.14
385 try:
386 avail = mems[b'MemAvailable:']
387 except KeyError:
388 avail = calculate_avail_vmem(mems)
389 else:
390 if avail == 0:
391 # Yes, it can happen (probably a kernel bug):
392 # https://github.com/giampaolo/psutil/issues/1915
            # In this case the "free" CLI tool makes an estimate. We do
            # the same, and our result matches it.
395 avail = calculate_avail_vmem(mems)
396
397 if avail < 0:
398 avail = 0
399 missing_fields.append('available')
400 elif avail > total:
401 # If avail is greater than total or our calculation overflows,
        # that's symptomatic of running within an LXC container where such
403 # values will be dramatically distorted over those of the host.
404 # https://gitlab.com/procps-ng/procps/blob/
405 # 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L764
406 avail = free
407
408 used = total - avail
409
410 percent = usage_percent((total - avail), total, round_=1)
411
412 # Warn about missing metrics which are set to 0.
413 if missing_fields:
414 msg = "{} memory stats couldn't be determined and {} set to 0".format(
415 ", ".join(missing_fields),
416 "was" if len(missing_fields) == 1 else "were",
417 )
418 warnings.warn(msg, RuntimeWarning, stacklevel=2)
419
420 return ntp.svmem(
421 total,
422 avail,
423 percent,
424 used,
425 free,
426 active,
427 inactive,
428 buffers,
429 cached,
430 shared,
431 slab,
432 )
433
434
435def swap_memory():
436 """Return swap memory metrics."""
437 mems = {}
438 with open_binary(f"{get_procfs_path()}/meminfo") as f:
439 for line in f:
440 fields = line.split()
441 mems[fields[0]] = int(fields[1]) * 1024
    # We prefer /proc/meminfo over the sysinfo() syscall so that
    # psutil.PROCFS_PATH can be used, allowing retrieval inside
    # Linux containers, see:
445 # https://github.com/giampaolo/psutil/issues/1015
446 try:
447 total = mems[b'SwapTotal:']
448 free = mems[b'SwapFree:']
449 except KeyError:
450 _, _, _, _, total, free, unit_multiplier = cext.linux_sysinfo()
451 total *= unit_multiplier
452 free *= unit_multiplier
453
454 used = total - free
455 percent = usage_percent(used, total, round_=1)
456 # get pgin/pgouts
457 try:
458 f = open_binary(f"{get_procfs_path()}/vmstat")
459 except OSError as err:
460 # see https://github.com/giampaolo/psutil/issues/722
461 msg = (
462 "'sin' and 'sout' swap memory stats couldn't "
463 f"be determined and were set to 0 ({err})"
464 )
465 warnings.warn(msg, RuntimeWarning, stacklevel=2)
466 sin = sout = 0
467 else:
468 with f:
469 sin = sout = None
470 for line in f:
                # values are expressed in 4 KB pages; we want
                # bytes instead
473 if line.startswith(b'pswpin'):
474 sin = int(line.split(b' ')[1]) * 4 * 1024
475 elif line.startswith(b'pswpout'):
476 sout = int(line.split(b' ')[1]) * 4 * 1024
477 if sin is not None and sout is not None:
478 break
479 else:
480 # we might get here when dealing with exotic Linux
481 # flavors, see:
482 # https://github.com/giampaolo/psutil/issues/313
483 msg = "'sin' and 'sout' swap memory stats couldn't "
484 msg += "be determined and were set to 0"
485 warnings.warn(msg, RuntimeWarning, stacklevel=2)
486 sin = sout = 0
487 return ntp.sswap(total, used, free, percent, sin, sout)
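
# Illustrative /proc/vmstat lines (made-up numbers) and the conversion
# applied above, assuming 4 KB pages:
#
#   pswpin 54842   ->  sin  = 54842 * 4 * 1024 bytes
#   pswpout 12011  ->  sout = 12011 * 4 * 1024 bytes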
488
489
490# =====================================================================
491# --- CPU
492# =====================================================================
493
494
495def cpu_times():
496 """Return a named tuple representing the following system-wide
497 CPU times:
    (user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
499 [guest_nice]]])
500 Last 3 fields may not be available on all Linux kernel versions.
501 """
502 procfs_path = get_procfs_path()
503 with open_binary(f"{procfs_path}/stat") as f:
504 values = f.readline().split()
505 fields = values[1 : len(ntp.scputimes._fields) + 1]
506 fields = [float(x) / CLOCK_TICKS for x in fields]
507 return ntp.scputimes(*fields)
508
509
510def per_cpu_times():
511 """Return a list of namedtuple representing the CPU times
512 for every CPU available on the system.
513 """
514 procfs_path = get_procfs_path()
515 cpus = []
516 with open_binary(f"{procfs_path}/stat") as f:
517 # get rid of the first line which refers to system wide CPU stats
518 f.readline()
519 for line in f:
520 if line.startswith(b'cpu'):
521 values = line.split()
522 fields = values[1 : len(ntp.scputimes._fields) + 1]
523 fields = [float(x) / CLOCK_TICKS for x in fields]
524 entry = ntp.scputimes(*fields)
525 cpus.append(entry)
526 return cpus
527
528
529def cpu_count_logical():
530 """Return the number of logical CPUs in the system."""
531 try:
532 return os.sysconf("SC_NPROCESSORS_ONLN")
533 except ValueError:
534 # as a second fallback we try to parse /proc/cpuinfo
535 num = 0
536 with open_binary(f"{get_procfs_path()}/cpuinfo") as f:
537 for line in f:
538 if line.lower().startswith(b'processor'):
539 num += 1
540
        # unknown format (e.g. armel/sparc architectures), see:
542 # https://github.com/giampaolo/psutil/issues/200
543 # try to parse /proc/stat as a last resort
544 if num == 0:
545 search = re.compile(r'cpu\d')
546 with open_text(f"{get_procfs_path()}/stat") as f:
547 for line in f:
548 line = line.split(' ')[0]
549 if search.match(line):
550 num += 1
551
552 if num == 0:
553 # mimic os.cpu_count()
554 return None
555 return num
556
557
558def cpu_count_cores():
559 """Return the number of CPU cores in the system."""
560 # Method #1
561 ls = set()
562 # These 2 files are the same but */core_cpus_list is newer while
563 # */thread_siblings_list is deprecated and may disappear in the future.
564 # https://www.kernel.org/doc/Documentation/admin-guide/cputopology.rst
565 # https://github.com/giampaolo/psutil/pull/1727#issuecomment-707624964
566 # https://lkml.org/lkml/2019/2/26/41
567 p1 = "/sys/devices/system/cpu/cpu[0-9]*/topology/core_cpus_list"
568 p2 = "/sys/devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list"
569 for path in glob.glob(p1) or glob.glob(p2):
570 with open_binary(path) as f:
571 ls.add(f.read().strip())
572 result = len(ls)
573 if result != 0:
574 return result
575
576 # Method #2
577 mapping = {}
578 current_info = {}
579 with open_binary(f"{get_procfs_path()}/cpuinfo") as f:
580 for line in f:
581 line = line.strip().lower()
582 if not line:
583 # new section
584 try:
585 mapping[current_info[b'physical id']] = current_info[
586 b'cpu cores'
587 ]
588 except KeyError:
589 pass
590 current_info = {}
591 elif line.startswith((b'physical id', b'cpu cores')):
592 # ongoing section
593 key, value = line.split(b'\t:', 1)
594 current_info[key] = int(value)
595
596 result = sum(mapping.values())
597 return result or None # mimic os.cpu_count()
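
# Illustrative /proc/cpuinfo excerpt for method #2 (made-up values): two
# "processor" sections both containing "physical id : 0" and
# "cpu cores : 4" produce mapping == {0: 4}, hence a return value of 4.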
598
599
600def cpu_stats():
601 """Return various CPU stats as a named tuple."""
602 with open_binary(f"{get_procfs_path()}/stat") as f:
603 ctx_switches = None
604 interrupts = None
605 soft_interrupts = None
606 for line in f:
607 if line.startswith(b'ctxt'):
608 ctx_switches = int(line.split()[1])
609 elif line.startswith(b'intr'):
610 interrupts = int(line.split()[1])
611 elif line.startswith(b'softirq'):
612 soft_interrupts = int(line.split()[1])
613 if (
614 ctx_switches is not None
615 and soft_interrupts is not None
616 and interrupts is not None
617 ):
618 break
619 syscalls = 0
620 return ntp.scpustats(ctx_switches, interrupts, soft_interrupts, syscalls)
621
622
623def _cpu_get_cpuinfo_freq():
624 """Return current CPU frequency from cpuinfo if available."""
625 with open_binary(f"{get_procfs_path()}/cpuinfo") as f:
626 return [
627 float(line.split(b':', 1)[1])
628 for line in f
629 if line.lower().startswith(b'cpu mhz')
630 ]
631
632
633if os.path.exists("/sys/devices/system/cpu/cpufreq/policy0") or os.path.exists(
634 "/sys/devices/system/cpu/cpu0/cpufreq"
635):
636
637 def cpu_freq():
638 """Return frequency metrics for all CPUs.
        Unlike other OSes, Linux updates these values in
        real time.
641 """
642 cpuinfo_freqs = _cpu_get_cpuinfo_freq()
643 paths = glob.glob(
644 "/sys/devices/system/cpu/cpufreq/policy[0-9]*"
645 ) or glob.glob("/sys/devices/system/cpu/cpu[0-9]*/cpufreq")
646 paths.sort(key=lambda x: int(re.search(r"[0-9]+", x).group()))
647 ret = []
648 pjoin = os.path.join
649 for i, path in enumerate(paths):
650 if len(paths) == len(cpuinfo_freqs):
651 # take cached value from cpuinfo if available, see:
652 # https://github.com/giampaolo/psutil/issues/1851
653 curr = cpuinfo_freqs[i] * 1000
654 else:
655 curr = bcat(pjoin(path, "scaling_cur_freq"), fallback=None)
656 if curr is None:
657 # Likely an old RedHat, see:
658 # https://github.com/giampaolo/psutil/issues/1071
659 curr = bcat(pjoin(path, "cpuinfo_cur_freq"), fallback=None)
660 if curr is None:
661 online_path = f"/sys/devices/system/cpu/cpu{i}/online"
662 # if cpu core is offline, set to all zeroes
663 if cat(online_path, fallback=None) == "0\n":
664 ret.append(ntp.scpufreq(0.0, 0.0, 0.0))
665 continue
666 msg = "can't find current frequency file"
667 raise NotImplementedError(msg)
668 curr = int(curr) / 1000
669 max_ = int(bcat(pjoin(path, "scaling_max_freq"))) / 1000
670 min_ = int(bcat(pjoin(path, "scaling_min_freq"))) / 1000
671 ret.append(ntp.scpufreq(curr, min_, max_))
672 return ret
673
674else:
675
676 def cpu_freq():
677 """Alternate implementation using /proc/cpuinfo.
        min and max frequencies are not available and are set to 0.0.
679 """
680 return [ntp.scpufreq(x, 0.0, 0.0) for x in _cpu_get_cpuinfo_freq()]
681
682
683# =====================================================================
684# --- network
685# =====================================================================
686
687
688net_if_addrs = cext.net_if_addrs
689
690
691class _Ipv6UnsupportedError(Exception):
692 pass
693
694
695class NetConnections:
696 """A wrapper on top of /proc/net/* files, retrieving per-process
697 and system-wide open connections (TCP, UDP, UNIX) similarly to
698 "netstat -an".
699
700 Note: in case of UNIX sockets we're only able to determine the
701 local endpoint/path, not the one it's connected to.
702 According to [1] it would be possible but not easily.
703
704 [1] http://serverfault.com/a/417946
705 """
706
707 def __init__(self):
708 # The string represents the basename of the corresponding
709 # /proc/net/{proto_name} file.
710 tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM)
711 tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
712 udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM)
713 udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
714 unix = ("unix", socket.AF_UNIX, None)
715 self.tmap = {
716 "all": (tcp4, tcp6, udp4, udp6, unix),
717 "tcp": (tcp4, tcp6),
718 "tcp4": (tcp4,),
719 "tcp6": (tcp6,),
720 "udp": (udp4, udp6),
721 "udp4": (udp4,),
722 "udp6": (udp6,),
723 "unix": (unix,),
724 "inet": (tcp4, tcp6, udp4, udp6),
725 "inet4": (tcp4, udp4),
726 "inet6": (tcp6, udp6),
727 }
728 self._procfs_path = None
729
730 def get_proc_inodes(self, pid):
731 inodes = defaultdict(list)
732 for fd in os.listdir(f"{self._procfs_path}/{pid}/fd"):
733 try:
734 inode = readlink(f"{self._procfs_path}/{pid}/fd/{fd}")
735 except (FileNotFoundError, ProcessLookupError):
736 # ENOENT == file which is gone in the meantime;
737 # os.stat(f"/proc/{self.pid}") will be done later
738 # to force NSP (if it's the case)
739 continue
740 except OSError as err:
741 if err.errno == errno.EINVAL:
742 # not a link
743 continue
744 if err.errno == errno.ENAMETOOLONG:
745 # file name too long
746 debug(err)
747 continue
748 raise
749 else:
750 if inode.startswith('socket:['):
751 # the process is using a socket
752 inode = inode[8:][:-1]
753 inodes[inode].append((pid, int(fd)))
754 return inodes
755
756 def get_all_inodes(self):
757 inodes = {}
758 for pid in pids():
759 try:
760 inodes.update(self.get_proc_inodes(pid))
761 except (FileNotFoundError, ProcessLookupError, PermissionError):
762 # os.listdir() is gonna raise a lot of access denied
763 # exceptions in case of unprivileged user; that's fine
764 # as we'll just end up returning a connection with PID
765 # and fd set to None anyway.
                # Both netstat -an and lsof do the same, so it's
767 # unlikely we can do any better.
768 # ENOENT just means a PID disappeared on us.
769 continue
770 return inodes
771
772 @staticmethod
773 def decode_address(addr, family):
774 """Accept an "ip:port" address as displayed in /proc/net/*
775 and convert it into a human readable form, like:
776
777 "0500000A:0016" -> ("10.0.0.5", 22)
778 "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
779
780 The IP address portion is a little or big endian four-byte
781 hexadecimal number; that is, the least significant byte is listed
782 first, so we need to reverse the order of the bytes to convert it
783 to an IP address.
784 The port is represented as a two-byte hexadecimal number.
785
786 Reference:
787 http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
788 """
789 ip, port = addr.split(':')
790 port = int(port, 16)
791 # this usually refers to a local socket in listen mode with
792 # no end-points connected
793 if not port:
794 return ()
795 ip = ip.encode('ascii')
796 if family == socket.AF_INET:
797 # see: https://github.com/giampaolo/psutil/issues/201
798 if LITTLE_ENDIAN:
799 ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
800 else:
801 ip = socket.inet_ntop(family, base64.b16decode(ip))
802 else: # IPv6
803 ip = base64.b16decode(ip)
804 try:
805 # see: https://github.com/giampaolo/psutil/issues/201
806 if LITTLE_ENDIAN:
807 ip = socket.inet_ntop(
808 socket.AF_INET6,
809 struct.pack('>4I', *struct.unpack('<4I', ip)),
810 )
811 else:
812 ip = socket.inet_ntop(
813 socket.AF_INET6,
814 struct.pack('<4I', *struct.unpack('<4I', ip)),
815 )
816 except ValueError:
817 # see: https://github.com/giampaolo/psutil/issues/623
818 if not supports_ipv6():
819 raise _Ipv6UnsupportedError from None
820 raise
821 return ntp.addr(ip, port)
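
        # Worked example of the little endian IPv4 case above: "0500000A"
        # decodes to the bytes 05 00 00 0A, which reversed become
        # 0A 00 00 05, i.e. "10.0.0.5", while the port "0016" is simply
        # int("0016", 16) == 22.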
822
823 @staticmethod
824 def process_inet(file, family, type_, inodes, filter_pid=None):
825 """Parse /proc/net/tcp* and /proc/net/udp* files."""
826 if file.endswith('6') and not os.path.exists(file):
827 # IPv6 not supported
828 return
829 with open_text(file) as f:
830 f.readline() # skip the first line
831 for lineno, line in enumerate(f, 1):
832 try:
833 _, laddr, raddr, status, _, _, _, _, _, inode = (
834 line.split()[:10]
835 )
836 except ValueError:
837 msg = (
838 f"error while parsing {file}; malformed line"
839 f" {lineno} {line!r}"
840 )
841 raise RuntimeError(msg) from None
842 if inode in inodes:
843 # # We assume inet sockets are unique, so we error
844 # # out if there are multiple references to the
845 # # same inode. We won't do this for UNIX sockets.
846 # if len(inodes[inode]) > 1 and family != socket.AF_UNIX:
847 # raise ValueError("ambiguous inode with multiple "
848 # "PIDs references")
849 pid, fd = inodes[inode][0]
850 else:
851 pid, fd = None, -1
852 if filter_pid is not None and filter_pid != pid:
853 continue
854 else:
855 if type_ == socket.SOCK_STREAM:
856 status = TCP_STATUSES[status]
857 else:
858 status = _common.CONN_NONE
859 try:
860 laddr = NetConnections.decode_address(laddr, family)
861 raddr = NetConnections.decode_address(raddr, family)
862 except _Ipv6UnsupportedError:
863 continue
864 yield (fd, family, type_, laddr, raddr, status, pid)
865
866 @staticmethod
867 def process_unix(file, family, inodes, filter_pid=None):
868 """Parse /proc/net/unix files."""
869 with open_text(file) as f:
870 f.readline() # skip the first line
871 for line in f:
872 tokens = line.split()
873 try:
874 _, _, _, _, type_, _, inode = tokens[0:7]
875 except ValueError:
876 if ' ' not in line:
877 # see: https://github.com/giampaolo/psutil/issues/766
878 continue
879 msg = (
880 f"error while parsing {file}; malformed line {line!r}"
881 )
882 raise RuntimeError(msg) # noqa: B904
883 if inode in inodes: # noqa: SIM108
884 # With UNIX sockets we can have a single inode
885 # referencing many file descriptors.
886 pairs = inodes[inode]
887 else:
888 pairs = [(None, -1)]
889 for pid, fd in pairs:
890 if filter_pid is not None and filter_pid != pid:
891 continue
892 else:
893 path = tokens[-1] if len(tokens) == 8 else ''
894 type_ = _common.socktype_to_enum(int(type_))
895 # XXX: determining the remote endpoint of a
896 # UNIX socket on Linux is not possible, see:
897 # https://serverfault.com/questions/252723/
898 raddr = ""
899 status = _common.CONN_NONE
900 yield (fd, family, type_, path, raddr, status, pid)
901
902 def retrieve(self, kind, pid=None):
903 self._procfs_path = get_procfs_path()
904 if pid is not None:
905 inodes = self.get_proc_inodes(pid)
906 if not inodes:
907 # no connections for this process
908 return []
909 else:
910 inodes = self.get_all_inodes()
911 ret = set()
912 for proto_name, family, type_ in self.tmap[kind]:
913 path = f"{self._procfs_path}/net/{proto_name}"
914 if family in {socket.AF_INET, socket.AF_INET6}:
915 ls = self.process_inet(
916 path, family, type_, inodes, filter_pid=pid
917 )
918 else:
919 ls = self.process_unix(path, family, inodes, filter_pid=pid)
920 for fd, family, type_, laddr, raddr, status, bound_pid in ls:
921 if pid:
922 conn = ntp.pconn(fd, family, type_, laddr, raddr, status)
923 else:
924 conn = ntp.sconn(
925 fd, family, type_, laddr, raddr, status, bound_pid
926 )
927 ret.add(conn)
928 return list(ret)
929
930
931_net_connections = NetConnections()
932
933
934def net_connections(kind='inet'):
935 """Return system-wide open connections."""
936 return _net_connections.retrieve(kind)
937
938
939def net_io_counters():
940 """Return network I/O statistics for every network interface
941 installed on the system as a dict of raw tuples.
942 """
943 with open_text(f"{get_procfs_path()}/net/dev") as f:
944 lines = f.readlines()
945 retdict = {}
946 for line in lines[2:]:
947 colon = line.rfind(':')
948 assert colon > 0, repr(line)
949 name = line[:colon].strip()
950 fields = line[colon + 1 :].strip().split()
951
952 (
953 # in
954 bytes_recv,
955 packets_recv,
956 errin,
957 dropin,
958 _fifoin, # unused
959 _framein, # unused
960 _compressedin, # unused
961 _multicastin, # unused
962 # out
963 bytes_sent,
964 packets_sent,
965 errout,
966 dropout,
967 _fifoout, # unused
968 _collisionsout, # unused
969 _carrierout, # unused
970 _compressedout, # unused
971 ) = map(int, fields)
972
973 retdict[name] = (
974 bytes_sent,
975 bytes_recv,
976 packets_sent,
977 packets_recv,
978 errin,
979 errout,
980 dropin,
981 dropout,
982 )
983 return retdict
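
# Illustrative /proc/net/dev data line (made-up numbers):
#
#   eth0: 6514608 12391 0 0 0 0 0 0 1531035 10441 0 0 0 0 0 0
#
# The interface name is everything before the right-most colon; the 16
# counters which follow are unpacked into the 8 receive and 8 transmit
# fields above.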
984
985
986def net_if_stats():
987 """Get NIC stats (isup, duplex, speed, mtu)."""
988 duplex_map = {
989 cext.DUPLEX_FULL: NIC_DUPLEX_FULL,
990 cext.DUPLEX_HALF: NIC_DUPLEX_HALF,
991 cext.DUPLEX_UNKNOWN: NIC_DUPLEX_UNKNOWN,
992 }
993 names = net_io_counters().keys()
994 ret = {}
995 for name in names:
996 try:
997 mtu = cext.net_if_mtu(name)
998 flags = cext.net_if_flags(name)
999 duplex, speed = cext.net_if_duplex_speed(name)
1000 except OSError as err:
1001 # https://github.com/giampaolo/psutil/issues/1279
1002 if err.errno != errno.ENODEV:
1003 raise
1004 debug(err)
1005 else:
1006 output_flags = ','.join(flags)
1007 isup = 'running' in flags
1008 ret[name] = ntp.snicstats(
1009 isup, duplex_map[duplex], speed, mtu, output_flags
1010 )
1011 return ret
1012
1013
1014# =====================================================================
1015# --- disks
1016# =====================================================================
1017
1018
1019disk_usage = _psposix.disk_usage
1020
1021
1022def disk_io_counters(perdisk=False):
1023 """Return disk I/O statistics for every disk installed on the
1024 system as a dict of raw tuples.
1025 """
1026
1027 def read_procfs():
1028 # OK, this is a bit confusing. The format of /proc/diskstats can
1029 # have 3 variations.
        # On Linux 2.4 each line always has 15 fields, e.g.:
1031 # "3 0 8 hda 8 8 8 8 8 8 8 8 8 8 8"
1032 # On Linux 2.6+ each line *usually* has 14 fields, and the disk
1033 # name is in another position, like this:
1034 # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8"
1035 # ...unless (Linux 2.6) the line refers to a partition instead
        # of a disk, in which case the line has fewer fields (7):
1037 # "3 1 hda1 8 8 8 8"
1038 # 4.18+ has 4 fields added:
1039 # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8 0 0 0 0"
1040 # 5.5 has 2 more fields.
1041 # See:
1042 # https://www.kernel.org/doc/Documentation/iostats.txt
1043 # https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
1044 with open_text(f"{get_procfs_path()}/diskstats") as f:
1045 lines = f.readlines()
1046 for line in lines:
1047 fields = line.split()
1048 flen = len(fields)
1049 # fmt: off
1050 if flen == 15:
1051 # Linux 2.4
1052 name = fields[3]
1053 reads = int(fields[2])
1054 (reads_merged, rbytes, rtime, writes, writes_merged,
1055 wbytes, wtime, _, busy_time, _) = map(int, fields[4:14])
1056 elif flen == 14 or flen >= 18:
1057 # Linux 2.6+, line referring to a disk
1058 name = fields[2]
1059 (reads, reads_merged, rbytes, rtime, writes, writes_merged,
1060 wbytes, wtime, _, busy_time, _) = map(int, fields[3:14])
1061 elif flen == 7:
1062 # Linux 2.6+, line referring to a partition
1063 name = fields[2]
1064 reads, rbytes, writes, wbytes = map(int, fields[3:])
1065 rtime = wtime = reads_merged = writes_merged = busy_time = 0
1066 else:
1067 msg = f"not sure how to interpret line {line!r}"
1068 raise ValueError(msg)
1069 yield (name, reads, writes, rbytes, wbytes, rtime, wtime,
1070 reads_merged, writes_merged, busy_time)
1071 # fmt: on
1072
1073 def read_sysfs():
1074 for block in os.listdir('/sys/block'):
1075 for root, _, files in os.walk(os.path.join('/sys/block', block)):
1076 if 'stat' not in files:
1077 continue
1078 with open_text(os.path.join(root, 'stat')) as f:
1079 fields = f.read().strip().split()
1080 name = os.path.basename(root)
1081 # fmt: off
1082 (reads, reads_merged, rbytes, rtime, writes, writes_merged,
1083 wbytes, wtime, _, busy_time) = map(int, fields[:10])
1084 yield (name, reads, writes, rbytes, wbytes, rtime,
1085 wtime, reads_merged, writes_merged, busy_time)
1086 # fmt: on
1087
1088 if os.path.exists(f"{get_procfs_path()}/diskstats"):
1089 gen = read_procfs()
1090 elif os.path.exists('/sys/block'):
1091 gen = read_sysfs()
1092 else:
1093 msg = (
1094 f"{get_procfs_path()}/diskstats nor /sys/block are available on"
1095 " this system"
1096 )
1097 raise NotImplementedError(msg)
1098
1099 retdict = {}
1100 for entry in gen:
1101 # fmt: off
1102 (name, reads, writes, rbytes, wbytes, rtime, wtime, reads_merged,
1103 writes_merged, busy_time) = entry
1104 if not perdisk and not is_storage_device(name):
1105 # perdisk=False means we want to calculate totals so we skip
1106 # partitions (e.g. 'sda1', 'nvme0n1p1') and only include
1107 # base disk devices (e.g. 'sda', 'nvme0n1'). Base disks
1108 # include a total of all their partitions + some extra size
1109 # of their own:
1110 # $ cat /proc/diskstats
1111 # 259 0 sda 10485760 ...
1112 # 259 1 sda1 5186039 ...
1113 # 259 1 sda2 5082039 ...
1114 # See:
1115 # https://github.com/giampaolo/psutil/pull/1313
1116 continue
1117
1118 rbytes *= DISK_SECTOR_SIZE
1119 wbytes *= DISK_SECTOR_SIZE
1120 retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime,
1121 reads_merged, writes_merged, busy_time)
1122 # fmt: on
1123
1124 return retdict
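
# Illustrative /proc/diskstats line for a whole disk on Linux 2.6+
# (made-up numbers):
#
#   8 0 sda 1000 10 8000 120 500 5 4000 60 0 180 0
#
# 14 fields: the name is fields[2] ("sda"); reads=1000, sectors
# read=8000 (reported as 8000 * DISK_SECTOR_SIZE bytes), writes=500,
# busy_time=180, and so on.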
1125
1126
1127class RootFsDeviceFinder:
1128 """disk_partitions() may return partitions with device == "/dev/root"
1129 or "rootfs". This container class uses different strategies to try to
1130 obtain the real device path. Resources:
1131 https://bootlin.com/blog/find-root-device/
1132 https://www.systutorials.com/how-to-find-the-disk-where-root-is-on-in-bash-on-linux/.
1133 """
1134
1135 __slots__ = ['major', 'minor']
1136
1137 def __init__(self):
1138 dev = os.stat("/").st_dev
1139 self.major = os.major(dev)
1140 self.minor = os.minor(dev)
1141
1142 def ask_proc_partitions(self):
1143 with open_text(f"{get_procfs_path()}/partitions") as f:
1144 for line in f.readlines()[2:]:
1145 fields = line.split()
1146 if len(fields) < 4: # just for extra safety
1147 continue
1148 major = int(fields[0]) if fields[0].isdigit() else None
1149 minor = int(fields[1]) if fields[1].isdigit() else None
1150 name = fields[3]
1151 if major == self.major and minor == self.minor:
1152 if name: # just for extra safety
1153 return f"/dev/{name}"
1154
1155 def ask_sys_dev_block(self):
1156 path = f"/sys/dev/block/{self.major}:{self.minor}/uevent"
1157 with open_text(path) as f:
1158 for line in f:
1159 if line.startswith("DEVNAME="):
1160 name = line.strip().rpartition("DEVNAME=")[2]
1161 if name: # just for extra safety
1162 return f"/dev/{name}"
1163
1164 def ask_sys_class_block(self):
1165 needle = f"{self.major}:{self.minor}"
1166 files = glob.iglob("/sys/class/block/*/dev")
1167 for file in files:
1168 try:
1169 f = open_text(file)
1170 except FileNotFoundError: # race condition
1171 continue
1172 else:
1173 with f:
1174 data = f.read().strip()
1175 if data == needle:
1176 name = os.path.basename(os.path.dirname(file))
1177 return f"/dev/{name}"
1178
1179 def find(self):
1180 path = None
1181 if path is None:
1182 try:
1183 path = self.ask_proc_partitions()
1184 except OSError as err:
1185 debug(err)
1186 if path is None:
1187 try:
1188 path = self.ask_sys_dev_block()
1189 except OSError as err:
1190 debug(err)
1191 if path is None:
1192 try:
1193 path = self.ask_sys_class_block()
1194 except OSError as err:
1195 debug(err)
1196 # We use exists() because the "/dev/*" part of the path is hard
1197 # coded, so we want to be sure.
1198 if path is not None and os.path.exists(path):
1199 return path
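
# A minimal usage sketch; the resolved device path is system-dependent
# and purely illustrative:
#
#   >>> RootFsDeviceFinder().find()
#   '/dev/sda1'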
1200
1201
1202def disk_partitions(all=False):
1203 """Return mounted disk partitions as a list of namedtuples."""
1204 fstypes = set()
1205 procfs_path = get_procfs_path()
1206 if not all:
1207 with open_text(f"{procfs_path}/filesystems") as f:
1208 for line in f:
1209 line = line.strip()
1210 if not line.startswith("nodev"):
1211 fstypes.add(line.strip())
1212 else:
1213 # ignore all lines starting with "nodev" except "nodev zfs"
1214 fstype = line.split("\t")[1]
1215 if fstype == "zfs":
1216 fstypes.add("zfs")
1217
1218 # See: https://github.com/giampaolo/psutil/issues/1307
1219 if procfs_path == "/proc" and os.path.isfile('/etc/mtab'):
1220 mounts_path = os.path.realpath("/etc/mtab")
1221 else:
1222 mounts_path = os.path.realpath(f"{procfs_path}/self/mounts")
1223
1224 retlist = []
1225 partitions = cext.disk_partitions(mounts_path)
1226 for partition in partitions:
1227 device, mountpoint, fstype, opts = partition
1228 if device == 'none':
1229 device = ''
1230 if device in {"/dev/root", "rootfs"}:
1231 device = RootFsDeviceFinder().find() or device
1232 if not all:
1233 if not device or fstype not in fstypes:
1234 continue
1235 ntuple = ntp.sdiskpart(device, mountpoint, fstype, opts)
1236 retlist.append(ntuple)
1237
1238 return retlist
1239
1240
1241# =====================================================================
1242# --- sensors
1243# =====================================================================
1244
1245
1246def sensors_temperatures():
1247 """Return hardware (CPU and others) temperatures as a dict
1248 including hardware name, label, current, max and critical
1249 temperatures.
1250
1251 Implementation notes:
1252 - /sys/class/hwmon looks like the most recent interface to
1253 retrieve this info, and this implementation relies on it
1254 only (old distros will probably use something else)
1255 - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
1256 - /sys/class/thermal/thermal_zone* is another one but it's more
1257 difficult to parse
1258 """
1259 ret = collections.defaultdict(list)
1260 basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*')
1261 # CentOS has an intermediate /device directory:
1262 # https://github.com/giampaolo/psutil/issues/971
1263 # https://github.com/nicolargo/glances/issues/1060
1264 basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*'))
1265 basenames = sorted({x.split('_')[0] for x in basenames})
1266
1267 # Only add the coretemp hwmon entries if they're not already in
1268 # /sys/class/hwmon/
1269 # https://github.com/giampaolo/psutil/issues/1708
1270 # https://github.com/giampaolo/psutil/pull/1648
1271 basenames2 = glob.glob(
1272 '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*'
1273 )
1274 repl = re.compile(r"/sys/devices/platform/coretemp.*/hwmon/")
1275 for name in basenames2:
1276 altname = repl.sub('/sys/class/hwmon/', name)
1277 if altname not in basenames:
1278 basenames.append(name)
1279
1280 for base in basenames:
1281 try:
1282 path = base + '_input'
1283 current = float(bcat(path)) / 1000.0
1284 path = os.path.join(os.path.dirname(base), 'name')
1285 unit_name = cat(path).strip()
1286 except (OSError, ValueError):
1287 # A lot of things can go wrong here, so let's just skip the
1288 # whole entry. Sure thing is Linux's /sys/class/hwmon really
1289 # is a stinky broken mess.
1290 # https://github.com/giampaolo/psutil/issues/1009
1291 # https://github.com/giampaolo/psutil/issues/1101
1292 # https://github.com/giampaolo/psutil/issues/1129
1293 # https://github.com/giampaolo/psutil/issues/1245
1294 # https://github.com/giampaolo/psutil/issues/1323
1295 continue
1296
1297 high = bcat(base + '_max', fallback=None)
1298 critical = bcat(base + '_crit', fallback=None)
1299 label = cat(base + '_label', fallback='').strip()
1300
1301 if high is not None:
1302 try:
1303 high = float(high) / 1000.0
1304 except ValueError:
1305 high = None
1306 if critical is not None:
1307 try:
1308 critical = float(critical) / 1000.0
1309 except ValueError:
1310 critical = None
1311
1312 ret[unit_name].append((label, current, high, critical))
1313
1314 # Indication that no sensors were detected in /sys/class/hwmon/
1315 if not basenames:
1316 basenames = glob.glob('/sys/class/thermal/thermal_zone*')
1317 basenames = sorted(set(basenames))
1318
1319 for base in basenames:
1320 try:
1321 path = os.path.join(base, 'temp')
1322 current = float(bcat(path)) / 1000.0
1323 path = os.path.join(base, 'type')
1324 unit_name = cat(path).strip()
1325 except (OSError, ValueError) as err:
1326 debug(err)
1327 continue
1328
1329 trip_paths = glob.glob(base + '/trip_point*')
1330 trip_points = {
1331 '_'.join(os.path.basename(p).split('_')[0:3])
1332 for p in trip_paths
1333 }
1334 critical = None
1335 high = None
1336 for trip_point in trip_points:
1337 path = os.path.join(base, trip_point + "_type")
1338 trip_type = cat(path, fallback='').strip()
1339 if trip_type == 'critical':
1340 critical = bcat(
1341 os.path.join(base, trip_point + "_temp"), fallback=None
1342 )
1343 elif trip_type == 'high':
1344 high = bcat(
1345 os.path.join(base, trip_point + "_temp"), fallback=None
1346 )
1347
1348 if high is not None:
1349 try:
1350 high = float(high) / 1000.0
1351 except ValueError:
1352 high = None
1353 if critical is not None:
1354 try:
1355 critical = float(critical) / 1000.0
1356 except ValueError:
1357 critical = None
1358
1359 ret[unit_name].append(('', current, high, critical))
1360
1361 return dict(ret)
1362
1363
1364def sensors_fans():
1365 """Return hardware fans info (for CPU and other peripherals) as a
1366 dict including hardware label and current speed.
1367
1368 Implementation notes:
1369 - /sys/class/hwmon looks like the most recent interface to
1370 retrieve this info, and this implementation relies on it
1371 only (old distros will probably use something else)
1372 - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
1373 """
1374 ret = collections.defaultdict(list)
1375 basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*')
1376 if not basenames:
1377 # CentOS has an intermediate /device directory:
1378 # https://github.com/giampaolo/psutil/issues/971
1379 basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*')
1380
1381 basenames = sorted({x.split("_")[0] for x in basenames})
1382 for base in basenames:
1383 try:
1384 current = int(bcat(base + '_input'))
1385 except OSError as err:
1386 debug(err)
1387 continue
1388 unit_name = cat(os.path.join(os.path.dirname(base), 'name')).strip()
1389 label = cat(base + '_label', fallback='').strip()
1390 ret[unit_name].append(ntp.sfan(label, current))
1391
1392 return dict(ret)
1393
1394
1395def sensors_battery():
1396 """Return battery information.
1397 Implementation note: it appears /sys/class/power_supply/BAT0/
1398 directory structure may vary and provide files with the same
1399 meaning but under different names, see:
1400 https://github.com/giampaolo/psutil/issues/966.
1401 """
1402 null = object()
1403
1404 def multi_bcat(*paths):
1405 """Attempt to read the content of multiple files which may
1406 not exist. If none of them exist return None.
1407 """
1408 for path in paths:
1409 ret = bcat(path, fallback=null)
1410 if ret != null:
1411 try:
1412 return int(ret)
1413 except ValueError:
1414 return ret.strip()
1415 return None
1416
1417 bats = [
1418 x
1419 for x in os.listdir(POWER_SUPPLY_PATH)
1420 if x.startswith('BAT') or 'battery' in x.lower()
1421 ]
1422 if not bats:
1423 return None
    # Get the first available battery. Usually this is "BAT0", barring
    # some rare exceptions:
1426 # https://github.com/giampaolo/psutil/issues/1238
1427 root = os.path.join(POWER_SUPPLY_PATH, min(bats))
1428
1429 # Base metrics.
1430 energy_now = multi_bcat(root + "/energy_now", root + "/charge_now")
1431 power_now = multi_bcat(root + "/power_now", root + "/current_now")
1432 energy_full = multi_bcat(root + "/energy_full", root + "/charge_full")
1433 time_to_empty = multi_bcat(root + "/time_to_empty_now")
1434
1435 # Percent. If we have energy_full the percentage will be more
1436 # accurate compared to reading /capacity file (float vs. int).
1437 if energy_full is not None and energy_now is not None:
1438 try:
1439 percent = 100.0 * energy_now / energy_full
1440 except ZeroDivisionError:
1441 percent = 0.0
1442 else:
1443 percent = int(cat(root + "/capacity", fallback=-1))
1444 if percent == -1:
1445 return None
1446
1447 # Is AC power cable plugged in?
1448 # Note: AC0 is not always available and sometimes (e.g. CentOS7)
1449 # it's called "AC".
1450 power_plugged = None
1451 online = multi_bcat(
1452 os.path.join(POWER_SUPPLY_PATH, "AC0/online"),
1453 os.path.join(POWER_SUPPLY_PATH, "AC/online"),
1454 )
1455 if online is not None:
1456 power_plugged = online == 1
1457 else:
1458 status = cat(root + "/status", fallback="").strip().lower()
1459 if status == "discharging":
1460 power_plugged = False
1461 elif status in {"charging", "full"}:
1462 power_plugged = True
1463
1464 # Seconds left.
1465 # Note to self: we may also calculate the charging ETA as per:
1466 # https://github.com/thialfihar/dotfiles/blob/
1467 # 013937745fd9050c30146290e8f963d65c0179e6/bin/battery.py#L55
1468 if power_plugged:
1469 secsleft = _common.POWER_TIME_UNLIMITED
1470 elif energy_now is not None and power_now is not None:
1471 try:
1472 secsleft = int(energy_now / abs(power_now) * 3600)
1473 except ZeroDivisionError:
1474 secsleft = _common.POWER_TIME_UNKNOWN
1475 elif time_to_empty is not None:
1476 secsleft = int(time_to_empty * 60)
1477 if secsleft < 0:
1478 secsleft = _common.POWER_TIME_UNKNOWN
1479 else:
1480 secsleft = _common.POWER_TIME_UNKNOWN
1481
1482 return ntp.sbattery(percent, secsleft, power_plugged)
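
# Illustrative arithmetic for the discharge estimate above (made-up
# values): with energy_now = 50000000 uWh and power_now = 10000000 uW
# the remaining time is 50000000 / 10000000 * 3600 = 18000 seconds
# (5 hours).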
1483
1484
1485# =====================================================================
1486# --- other system functions
1487# =====================================================================
1488
1489
1490def users():
1491 """Return currently connected users as a list of namedtuples."""
1492 retlist = []
1493 rawlist = cext.users()
1494 for item in rawlist:
1495 user, tty, hostname, tstamp, pid = item
1496 nt = ntp.suser(user, tty or None, hostname, tstamp, pid)
1497 retlist.append(nt)
1498 return retlist
1499
1500
1501def boot_time():
1502 """Return the system boot time expressed in seconds since the epoch."""
1503 path = f"{get_procfs_path()}/stat"
1504 with open_binary(path) as f:
1505 for line in f:
1506 if line.startswith(b'btime'):
1507 return float(line.strip().split()[1])
1508 msg = f"line 'btime' not found in {path}"
1509 raise RuntimeError(msg)
1510
1511
1512# =====================================================================
1513# --- processes
1514# =====================================================================
1515
1516
1517def pids():
1518 """Returns a list of PIDs currently running on the system."""
1519 path = get_procfs_path().encode(ENCODING)
1520 return [int(x) for x in os.listdir(path) if x.isdigit()]
1521
1522
1523def pid_exists(pid):
1524 """Check for the existence of a unix PID. Linux TIDs are not
1525 supported (always return False).
1526 """
1527 if not _psposix.pid_exists(pid):
1528 return False
1529 else:
        # Linux apparently does not distinguish between PIDs and TIDs
1531 # (thread IDs).
1532 # listdir("/proc") won't show any TID (only PIDs) but
1533 # os.stat("/proc/{tid}") will succeed if {tid} exists.
1534 # os.kill() can also be passed a TID. This is quite confusing.
1535 # In here we want to enforce this distinction and support PIDs
1536 # only, see:
1537 # https://github.com/giampaolo/psutil/issues/687
1538 try:
1539 # Note: already checked that this is faster than using a
1540 # regular expr. Also (a lot) faster than doing
1541 # 'return pid in pids()'
1542 path = f"{get_procfs_path()}/{pid}/status"
1543 with open_binary(path) as f:
1544 for line in f:
1545 if line.startswith(b"Tgid:"):
1546 tgid = int(line.split()[1])
1547 # If tgid and pid are the same then we're
1548 # dealing with a process PID.
1549 return tgid == pid
1550 msg = f"'Tgid' line not found in {path}"
1551 raise ValueError(msg)
1552 except (OSError, ValueError):
1553 return pid in pids()
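
# Illustrative /proc/{tid}/status excerpt for a thread (TID 1007) whose
# thread group leader is PID 1005 (made-up values):
#
#   Name:   worker
#   Tgid:   1005
#   Pid:    1007
#
# pid_exists(1007) compares Tgid (1005) against 1007 and returns False,
# even though os.stat("/proc/1007") would succeed.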
1554
1555
1556def ppid_map():
1557 """Obtain a {pid: ppid, ...} dict for all running processes in
1558 one shot. Used to speed up Process.children().
1559 """
1560 ret = {}
1561 procfs_path = get_procfs_path()
1562 for pid in pids():
1563 try:
1564 with open_binary(f"{procfs_path}/{pid}/stat") as f:
1565 data = f.read()
1566 except (FileNotFoundError, ProcessLookupError):
1567 pass
1568 except PermissionError as err:
1569 raise AccessDenied(pid) from err
1570 else:
1571 rpar = data.rfind(b')')
1572 dset = data[rpar + 2 :].split()
1573 ppid = int(dset[1])
1574 ret[pid] = ppid
1575 return ret
1576
1577
1578def wrap_exceptions(fun):
1579 """Decorator which translates bare OSError and OSError exceptions
1580 into NoSuchProcess and AccessDenied.
1581 """
1582
1583 @functools.wraps(fun)
1584 def wrapper(self, *args, **kwargs):
1585 pid, name = self.pid, self._name
1586 try:
1587 return fun(self, *args, **kwargs)
1588 except PermissionError as err:
1589 raise AccessDenied(pid, name) from err
1590 except ProcessLookupError as err:
1591 self._raise_if_zombie()
1592 raise NoSuchProcess(pid, name) from err
1593 except FileNotFoundError as err:
1594 self._raise_if_zombie()
1595 # /proc/PID directory may still exist, but the files within
1596 # it may not, indicating the process is gone, see:
1597 # https://github.com/giampaolo/psutil/issues/2418
1598 if not os.path.exists(f"{self._procfs_path}/{pid}/stat"):
1599 raise NoSuchProcess(pid, name) from err
1600 raise
1601
1602 return wrapper
1603
1604
1605class Process:
1606 """Linux process implementation."""
1607
1608 __slots__ = [
1609 "_cache",
1610 "_ctime",
1611 "_name",
1612 "_ppid",
1613 "_procfs_path",
1614 "pid",
1615 ]
1616
1617 def __init__(self, pid):
1618 self.pid = pid
1619 self._name = None
1620 self._ppid = None
1621 self._ctime = None
1622 self._procfs_path = get_procfs_path()
1623
1624 def _is_zombie(self):
        # Note: most of the time Linux is able to return info about the
1626 # process even if it's a zombie, and /proc/{pid} will exist.
1627 # There are some exceptions though, like exe(), cmdline() and
1628 # memory_maps(). In these cases /proc/{pid}/{file} exists but
1629 # it's empty. Instead of returning a "null" value we'll raise an
1630 # exception.
1631 try:
1632 data = bcat(f"{self._procfs_path}/{self.pid}/stat")
1633 except OSError:
1634 return False
1635 else:
1636 rpar = data.rfind(b')')
1637 status = data[rpar + 2 : rpar + 3]
1638 return status == b"Z"
1639
1640 def _raise_if_zombie(self):
1641 if self._is_zombie():
1642 raise ZombieProcess(self.pid, self._name, self._ppid)
1643
1644 def _raise_if_not_alive(self):
1645 """Raise NSP if the process disappeared on us."""
        # For those C functions which do not raise NSP, possibly returning
        # an incorrect or incomplete result.
1648 os.stat(f"{self._procfs_path}/{self.pid}")
1649
1650 def _readlink(self, path, fallback=UNSET):
1651 # * https://github.com/giampaolo/psutil/issues/503
1652 # os.readlink('/proc/pid/exe') may raise ESRCH (ProcessLookupError)
1653 # instead of ENOENT (FileNotFoundError) when it races.
        # * ENOENT may also occur even if the path actually exists, if PID is
1655 # a low PID (~0-20 range).
1656 # * https://github.com/giampaolo/psutil/issues/2514
1657 try:
1658 return readlink(path)
1659 except (FileNotFoundError, ProcessLookupError):
1660 if os.path.lexists(f"{self._procfs_path}/{self.pid}"):
1661 self._raise_if_zombie()
1662 if fallback is not UNSET:
1663 return fallback
1664 raise
1665
1666 @wrap_exceptions
1667 @memoize_when_activated
1668 def _parse_stat_file(self):
1669 """Parse /proc/{pid}/stat file and return a dict with various
1670 process info.
1671 Using "man proc" as a reference: where "man proc" refers to
        position N always subtract 3 (e.g. ppid is position 4 in
        'man proc' == position 1 here).
1674 The return value is cached in case oneshot() ctx manager is
1675 in use.
1676 """
1677 data = bcat(f"{self._procfs_path}/{self.pid}/stat")
1678 # Process name is between parentheses. It can contain spaces and
1679 # other parentheses. This is taken into account by looking for
1680 # the first occurrence of "(" and the last occurrence of ")".
1681 rpar = data.rfind(b')')
1682 name = data[data.find(b'(') + 1 : rpar]
1683 fields = data[rpar + 2 :].split()
1684
1685 ret = {}
1686 ret['name'] = name
1687 ret['status'] = fields[0]
1688 ret['ppid'] = fields[1]
1689 ret['ttynr'] = fields[4]
1690 ret['utime'] = fields[11]
1691 ret['stime'] = fields[12]
1692 ret['children_utime'] = fields[13]
1693 ret['children_stime'] = fields[14]
1694 ret['create_time'] = fields[19]
1695 ret['cpu_num'] = fields[36]
1696 try:
1697 ret['blkio_ticks'] = fields[39] # aka 'delayacct_blkio_ticks'
1698 except IndexError:
1699 # https://github.com/giampaolo/psutil/issues/2455
1700 debug("can't get blkio_ticks, set iowait to 0")
1701 ret['blkio_ticks'] = 0
1702
1703 return ret
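
    # Illustrative (truncated, made-up) /proc/{pid}/stat content:
    #
    #   b"1005 (cat foo) S 987 ..."
    #
    # rfind(b')') isolates the name "cat foo" even though it contains a
    # space; fields[0] is the status ("S") and fields[1] is the ppid
    # ("987"), i.e. position 4 in "man proc" minus the 3 leading entries.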
1704
1705 @wrap_exceptions
1706 @memoize_when_activated
1707 def _read_status_file(self):
1708 """Read /proc/{pid}/stat file and return its content.
1709 The return value is cached in case oneshot() ctx manager is
1710 in use.
1711 """
1712 with open_binary(f"{self._procfs_path}/{self.pid}/status") as f:
1713 return f.read()
1714
1715 @wrap_exceptions
1716 @memoize_when_activated
1717 def _read_smaps_file(self):
1718 with open_binary(f"{self._procfs_path}/{self.pid}/smaps") as f:
1719 return f.read().strip()
1720
1721 def oneshot_enter(self):
1722 self._parse_stat_file.cache_activate(self)
1723 self._read_status_file.cache_activate(self)
1724 self._read_smaps_file.cache_activate(self)
1725
1726 def oneshot_exit(self):
1727 self._parse_stat_file.cache_deactivate(self)
1728 self._read_status_file.cache_deactivate(self)
1729 self._read_smaps_file.cache_deactivate(self)
1730
1731 @wrap_exceptions
1732 def name(self):
1733 # XXX - gets changed later and probably needs refactoring
1734 return decode(self._parse_stat_file()['name'])
1735
1736 @wrap_exceptions
1737 def exe(self):
1738 return self._readlink(
1739 f"{self._procfs_path}/{self.pid}/exe", fallback=""
1740 )
1741
1742 @wrap_exceptions
1743 def cmdline(self):
1744 with open_text(f"{self._procfs_path}/{self.pid}/cmdline") as f:
1745 data = f.read()
1746 if not data:
            # may happen in case of a zombie process
1748 self._raise_if_zombie()
1749 return []
        # 'man proc' states that args are separated by null bytes '\0'
        # and that the last char is supposed to be a null byte as well.
        # Nevertheless some processes may change their cmdline after
        # being started (via setproctitle() or similar); they are
        # usually not compliant with this rule and use spaces instead.
        # The Google Chrome process is an example. See:
        # https://github.com/giampaolo/psutil/issues/1179
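        # E.g. for "cat /etc/fstab" the raw (text-decoded) data is:
        #   "cat\x00/etc/fstab\x00"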
1757 sep = '\x00' if data.endswith('\x00') else ' '
1758 if data.endswith(sep):
1759 data = data[:-1]
1760 cmdline = data.split(sep)
        # Sometimes the last char is a null byte '\0' but the args are
        # separated by spaces; see: https://github.com/giampaolo/psutil/
        # issues/1179#issuecomment-552984549
1764 if sep == '\x00' and len(cmdline) == 1 and ' ' in data:
1765 cmdline = data.split(' ')
1766 return cmdline
1767
1768 @wrap_exceptions
1769 def environ(self):
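        # /proc/[pid]/environ is a block of null-separated "KEY=VALUE"
        # pairs, e.g. "PATH=/usr/bin\x00HOME=/root\x00"; splitting is
        # delegated to parse_environ_block().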
1770 with open_text(f"{self._procfs_path}/{self.pid}/environ") as f:
1771 data = f.read()
1772 return parse_environ_block(data)
1773
1774 @wrap_exceptions
1775 def terminal(self):
1776 tty_nr = int(self._parse_stat_file()['ttynr'])
1777 tmap = _psposix.get_terminal_map()
1778 try:
1779 return tmap[tty_nr]
1780 except KeyError:
1781 return None
1782
1783 # May not be available on old kernels.
1784 if os.path.exists(f"/proc/{os.getpid()}/io"):
1785
1786 @wrap_exceptions
1787 def io_counters(self):
1788 fname = f"{self._procfs_path}/{self.pid}/io"
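            # Typical /proc/[pid]/io content (cumulative counters; the
            # values below are made up):
            #   rchar: 323934931
            #   wchar: 323929600
            #   syscr: 632687
            #   syscw: 632675
            #   read_bytes: 0
            #   write_bytes: 323932160
            #   cancelled_write_bytes: 0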
1789 fields = {}
1790 with open_binary(fname) as f:
1791 for line in f:
1792 # https://github.com/giampaolo/psutil/issues/1004
1793 line = line.strip()
1794 if line:
1795 try:
1796 name, value = line.split(b': ')
1797 except ValueError:
1798 # https://github.com/giampaolo/psutil/issues/1004
1799 continue
1800 else:
1801 fields[name] = int(value)
1802 if not fields:
1803 msg = f"{fname} file was empty"
1804 raise RuntimeError(msg)
1805 try:
1806 return ntp.pio(
1807 fields[b'syscr'], # read syscalls
1808 fields[b'syscw'], # write syscalls
1809 fields[b'read_bytes'], # read bytes
1810 fields[b'write_bytes'], # write bytes
1811 fields[b'rchar'], # read chars
1812 fields[b'wchar'], # write chars
1813 )
1814 except KeyError as err:
1815 msg = (
1816 f"{err.args[0]!r} field was not found in {fname}; found"
1817 f" fields are {fields!r}"
1818 )
1819 raise ValueError(msg) from None
1820
1821 @wrap_exceptions
1822 def cpu_times(self):
1823 values = self._parse_stat_file()
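        # utime & co. are expressed in clock ticks; dividing by
        # CLOCK_TICKS (sysconf(SC_CLK_TCK), typically 100 on Linux)
        # converts them to seconds.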
1824 utime = float(values['utime']) / CLOCK_TICKS
1825 stime = float(values['stime']) / CLOCK_TICKS
1826 children_utime = float(values['children_utime']) / CLOCK_TICKS
1827 children_stime = float(values['children_stime']) / CLOCK_TICKS
1828 iowait = float(values['blkio_ticks']) / CLOCK_TICKS
1829 return ntp.pcputimes(
1830 utime, stime, children_utime, children_stime, iowait
1831 )
1832
1833 @wrap_exceptions
1834 def cpu_num(self):
1835 """What CPU the process is on."""
1836 return int(self._parse_stat_file()['cpu_num'])
1837
1838 @wrap_exceptions
1839 def wait(self, timeout=None):
1840 return _psposix.wait_pid(self.pid, timeout, self._name)
1841
1842 @wrap_exceptions
1843 def create_time(self, monotonic=False):
        # The 'starttime' field in /proc/[pid]/stat is expressed in
        # clock ticks (jiffies): a relative value representing the
        # number of ticks elapsed between system boot and process
        # creation. It never changes and is unaffected by system
        # clock updates.
1849 if self._ctime is None:
1850 self._ctime = (
1851 float(self._parse_stat_file()['create_time']) / CLOCK_TICKS
1852 )
1853 if monotonic:
1854 return self._ctime
1855 # Add the boot time, returning time expressed in seconds since
1856 # the epoch. This is subject to system clock updates.
1857 return self._ctime + boot_time()
1858
1859 @wrap_exceptions
1860 def memory_info(self):
1861 # ============================================================
1862 # | FIELD | DESCRIPTION | AKA | TOP |
1863 # ============================================================
1864 # | rss | resident set size | | RES |
1865 # | vms | total program size | size | VIRT |
1866 # | shared | shared pages (from shared mappings) | | SHR |
1867 # | text | text ('code') | trs | CODE |
1868 # | lib | library (unused in Linux 2.6) | lrs | |
1869 # | data | data + stack | drs | DATA |
1870 # | dirty | dirty pages (unused in Linux 2.6) | dt | |
1871 # ============================================================
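        # Illustrative /proc/[pid]/statm content (7 integers expressed
        # in pages, hence the multiplication by PAGESIZE below; the
        # values are made up):
        #   38372 3445 2260 1 0 4539 0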
1872 with open_binary(f"{self._procfs_path}/{self.pid}/statm") as f:
1873 vms, rss, shared, text, lib, data, dirty = (
1874 int(x) * PAGESIZE for x in f.readline().split()[:7]
1875 )
1876 return ntp.pmem(rss, vms, shared, text, lib, data, dirty)
1877
1878 if HAS_PROC_SMAPS_ROLLUP or HAS_PROC_SMAPS:
1879
1880 def _parse_smaps_rollup(self):
1881 # /proc/pid/smaps_rollup was added to Linux in 2017. Faster
1882 # than /proc/pid/smaps. It reports higher PSS than */smaps
1883 # (from 1k up to 200k higher; tested against all processes).
1884 # IMPORTANT: /proc/pid/smaps_rollup is weird, because it
1885 # raises ESRCH / ENOENT for many PIDs, even if they're alive
1886 # (also as root). In that case we'll use /proc/pid/smaps as
1887 # fallback, which is slower but has a +50% success rate
1888 # compared to /proc/pid/smaps_rollup.
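            # Representative /proc/[pid]/smaps_rollup excerpt (values
            # are in kB, hence the "* 1024" below; numbers are made
            # up):
            #   Rss:                2884 kB
            #   Pss:                1042 kB
            #   Private_Clean:       316 kB
            #   Private_Dirty:       632 kB
            #   Swap:                  0 kB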
1889 uss = pss = swap = 0
1890 with open_binary(
1891 f"{self._procfs_path}/{self.pid}/smaps_rollup"
1892 ) as f:
1893 for line in f:
1894 if line.startswith(b"Private_"):
1895 # Private_Clean, Private_Dirty, Private_Hugetlb
1896 uss += int(line.split()[1]) * 1024
1897 elif line.startswith(b"Pss:"):
1898 pss = int(line.split()[1]) * 1024
1899 elif line.startswith(b"Swap:"):
1900 swap = int(line.split()[1]) * 1024
1901 return (uss, pss, swap)
1902
1903 @wrap_exceptions
1904 def _parse_smaps(
1905 self,
1906 # Gets Private_Clean, Private_Dirty, Private_Hugetlb.
1907 _private_re=re.compile(br"\nPrivate.*:\s+(\d+)"),
1908 _pss_re=re.compile(br"\nPss\:\s+(\d+)"),
1909 _swap_re=re.compile(br"\nSwap\:\s+(\d+)"),
1910 ):
1911 # /proc/pid/smaps does not exist on kernels < 2.6.14 or if
1912 # CONFIG_MMU kernel configuration option is not enabled.
1913
1914 # Note: using 3 regexes is faster than reading the file
1915 # line by line.
1916 #
1917 # You might be tempted to calculate USS by subtracting
1918 # the "shared" value from the "resident" value in
1919 # /proc/<pid>/statm. But at least on Linux, statm's "shared"
1920 # value actually counts pages backed by files, which has
1921 # little to do with whether the pages are actually shared.
1922 # /proc/self/smaps on the other hand appears to give us the
1923 # correct information.
1924 smaps_data = self._read_smaps_file()
            # Note: the smaps file can be empty for certain processes.
            # The code below will not crash, though, and will simply
            # return 0.
1927 uss = sum(map(int, _private_re.findall(smaps_data))) * 1024
1928 pss = sum(map(int, _pss_re.findall(smaps_data))) * 1024
1929 swap = sum(map(int, _swap_re.findall(smaps_data))) * 1024
1930 return (uss, pss, swap)
1931
1932 @wrap_exceptions
1933 def memory_full_info(self):
1934 if HAS_PROC_SMAPS_ROLLUP: # faster
1935 try:
1936 uss, pss, swap = self._parse_smaps_rollup()
1937 except (ProcessLookupError, FileNotFoundError):
1938 uss, pss, swap = self._parse_smaps()
1939 else:
1940 uss, pss, swap = self._parse_smaps()
1941 basic_mem = self.memory_info()
1942 return ntp.pfullmem(*basic_mem + (uss, pss, swap))
1943
1944 else:
1945 memory_full_info = memory_info
1946
1947 if HAS_PROC_SMAPS:
1948
1949 @wrap_exceptions
1950 def memory_maps(self):
1951 """Return process's mapped memory regions as a list of named
1952 tuples. Fields are explained in 'man proc'; here is an updated
1953 (Apr 2012) version: https://git.kernel.org/pub/scm/linux/kernel/git/torvalds/linux.git/tree/Documentation/filesystems/proc.txt?id=b76437579d1344b612cf1851ae610c636cec7db0.
1954
1955 /proc/{PID}/smaps does not exist on kernels < 2.6.14 or if
1956 CONFIG_MMU kernel configuration option is not enabled.
1957 """
1958
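            # Each mapping block in /proc/[pid]/smaps starts with a
            # header line such as (made-up example):
            #   7f8b1c000000-7f8b1c021000 rw-p 00000000 00:00 0  [heap]
            # ...followed by "Key: <value> kB" detail lines (Rss, Pss,
            # Swap, etc.), which get_blocks() below collects into a
            # dict keyed by the field name.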
1959 def get_blocks(lines, current_block):
1960 data = {}
1961 for line in lines:
1962 fields = line.split(None, 5)
1963 if not fields[0].endswith(b':'):
1964 # new block section
1965 yield (current_block.pop(), data)
1966 current_block.append(line)
1967 else:
1968 try:
1969 data[fields[0]] = int(fields[1]) * 1024
1970 except (ValueError, IndexError):
1971 if fields[0].startswith(b'VmFlags:'):
1972 # see issue #369
1973 continue
1974 msg = f"don't know how to interpret line {line!r}"
1975 raise ValueError(msg) from None
1976 yield (current_block.pop(), data)
1977
1978 data = self._read_smaps_file()
1979 # Note: smaps file can be empty for certain processes or for
1980 # zombies.
1981 if not data:
1982 self._raise_if_zombie()
1983 return []
1984 lines = data.split(b'\n')
1985 ls = []
1986 first_line = lines.pop(0)
1987 current_block = [first_line]
1988 for header, data in get_blocks(lines, current_block):
1989 hfields = header.split(None, 5)
1990 try:
1991 addr, perms, _offset, _dev, _inode, path = hfields
1992 except ValueError:
1993 addr, perms, _offset, _dev, _inode, path = hfields + ['']
1994 if not path:
1995 path = '[anon]'
1996 else:
1997 path = decode(path)
1998 path = path.strip()
1999 if path.endswith(' (deleted)') and not path_exists_strict(
2000 path
2001 ):
2002 path = path[:-10]
2003 item = (
2004 decode(addr),
2005 decode(perms),
2006 path,
2007 data.get(b'Rss:', 0),
2008 data.get(b'Size:', 0),
2009 data.get(b'Pss:', 0),
2010 data.get(b'Shared_Clean:', 0),
2011 data.get(b'Shared_Dirty:', 0),
2012 data.get(b'Private_Clean:', 0),
2013 data.get(b'Private_Dirty:', 0),
2014 data.get(b'Referenced:', 0),
2015 data.get(b'Anonymous:', 0),
2016 data.get(b'Swap:', 0),
2017 )
2018 ls.append(item)
2019 return ls
2020
2021 @wrap_exceptions
2022 def cwd(self):
2023 return self._readlink(
2024 f"{self._procfs_path}/{self.pid}/cwd", fallback=""
2025 )
2026
2027 @wrap_exceptions
2028 def num_ctx_switches(
2029 self, _ctxsw_re=re.compile(br'ctxt_switches:\t(\d+)')
2030 ):
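        # The relevant lines in /proc/[pid]/status look like
        # (tab-separated):
        #   voluntary_ctxt_switches:    150
        #   nonvoluntary_ctxt_switches: 545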
2031 data = self._read_status_file()
2032 ctxsw = _ctxsw_re.findall(data)
2033 if not ctxsw:
            msg = (
                "'voluntary_ctxt_switches' and"
                " 'nonvoluntary_ctxt_switches' lines were not found in"
                f" {self._procfs_path}/{self.pid}/status; the kernel is"
                " probably older than 2.6.23"
            )
2040 raise NotImplementedError(msg)
2041 return ntp.pctxsw(int(ctxsw[0]), int(ctxsw[1]))
2042
2043 @wrap_exceptions
2044 def num_threads(self, _num_threads_re=re.compile(br'Threads:\t(\d+)')):
        # Using a regex is faster than iterating over the file line by line.
2046 data = self._read_status_file()
2047 return int(_num_threads_re.findall(data)[0])
2048
2049 @wrap_exceptions
2050 def threads(self):
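        # Each /proc/[pid]/task/[tid]/stat file has the same layout as
        # /proc/[pid]/stat, so utime / stime are extracted the same
        # way as in _parse_stat_file().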
2051 thread_ids = os.listdir(f"{self._procfs_path}/{self.pid}/task")
2052 thread_ids.sort()
2053 retlist = []
2054 hit_enoent = False
2055 for thread_id in thread_ids:
2056 fname = f"{self._procfs_path}/{self.pid}/task/{thread_id}/stat"
2057 try:
2058 with open_binary(fname) as f:
2059 st = f.read().strip()
2060 except (FileNotFoundError, ProcessLookupError):
                # no such file or directory, or no such process;
                # it means the thread disappeared on us
2063 hit_enoent = True
2064 continue
            # ignore the first two fields ("pid (name)")
2066 st = st[st.find(b')') + 2 :]
2067 values = st.split(b' ')
2068 utime = float(values[11]) / CLOCK_TICKS
2069 stime = float(values[12]) / CLOCK_TICKS
2070 ntuple = ntp.pthread(int(thread_id), utime, stime)
2071 retlist.append(ntuple)
2072 if hit_enoent:
2073 self._raise_if_not_alive()
2074 return retlist
2075
2076 @wrap_exceptions
2077 def nice_get(self):
2078 # with open_text(f"{self._procfs_path}/{self.pid}/stat") as f:
2079 # data = f.read()
2080 # return int(data.split()[18])
2081
2082 # Use C implementation
2083 return cext.proc_priority_get(self.pid)
2084
2085 @wrap_exceptions
2086 def nice_set(self, value):
2087 return cext.proc_priority_set(self.pid, value)
2088
    # Available starting from CentOS 6.
2090 if HAS_CPU_AFFINITY:
2091
2092 @wrap_exceptions
2093 def cpu_affinity_get(self):
2094 return cext.proc_cpu_affinity_get(self.pid)
2095
2096 def _get_eligible_cpus(
2097 self, _re=re.compile(br"Cpus_allowed_list:\t(\d+)-(\d+)")
2098 ):
2099 # See: https://github.com/giampaolo/psutil/issues/956
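            # The relevant /proc/[pid]/status line typically looks
            # like:
            #   Cpus_allowed_list:  0-7
            # Note that the regex above only matches the plain "N-M"
            # form; anything else falls back to all CPUs.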
2100 data = self._read_status_file()
2101 match = _re.findall(data)
2102 if match:
2103 return list(range(int(match[0][0]), int(match[0][1]) + 1))
2104 else:
2105 return list(range(len(per_cpu_times())))
2106
2107 @wrap_exceptions
2108 def cpu_affinity_set(self, cpus):
2109 try:
2110 cext.proc_cpu_affinity_set(self.pid, cpus)
2111 except (OSError, ValueError) as err:
2112 if isinstance(err, ValueError) or err.errno == errno.EINVAL:
2113 eligible_cpus = self._get_eligible_cpus()
2114 all_cpus = tuple(range(len(per_cpu_times())))
2115 for cpu in cpus:
2116 if cpu not in all_cpus:
2117 msg = (
2118 f"invalid CPU {cpu!r}; choose between"
2119 f" {eligible_cpus!r}"
2120 )
2121 raise ValueError(msg) from None
2122 if cpu not in eligible_cpus:
2123 msg = (
2124 f"CPU number {cpu} is not eligible; choose"
2125 f" between {eligible_cpus}"
2126 )
2127 raise ValueError(msg) from err
2128 raise
2129
2130 # only starting from kernel 2.6.13
2131 if HAS_PROC_IO_PRIORITY:
2132
2133 @wrap_exceptions
2134 def ionice_get(self):
2135 ioclass, value = cext.proc_ioprio_get(self.pid)
2136 ioclass = IOPriority(ioclass)
2137 return ntp.pionice(ioclass, value)
2138
2139 @wrap_exceptions
2140 def ionice_set(self, ioclass, value):
2141 if value is None:
2142 value = 0
2143 if value and ioclass in {
2144 IOPriority.IOPRIO_CLASS_IDLE,
2145 IOPriority.IOPRIO_CLASS_NONE,
2146 }:
2147 msg = f"{ioclass!r} ioclass accepts no value"
2148 raise ValueError(msg)
2149 if value < 0 or value > 7:
2150 msg = "value not in 0-7 range"
2151 raise ValueError(msg)
2152 return cext.proc_ioprio_set(self.pid, ioclass, value)
2153
2154 if hasattr(resource, "prlimit"):
2155
2156 @wrap_exceptions
2157 def rlimit(self, resource_, limits=None):
2158 # If pid is 0 prlimit() applies to the calling process and
2159 # we don't want that. We should never get here though as
2160 # PID 0 is not supported on Linux.
2161 if self.pid == 0:
2162 msg = "can't use prlimit() against PID 0 process"
2163 raise ValueError(msg)
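            # resource.prlimit() mirrors the prlimit(2) syscall:
            #   prlimit(pid, res)          # get -> (soft, hard)
            #   prlimit(pid, res, (s, h))  # set soft/hard limits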
2164 try:
2165 if limits is None:
2166 # get
2167 return resource.prlimit(self.pid, resource_)
2168 else:
2169 # set
2170 if len(limits) != 2:
2171 msg = (
2172 "second argument must be a (soft, hard) "
2173 f"tuple, got {limits!r}"
2174 )
2175 raise ValueError(msg)
2176 resource.prlimit(self.pid, resource_, limits)
2177 except OSError as err:
2178 if err.errno == errno.ENOSYS:
2179 # I saw this happening on Travis:
2180 # https://travis-ci.org/giampaolo/psutil/jobs/51368273
2181 self._raise_if_zombie()
2182 raise
2183
2184 @wrap_exceptions
2185 def status(self):
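        # The status letter comes from /proc/[pid]/stat, e.g. 'R'
        # (running), 'S' (sleeping), 'Z' (zombie); PROC_STATUSES maps
        # it to the corresponding psutil constant.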
2186 letter = self._parse_stat_file()['status']
2187 letter = letter.decode()
2188 # XXX is '?' legit? (we're not supposed to return it anyway)
2189 return PROC_STATUSES.get(letter, '?')
2190
2191 @wrap_exceptions
2192 def open_files(self):
2193 retlist = []
2194 files = os.listdir(f"{self._procfs_path}/{self.pid}/fd")
2195 hit_enoent = False
2196 for fd in files:
2197 file = f"{self._procfs_path}/{self.pid}/fd/{fd}"
2198 try:
2199 path = readlink(file)
2200 except (FileNotFoundError, ProcessLookupError):
2201 # ENOENT == file which is gone in the meantime
2202 hit_enoent = True
2203 continue
2204 except OSError as err:
2205 if err.errno == errno.EINVAL:
2206 # not a link
2207 continue
2208 if err.errno == errno.ENAMETOOLONG:
2209 # file name too long
2210 debug(err)
2211 continue
2212 raise
2213 else:
                # If the path is not absolute there's no way to tell
                # whether it's a regular file or not, so we skip it.
                # A regular file is always supposed to have an
                # absolute path, though.
2218 if path.startswith('/') and isfile_strict(path):
2219 # Get file position and flags.
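                    # /proc/[pid]/fdinfo/{fd} starts with lines such
                    # as (made-up values):
                    #   pos:    4096
                    #   flags:  02004002
                    # "flags" is expressed in octal, hence the
                    # int(..., 8) below.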
2220 file = f"{self._procfs_path}/{self.pid}/fdinfo/{fd}"
2221 try:
2222 with open_binary(file) as f:
2223 pos = int(f.readline().split()[1])
2224 flags = int(f.readline().split()[1], 8)
2225 except (FileNotFoundError, ProcessLookupError):
2226 # fd gone in the meantime; process may
2227 # still be alive
2228 hit_enoent = True
2229 else:
2230 mode = file_flags_to_mode(flags)
2231 ntuple = ntp.popenfile(
2232 path, int(fd), int(pos), mode, flags
2233 )
2234 retlist.append(ntuple)
2235 if hit_enoent:
2236 self._raise_if_not_alive()
2237 return retlist
2238
2239 @wrap_exceptions
2240 def net_connections(self, kind='inet'):
2241 ret = _net_connections.retrieve(kind, self.pid)
2242 self._raise_if_not_alive()
2243 return ret
2244
2245 @wrap_exceptions
2246 def num_fds(self):
2247 return len(os.listdir(f"{self._procfs_path}/{self.pid}/fd"))
2248
2249 @wrap_exceptions
2250 def ppid(self):
2251 return int(self._parse_stat_file()['ppid'])
2252
2253 @wrap_exceptions
2254 def uids(self, _uids_re=re.compile(br'Uid:\t(\d+)\t(\d+)\t(\d+)')):
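        # The "Uid:" line in /proc/[pid]/status lists the real,
        # effective, saved and filesystem UIDs, e.g. (tab-separated):
        #   Uid:    1000    1000    1000    1000
        # Only the first three are returned; same goes for gids()
        # below with the "Gid:" line.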
2255 data = self._read_status_file()
2256 real, effective, saved = _uids_re.findall(data)[0]
2257 return ntp.puids(int(real), int(effective), int(saved))
2258
2259 @wrap_exceptions
2260 def gids(self, _gids_re=re.compile(br'Gid:\t(\d+)\t(\d+)\t(\d+)')):
2261 data = self._read_status_file()
2262 real, effective, saved = _gids_re.findall(data)[0]
2263 return ntp.pgids(int(real), int(effective), int(saved))