Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/psutil/_pslinux.py: 21%
1244 statements
1# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
5"""Linux platform implementation."""
7from __future__ import division
9import base64
10import collections
11import errno
12import functools
13import glob
14import os
15import re
16import socket
17import struct
18import sys
19import traceback
20import warnings
21from collections import defaultdict
22from collections import namedtuple
24from . import _common
25from . import _psposix
26from . import _psutil_linux as cext
27from . import _psutil_posix as cext_posix
28from ._common import NIC_DUPLEX_FULL
29from ._common import NIC_DUPLEX_HALF
30from ._common import NIC_DUPLEX_UNKNOWN
31from ._common import AccessDenied
32from ._common import NoSuchProcess
33from ._common import ZombieProcess
34from ._common import bcat
35from ._common import cat
36from ._common import debug
37from ._common import decode
38from ._common import get_procfs_path
39from ._common import isfile_strict
40from ._common import memoize
41from ._common import memoize_when_activated
42from ._common import open_binary
43from ._common import open_text
44from ._common import parse_environ_block
45from ._common import path_exists_strict
46from ._common import supports_ipv6
47from ._common import usage_percent
48from ._compat import PY3
49from ._compat import FileNotFoundError
50from ._compat import PermissionError
51from ._compat import ProcessLookupError
52from ._compat import b
53from ._compat import basestring
56if sys.version_info >= (3, 4):
57 import enum
58else:
59 enum = None
62__extra__all__ = [
63 #
64 'PROCFS_PATH',
65 # io prio constants
66 "IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
67 "IOPRIO_CLASS_IDLE",
68 # connection status constants
69 "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
70 "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
71 "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", ]
74# =====================================================================
75# --- globals
76# =====================================================================
79POWER_SUPPLY_PATH = "/sys/class/power_supply"
80HAS_PROC_SMAPS = os.path.exists('/proc/%s/smaps' % os.getpid())
81HAS_PROC_SMAPS_ROLLUP = os.path.exists('/proc/%s/smaps_rollup' % os.getpid())
82HAS_PROC_IO_PRIORITY = hasattr(cext, "proc_ioprio_get")
83HAS_CPU_AFFINITY = hasattr(cext, "proc_cpu_affinity_get")
85# Number of clock ticks per second
86CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
87PAGESIZE = cext_posix.getpagesize()
88BOOT_TIME = None # set later
89LITTLE_ENDIAN = sys.byteorder == 'little'
91# "man iostat" states that sectors are equivalent with blocks and have
92# a size of 512 bytes. Despite this value can be queried at runtime
93# via /sys/block/{DISK}/queue/hw_sector_size and results may vary
94# between 1k, 2k, or 4k... 512 appears to be a magic constant used
95# throughout Linux source code:
96# * https://stackoverflow.com/a/38136179/376587
97# * https://lists.gt.net/linux/kernel/2241060
98# * https://github.com/giampaolo/psutil/issues/1305
99# * https://github.com/torvalds/linux/blob/
100# 4f671fe2f9523a1ea206f63fe60a7c7b3a56d5c7/include/linux/bio.h#L99
101# * https://lkml.org/lkml/2015/8/17/234
102DISK_SECTOR_SIZE = 512
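# Illustrative sketch (not part of psutil): disk_io_counters() further below
# multiplies the sector counts reported by /proc/diskstats by this constant
# to obtain bytes. The sector count here is made up.
_example_sectors = 2048
_example_bytes = _example_sectors * DISK_SECTOR_SIZE  # 1048576 (1 MiB)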
104if enum is None:
105 AF_LINK = socket.AF_PACKET
106else:
107 AddressFamily = enum.IntEnum('AddressFamily',
108 {'AF_LINK': int(socket.AF_PACKET)})
109 AF_LINK = AddressFamily.AF_LINK
111# ioprio_* constants http://linux.die.net/man/2/ioprio_get
112if enum is None:
113 IOPRIO_CLASS_NONE = 0
114 IOPRIO_CLASS_RT = 1
115 IOPRIO_CLASS_BE = 2
116 IOPRIO_CLASS_IDLE = 3
117else:
118 class IOPriority(enum.IntEnum):
119 IOPRIO_CLASS_NONE = 0
120 IOPRIO_CLASS_RT = 1
121 IOPRIO_CLASS_BE = 2
122 IOPRIO_CLASS_IDLE = 3
124 globals().update(IOPriority.__members__)
126# See:
127# https://github.com/torvalds/linux/blame/master/fs/proc/array.c
128# ...and (TASK_* constants):
129# https://github.com/torvalds/linux/blob/master/include/linux/sched.h
130PROC_STATUSES = {
131 "R": _common.STATUS_RUNNING,
132 "S": _common.STATUS_SLEEPING,
133 "D": _common.STATUS_DISK_SLEEP,
134 "T": _common.STATUS_STOPPED,
135 "t": _common.STATUS_TRACING_STOP,
136 "Z": _common.STATUS_ZOMBIE,
137 "X": _common.STATUS_DEAD,
138 "x": _common.STATUS_DEAD,
139 "K": _common.STATUS_WAKE_KILL,
140 "W": _common.STATUS_WAKING,
141 "I": _common.STATUS_IDLE,
142 "P": _common.STATUS_PARKED,
143}
145# https://github.com/torvalds/linux/blob/master/include/net/tcp_states.h
146TCP_STATUSES = {
147 "01": _common.CONN_ESTABLISHED,
148 "02": _common.CONN_SYN_SENT,
149 "03": _common.CONN_SYN_RECV,
150 "04": _common.CONN_FIN_WAIT1,
151 "05": _common.CONN_FIN_WAIT2,
152 "06": _common.CONN_TIME_WAIT,
153 "07": _common.CONN_CLOSE,
154 "08": _common.CONN_CLOSE_WAIT,
155 "09": _common.CONN_LAST_ACK,
156 "0A": _common.CONN_LISTEN,
157 "0B": _common.CONN_CLOSING
158}
161# =====================================================================
162# --- named tuples
163# =====================================================================
166# psutil.virtual_memory()
167svmem = namedtuple(
168 'svmem', ['total', 'available', 'percent', 'used', 'free',
169 'active', 'inactive', 'buffers', 'cached', 'shared', 'slab'])
170# psutil.disk_io_counters()
171sdiskio = namedtuple(
172 'sdiskio', ['read_count', 'write_count',
173 'read_bytes', 'write_bytes',
174 'read_time', 'write_time',
175 'read_merged_count', 'write_merged_count',
176 'busy_time'])
177# psutil.Process().open_files()
178popenfile = namedtuple(
179 'popenfile', ['path', 'fd', 'position', 'mode', 'flags'])
180# psutil.Process().memory_info()
181pmem = namedtuple('pmem', 'rss vms shared text lib data dirty')
182# psutil.Process().memory_full_info()
183pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', 'pss', 'swap'))
184# psutil.Process().memory_maps(grouped=True)
185pmmap_grouped = namedtuple(
186 'pmmap_grouped',
187 ['path', 'rss', 'size', 'pss', 'shared_clean', 'shared_dirty',
188 'private_clean', 'private_dirty', 'referenced', 'anonymous', 'swap'])
189# psutil.Process().memory_maps(grouped=False)
190pmmap_ext = namedtuple(
191 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
192# psutil.Process.io_counters()
193pio = namedtuple('pio', ['read_count', 'write_count',
194 'read_bytes', 'write_bytes',
195 'read_chars', 'write_chars'])
196# psutil.Process.cpu_times()
197pcputimes = namedtuple('pcputimes',
198 ['user', 'system', 'children_user', 'children_system',
199 'iowait'])
202# =====================================================================
203# --- utils
204# =====================================================================
207def readlink(path):
208 """Wrapper around os.readlink()."""
209 assert isinstance(path, basestring), path
210 path = os.readlink(path)
211 # readlink() might return paths containing null bytes ('\x00')
212 # resulting in "TypeError: must be encoded string without NULL
213 # bytes, not str" errors when the string is passed to other
214 # fs-related functions (os.*, open(), ...).
215 # Apparently everything after '\x00' is garbage (we can have
216 # ' (deleted)', 'new' and possibly others), see:
217 # https://github.com/giampaolo/psutil/issues/717
218 path = path.split('\x00')[0]
219 # Certain paths have ' (deleted)' appended. Usually this is
220 # bogus as the file actually exists. Even if it doesn't we
221 # don't care.
222 if path.endswith(' (deleted)') and not path_exists_strict(path):
223 path = path[:-10]
224 return path
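# Illustrative sketch (not part of psutil): the string cleanup performed by
# readlink() above, applied to a made-up raw link target containing both a
# trailing NUL and a bogus " (deleted)" suffix.
_example_target = '/tmp/somefile (deleted)\x00garbage'
_example_target = _example_target.split('\x00')[0]  # '/tmp/somefile (deleted)'
if _example_target.endswith(' (deleted)') and \
        not path_exists_strict(_example_target):
    _example_target = _example_target[:-10]          # '/tmp/somefile'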
227def file_flags_to_mode(flags):
228 """Convert file's open() flags into a readable string.
229 Used by Process.open_files().
230 """
231 modes_map = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'}
232 mode = modes_map[flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)]
233 if flags & os.O_APPEND:
234 mode = mode.replace('w', 'a', 1)
235 mode = mode.replace('w+', 'r+')
236 # possible values: r, w, a, r+, a+
237 return mode
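# Illustrative sketch (not part of psutil): a few flag combinations and the
# mode strings file_flags_to_mode() returns for them.
assert file_flags_to_mode(os.O_RDONLY) == 'r'
assert file_flags_to_mode(os.O_RDWR) == 'r+'
assert file_flags_to_mode(os.O_WRONLY | os.O_APPEND) == 'a'
assert file_flags_to_mode(os.O_RDWR | os.O_APPEND) == 'a+'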
240def is_storage_device(name):
241 """Return True if the given name refers to a root device (e.g.
242 "sda", "nvme0n1") as opposed to a logical partition (e.g. "sda1",
243 "nvme0n1p1"). If name is a virtual device (e.g. "loop1", "ram")
244 return True.
245 """
246 # Re-adapted from iostat source code, see:
247 # https://github.com/sysstat/sysstat/blob/
248 # 97912938cd476645b267280069e83b1c8dc0e1c7/common.c#L208
249 # Some devices may have a slash in their name (e.g. cciss/c0d0...).
250 name = name.replace('/', '!')
251 including_virtual = True
252 if including_virtual:
253 path = "/sys/block/%s" % name
254 else:
255 path = "/sys/block/%s/device" % name
256 return os.access(path, os.F_OK)
259@memoize
260def set_scputimes_ntuple(procfs_path):
261 """Set a namedtuple of variable fields depending on the CPU times
262 available on this Linux kernel version which may be:
263 (user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
264 [guest_nice]]])
265 Used by cpu_times() function.
266 """
267 global scputimes
268 with open_binary('%s/stat' % procfs_path) as f:
269 values = f.readline().split()[1:]
270 fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
271 vlen = len(values)
272 if vlen >= 8:
273 # Linux >= 2.6.11
274 fields.append('steal')
275 if vlen >= 9:
276 # Linux >= 2.6.24
277 fields.append('guest')
278 if vlen >= 10:
279 # Linux >= 3.2.0
280 fields.append('guest_nice')
281 scputimes = namedtuple('scputimes', fields)
284try:
285 set_scputimes_ntuple("/proc")
286except Exception: # pragma: no cover
287 # Don't want to crash at import time.
288 traceback.print_exc()
289 scputimes = namedtuple('scputimes', 'user system idle')(0.0, 0.0, 0.0)
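# Illustrative sketch (not part of psutil): a made-up first line of /proc/stat
# and how many CPU time fields set_scputimes_ntuple() would detect in it.
# Ten values after the "cpu" label means steal, guest and guest_nice are all
# present (kernel >= 3.2.0).
_example_stat_line = b"cpu  4705 356 584 3699176 23060 0 277 0 0 0"
_example_nfields = len(_example_stat_line.split()[1:])  # 10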
292# =====================================================================
293# --- prlimit
294# =====================================================================
296# Backport of resource.prlimit() for Python 2. Originally this was done
297# in C, but CentOS-6 which we use to create manylinux wheels is too old
298# and does not support the prlimit() syscall. As such the resulting wheel
299# would not include prlimit(), even when installed on newer systems.
300# This is the only part of psutil using ctypes.
302prlimit = None
303try:
304 from resource import prlimit # python >= 3.4
305except ImportError:
306 import ctypes
308 libc = ctypes.CDLL(None, use_errno=True)
310 if hasattr(libc, "prlimit"):
312 def prlimit(pid, resource_, limits=None):
313 class StructRlimit(ctypes.Structure):
314 _fields_ = [('rlim_cur', ctypes.c_longlong),
315 ('rlim_max', ctypes.c_longlong)]
317 current = StructRlimit()
318 if limits is None:
319 # get
320 ret = libc.prlimit(pid, resource_, None, ctypes.byref(current))
321 else:
322 # set
323 new = StructRlimit()
324 new.rlim_cur = limits[0]
325 new.rlim_max = limits[1]
326 ret = libc.prlimit(
327 pid, resource_, ctypes.byref(new), ctypes.byref(current))
329 if ret != 0:
330 errno_ = ctypes.get_errno()
331 raise OSError(errno_, os.strerror(errno_))
332 return (current.rlim_cur, current.rlim_max)
335if prlimit is not None:
336 __extra__all__.extend(
337 [x for x in dir(cext) if x.startswith('RLIM') and x.isupper()])
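# Illustrative sketch (not part of psutil): reading a limit of the current
# process through whichever prlimit() implementation was picked above
# (stdlib on Python >= 3.4, ctypes fallback otherwise). RLIMIT_NOFILE is
# taken from the stdlib `resource` module purely for illustration.
def _example_nofile_limits():
    import resource
    if prlimit is None:
        return None
    soft, hard = prlimit(os.getpid(), resource.RLIMIT_NOFILE)
    return soft, hard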
340# =====================================================================
341# --- system memory
342# =====================================================================
345def calculate_avail_vmem(mems):
346 """Fallback for kernels < 3.14 where /proc/meminfo does not provide
347 "MemAvailable", see:
348 https://blog.famzah.net/2014/09/24/
350 This code reimplements the algorithm outlined here:
351 https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/
352 commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
354 We use this function also when "MemAvailable" returns 0 (possibly a
355 kernel bug, see: https://github.com/giampaolo/psutil/issues/1915).
356 In that case this routine matches "free" CLI tool result ("available"
357 column).
359 XXX: on recent kernels this calculation may differ by ~1.5% compared
360 to "MemAvailable:", as it's calculated slightly differently.
361 It is still way more realistic than doing (free + cached) though.
362 See:
363 * https://gitlab.com/procps-ng/procps/issues/42
364 * https://github.com/famzah/linux-memavailable-procfs/issues/2
365 """
366 # Note about "fallback" value. According to:
367 # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/
368 # commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
369 # ...long ago "available" memory was calculated as (free + cached).
370 # We use the fallback when one of these is missing from /proc/meminfo:
371 # "Active(file)": introduced in 2.6.28 / Dec 2008
372 # "Inactive(file)": introduced in 2.6.28 / Dec 2008
373 # "SReclaimable": introduced in 2.6.19 / Nov 2006
374 # /proc/zoneinfo: introduced in 2.6.13 / Aug 2005
375 free = mems[b'MemFree:']
376 fallback = free + mems.get(b"Cached:", 0)
377 try:
378 lru_active_file = mems[b'Active(file):']
379 lru_inactive_file = mems[b'Inactive(file):']
380 slab_reclaimable = mems[b'SReclaimable:']
381 except KeyError as err:
382 debug("%r is missing from /proc/meminfo; using an approximation "
383 "for calculating available memory" % err.args[0])
384 return fallback
385 try:
386 f = open_binary('%s/zoneinfo' % get_procfs_path())
387 except IOError:
388 return fallback # kernel 2.6.13
390 watermark_low = 0
391 with f:
392 for line in f:
393 line = line.strip()
394 if line.startswith(b'low'):
395 watermark_low += int(line.split()[1])
396 watermark_low *= PAGESIZE
398 avail = free - watermark_low
399 pagecache = lru_active_file + lru_inactive_file
400 pagecache -= min(pagecache / 2, watermark_low)
401 avail += pagecache
402 avail += slab_reclaimable - min(slab_reclaimable / 2.0, watermark_low)
403 return int(avail)
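# Illustrative sketch (not part of psutil): the arithmetic performed by
# calculate_avail_vmem() on made-up /proc/meminfo numbers (all in bytes):
#   MemFree=1000, watermark_low=100, Active(file)=600, Inactive(file)=400,
#   SReclaimable=200
#   avail     = 1000 - 100                          -> 900
#   pagecache = (600 + 400) - min(1000 / 2, 100)    -> 900; avail -> 1800
#   avail    += 200 - min(200 / 2.0, 100)           -> 1900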
406def virtual_memory():
407 """Report virtual memory stats.
408 This implementation mimics procps-ng-3.3.12, aka the "free" CLI tool:
409 https://gitlab.com/procps-ng/procps/blob/
410 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L778-791
411 The returned values are supposed to match both "free" and "vmstat -s"
412 CLI tools.
413 """
414 missing_fields = []
415 mems = {}
416 with open_binary('%s/meminfo' % get_procfs_path()) as f:
417 for line in f:
418 fields = line.split()
419 mems[fields[0]] = int(fields[1]) * 1024
421 # /proc doc states that the available fields in /proc/meminfo vary
422 # by architecture and compile options, but these 3 values are also
423 # returned by sysinfo(2); as such we assume they are always there.
424 total = mems[b'MemTotal:']
425 free = mems[b'MemFree:']
426 try:
427 buffers = mems[b'Buffers:']
428 except KeyError:
429 # https://github.com/giampaolo/psutil/issues/1010
430 buffers = 0
431 missing_fields.append('buffers')
432 try:
433 cached = mems[b"Cached:"]
434 except KeyError:
435 cached = 0
436 missing_fields.append('cached')
437 else:
438 # "free" cmdline utility sums reclaimable to cached.
439 # Older versions of procps used to add slab memory instead.
440 # This got changed in:
441 # https://gitlab.com/procps-ng/procps/commit/
442 # 05d751c4f076a2f0118b914c5e51cfbb4762ad8e
443 cached += mems.get(b"SReclaimable:", 0) # since kernel 2.6.19
445 try:
446 shared = mems[b'Shmem:'] # since kernel 2.6.32
447 except KeyError:
448 try:
449 shared = mems[b'MemShared:'] # kernels 2.4
450 except KeyError:
451 shared = 0
452 missing_fields.append('shared')
454 try:
455 active = mems[b"Active:"]
456 except KeyError:
457 active = 0
458 missing_fields.append('active')
460 try:
461 inactive = mems[b"Inactive:"]
462 except KeyError:
463 try:
464 inactive = \
465 mems[b"Inact_dirty:"] + \
466 mems[b"Inact_clean:"] + \
467 mems[b"Inact_laundry:"]
468 except KeyError:
469 inactive = 0
470 missing_fields.append('inactive')
472 try:
473 slab = mems[b"Slab:"]
474 except KeyError:
475 slab = 0
477 used = total - free - cached - buffers
478 if used < 0:
479 # May be symptomatic of running within an LXC container where such
480 # values will be dramatically distorted over those of the host.
481 used = total - free
483 # - starting from 4.4.0 we match free's "available" column.
484 # Before 4.4.0 we calculated it as (free + buffers + cached)
485 # which matched htop.
486 # - free and htop available memory differs as per:
487 # http://askubuntu.com/a/369589
488 # http://unix.stackexchange.com/a/65852/168884
489 # - MemAvailable has been introduced in kernel 3.14
490 try:
491 avail = mems[b'MemAvailable:']
492 except KeyError:
493 avail = calculate_avail_vmem(mems)
494 else:
495 if avail == 0:
496 # Yes, it can happen (probably a kernel bug):
497 # https://github.com/giampaolo/psutil/issues/1915
498 # In this case "free" CLI tool makes an estimate. We do the same,
499 # and it matches "free" CLI tool.
500 avail = calculate_avail_vmem(mems)
502 if avail < 0:
503 avail = 0
504 missing_fields.append('available')
505 elif avail > total:
506 # If avail is greater than total or our calculation overflows,
507 # that's symptomatic of running within an LXC container where such
508 # values will be dramatically distorted over those of the host.
509 # https://gitlab.com/procps-ng/procps/blob/
510 # 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L764
511 avail = free
513 percent = usage_percent((total - avail), total, round_=1)
515 # Warn about missing metrics which are set to 0.
516 if missing_fields:
517 msg = "%s memory stats couldn't be determined and %s set to 0" % (
518 ", ".join(missing_fields),
519 "was" if len(missing_fields) == 1 else "were")
520 warnings.warn(msg, RuntimeWarning, stacklevel=2)
522 return svmem(total, avail, percent, used, free,
523 active, inactive, buffers, cached, shared, slab)
526def swap_memory():
527 """Return swap memory metrics."""
528 mems = {}
529 with open_binary('%s/meminfo' % get_procfs_path()) as f:
530 for line in f:
531 fields = line.split()
532 mems[fields[0]] = int(fields[1]) * 1024
533 # We prefer /proc/meminfo over sysinfo() syscall so that
534 # psutil.PROCFS_PATH can be used in order to allow retrieval
535 # for linux containers, see:
536 # https://github.com/giampaolo/psutil/issues/1015
537 try:
538 total = mems[b'SwapTotal:']
539 free = mems[b'SwapFree:']
540 except KeyError:
541 _, _, _, _, total, free, unit_multiplier = cext.linux_sysinfo()
542 total *= unit_multiplier
543 free *= unit_multiplier
545 used = total - free
546 percent = usage_percent(used, total, round_=1)
547 # get pgin/pgouts
548 try:
549 f = open_binary("%s/vmstat" % get_procfs_path())
550 except IOError as err:
551 # see https://github.com/giampaolo/psutil/issues/722
552 msg = "'sin' and 'sout' swap memory stats couldn't " \
553 "be determined and were set to 0 (%s)" % str(err)
554 warnings.warn(msg, RuntimeWarning, stacklevel=2)
555 sin = sout = 0
556 else:
557 with f:
558 sin = sout = None
559 for line in f:
560 # values are expressed in 4 KiB units; we want
561 # bytes instead
562 if line.startswith(b'pswpin'):
563 sin = int(line.split(b' ')[1]) * 4 * 1024
564 elif line.startswith(b'pswpout'):
565 sout = int(line.split(b' ')[1]) * 4 * 1024
566 if sin is not None and sout is not None:
567 break
568 else:
569 # we might get here when dealing with exotic Linux
570 # flavors, see:
571 # https://github.com/giampaolo/psutil/issues/313
572 msg = "'sin' and 'sout' swap memory stats couldn't " \
573 "be determined and were set to 0"
574 warnings.warn(msg, RuntimeWarning, stacklevel=2)
575 sin = sout = 0
576 return _common.sswap(total, used, free, percent, sin, sout)
579# =====================================================================
580# --- CPU
581# =====================================================================
584def cpu_times():
585 """Return a named tuple representing the following system-wide
586 CPU times:
587 (user, nice, system, idle, iowait, irq, softirq [steal, [guest,
588 [guest_nice]]])
589 Last 3 fields may not be available on all Linux kernel versions.
590 """
591 procfs_path = get_procfs_path()
592 set_scputimes_ntuple(procfs_path)
593 with open_binary('%s/stat' % procfs_path) as f:
594 values = f.readline().split()
595 fields = values[1:len(scputimes._fields) + 1]
596 fields = [float(x) / CLOCK_TICKS for x in fields]
597 return scputimes(*fields)
600def per_cpu_times():
601 """Return a list of namedtuple representing the CPU times
602 for every CPU available on the system.
603 """
604 procfs_path = get_procfs_path()
605 set_scputimes_ntuple(procfs_path)
606 cpus = []
607 with open_binary('%s/stat' % procfs_path) as f:
608 # get rid of the first line which refers to system wide CPU stats
609 f.readline()
610 for line in f:
611 if line.startswith(b'cpu'):
612 values = line.split()
613 fields = values[1:len(scputimes._fields) + 1]
614 fields = [float(x) / CLOCK_TICKS for x in fields]
615 entry = scputimes(*fields)
616 cpus.append(entry)
617 return cpus
620def cpu_count_logical():
621 """Return the number of logical CPUs in the system."""
622 try:
623 return os.sysconf("SC_NPROCESSORS_ONLN")
624 except ValueError:
625 # as a second fallback we try to parse /proc/cpuinfo
626 num = 0
627 with open_binary('%s/cpuinfo' % get_procfs_path()) as f:
628 for line in f:
629 if line.lower().startswith(b'processor'):
630 num += 1
632 # unknown format (e.g. armel/sparc architectures), see:
633 # https://github.com/giampaolo/psutil/issues/200
634 # try to parse /proc/stat as a last resort
635 if num == 0:
636 search = re.compile(r'cpu\d')
637 with open_text('%s/stat' % get_procfs_path()) as f:
638 for line in f:
639 line = line.split(' ')[0]
640 if search.match(line):
641 num += 1
643 if num == 0:
644 # mimic os.cpu_count()
645 return None
646 return num
649def cpu_count_cores():
650 """Return the number of CPU cores in the system."""
651 # Method #1
652 ls = set()
653 # These 2 files are the same but */core_cpus_list is newer while
654 # */thread_siblings_list is deprecated and may disappear in the future.
655 # https://www.kernel.org/doc/Documentation/admin-guide/cputopology.rst
656 # https://github.com/giampaolo/psutil/pull/1727#issuecomment-707624964
657 # https://lkml.org/lkml/2019/2/26/41
658 p1 = "/sys/devices/system/cpu/cpu[0-9]*/topology/core_cpus_list"
659 p2 = "/sys/devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list"
660 for path in glob.glob(p1) or glob.glob(p2):
661 with open_binary(path) as f:
662 ls.add(f.read().strip())
663 result = len(ls)
664 if result != 0:
665 return result
667 # Method #2
668 mapping = {}
669 current_info = {}
670 with open_binary('%s/cpuinfo' % get_procfs_path()) as f:
671 for line in f:
672 line = line.strip().lower()
673 if not line:
674 # new section
675 try:
676 mapping[current_info[b'physical id']] = \
677 current_info[b'cpu cores']
678 except KeyError:
679 pass
680 current_info = {}
681 else:
682 # ongoing section
683 if line.startswith((b'physical id', b'cpu cores')):
684 key, value = line.split(b'\t:', 1)
685 current_info[key] = int(value)
687 result = sum(mapping.values())
688 return result or None # mimic os.cpu_count()
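# Illustrative sketch (not part of psutil): why deduplicating the sysfs
# sibling lists yields the physical core count. On a made-up 2-core,
# 4-thread CPU the four per-CPU files would contain two distinct values:
_example_siblings = {b'0,2', b'1,3', b'0,2', b'1,3'}  # set collapses dupes
_example_cores = len(_example_siblings)               # 2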
691def cpu_stats():
692 """Return various CPU stats as a named tuple."""
693 with open_binary('%s/stat' % get_procfs_path()) as f:
694 ctx_switches = None
695 interrupts = None
696 soft_interrupts = None
697 for line in f:
698 if line.startswith(b'ctxt'):
699 ctx_switches = int(line.split()[1])
700 elif line.startswith(b'intr'):
701 interrupts = int(line.split()[1])
702 elif line.startswith(b'softirq'):
703 soft_interrupts = int(line.split()[1])
704 if ctx_switches is not None and soft_interrupts is not None \
705 and interrupts is not None:
706 break
707 syscalls = 0
708 return _common.scpustats(
709 ctx_switches, interrupts, soft_interrupts, syscalls)
712def _cpu_get_cpuinfo_freq():
713 """Return current CPU frequency from cpuinfo if available.
714 """
715 ret = []
716 with open_binary('%s/cpuinfo' % get_procfs_path()) as f:
717 for line in f:
718 if line.lower().startswith(b'cpu mhz'):
719 ret.append(float(line.split(b':', 1)[1]))
720 return ret
723if os.path.exists("/sys/devices/system/cpu/cpufreq/policy0") or \
724 os.path.exists("/sys/devices/system/cpu/cpu0/cpufreq"):
725 def cpu_freq():
726 """Return frequency metrics for all CPUs.
727 Contrary to other OSes, Linux updates these values in
728 real time.
729 """
730 cpuinfo_freqs = _cpu_get_cpuinfo_freq()
731 paths = \
732 glob.glob("/sys/devices/system/cpu/cpufreq/policy[0-9]*") or \
733 glob.glob("/sys/devices/system/cpu/cpu[0-9]*/cpufreq")
734 paths.sort(key=lambda x: int(re.search(r"[0-9]+", x).group()))
735 ret = []
736 pjoin = os.path.join
737 for i, path in enumerate(paths):
738 if len(paths) == len(cpuinfo_freqs):
739 # take cached value from cpuinfo if available, see:
740 # https://github.com/giampaolo/psutil/issues/1851
741 curr = cpuinfo_freqs[i] * 1000
742 else:
743 curr = bcat(pjoin(path, "scaling_cur_freq"), fallback=None)
744 if curr is None:
745 # Likely an old RedHat, see:
746 # https://github.com/giampaolo/psutil/issues/1071
747 curr = bcat(pjoin(path, "cpuinfo_cur_freq"), fallback=None)
748 if curr is None:
749 raise NotImplementedError(
750 "can't find current frequency file")
751 curr = int(curr) / 1000
752 max_ = int(bcat(pjoin(path, "scaling_max_freq"))) / 1000
753 min_ = int(bcat(pjoin(path, "scaling_min_freq"))) / 1000
754 ret.append(_common.scpufreq(curr, min_, max_))
755 return ret
757else:
758 def cpu_freq():
759 """Alternate implementation using /proc/cpuinfo.
760 min and max frequencies are not available and are set to None.
761 """
762 return [_common.scpufreq(x, 0., 0.) for x in _cpu_get_cpuinfo_freq()]
765# =====================================================================
766# --- network
767# =====================================================================
770net_if_addrs = cext_posix.net_if_addrs
773class _Ipv6UnsupportedError(Exception):
774 pass
777class Connections:
778 """A wrapper on top of /proc/net/* files, retrieving per-process
779 and system-wide open connections (TCP, UDP, UNIX) similarly to
780 "netstat -an".
782 Note: in case of UNIX sockets we're only able to determine the
783 local endpoint/path, not the one it's connected to.
784 According to [1] it would be possible, but not easy.
786 [1] http://serverfault.com/a/417946
787 """
789 def __init__(self):
790 # The string represents the basename of the corresponding
791 # /proc/net/{proto_name} file.
792 tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM)
793 tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
794 udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM)
795 udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
796 unix = ("unix", socket.AF_UNIX, None)
797 self.tmap = {
798 "all": (tcp4, tcp6, udp4, udp6, unix),
799 "tcp": (tcp4, tcp6),
800 "tcp4": (tcp4,),
801 "tcp6": (tcp6,),
802 "udp": (udp4, udp6),
803 "udp4": (udp4,),
804 "udp6": (udp6,),
805 "unix": (unix,),
806 "inet": (tcp4, tcp6, udp4, udp6),
807 "inet4": (tcp4, udp4),
808 "inet6": (tcp6, udp6),
809 }
810 self._procfs_path = None
812 def get_proc_inodes(self, pid):
813 inodes = defaultdict(list)
814 for fd in os.listdir("%s/%s/fd" % (self._procfs_path, pid)):
815 try:
816 inode = readlink("%s/%s/fd/%s" % (self._procfs_path, pid, fd))
817 except (FileNotFoundError, ProcessLookupError):
818 # ENOENT == file which is gone in the meantime;
819 # os.stat('/proc/%s' % self.pid) will be done later
820 # to force NSP (if it's the case)
821 continue
822 except OSError as err:
823 if err.errno == errno.EINVAL:
824 # not a link
825 continue
826 if err.errno == errno.ENAMETOOLONG:
827 # file name too long
828 debug(err)
829 continue
830 raise
831 else:
832 if inode.startswith('socket:['):
833 # the process is using a socket
834 inode = inode[8:][:-1]
835 inodes[inode].append((pid, int(fd)))
836 return inodes
838 def get_all_inodes(self):
839 inodes = {}
840 for pid in pids():
841 try:
842 inodes.update(self.get_proc_inodes(pid))
843 except (FileNotFoundError, ProcessLookupError, PermissionError):
844 # os.listdir() is going to raise a lot of access denied
845 # exceptions for unprivileged users; that's fine
846 # as we'll just end up returning a connection with PID
847 # and fd set to None anyway.
848 # Both netstat -an and lsof do the same, so it's
849 # unlikely we can do any better.
850 # ENOENT just means a PID disappeared on us.
851 continue
852 return inodes
854 @staticmethod
855 def decode_address(addr, family):
856 """Accept an "ip:port" address as displayed in /proc/net/*
857 and convert it into a human readable form, like:
859 "0500000A:0016" -> ("10.0.0.5", 22)
860 "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
862 The IP address portion is a four-byte hexadecimal number in host
863 byte order; on little-endian machines the least significant byte is
864 listed first, so we need to reverse the order of the bytes to
865 convert it to an IP address (see the sketch after this class).
866 The port is represented as a two-byte hexadecimal number.
868 Reference:
869 http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
870 """
871 ip, port = addr.split(':')
872 port = int(port, 16)
873 # this usually refers to a local socket in listen mode with
874 # no end-points connected
875 if not port:
876 return ()
877 if PY3:
878 ip = ip.encode('ascii')
879 if family == socket.AF_INET:
880 # see: https://github.com/giampaolo/psutil/issues/201
881 if LITTLE_ENDIAN:
882 ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
883 else:
884 ip = socket.inet_ntop(family, base64.b16decode(ip))
885 else: # IPv6
886 ip = base64.b16decode(ip)
887 try:
888 # see: https://github.com/giampaolo/psutil/issues/201
889 if LITTLE_ENDIAN:
890 ip = socket.inet_ntop(
891 socket.AF_INET6,
892 struct.pack('>4I', *struct.unpack('<4I', ip)))
893 else:
894 ip = socket.inet_ntop(
895 socket.AF_INET6,
896 struct.pack('<4I', *struct.unpack('<4I', ip)))
897 except ValueError:
898 # see: https://github.com/giampaolo/psutil/issues/623
899 if not supports_ipv6():
900 raise _Ipv6UnsupportedError
901 else:
902 raise
903 return _common.addr(ip, port)
905 @staticmethod
906 def process_inet(file, family, type_, inodes, filter_pid=None):
907 """Parse /proc/net/tcp* and /proc/net/udp* files."""
908 if file.endswith('6') and not os.path.exists(file):
909 # IPv6 not supported
910 return
911 with open_text(file) as f:
912 f.readline() # skip the first line
913 for lineno, line in enumerate(f, 1):
914 try:
915 _, laddr, raddr, status, _, _, _, _, _, inode = \
916 line.split()[:10]
917 except ValueError:
918 raise RuntimeError(
919 "error while parsing %s; malformed line %s %r" % (
920 file, lineno, line))
921 if inode in inodes:
922 # # We assume inet sockets are unique, so we error
923 # # out if there are multiple references to the
924 # # same inode. We won't do this for UNIX sockets.
925 # if len(inodes[inode]) > 1 and family != socket.AF_UNIX:
926 # raise ValueError("ambiguous inode with multiple "
927 # "PIDs references")
928 pid, fd = inodes[inode][0]
929 else:
930 pid, fd = None, -1
931 if filter_pid is not None and filter_pid != pid:
932 continue
933 else:
934 if type_ == socket.SOCK_STREAM:
935 status = TCP_STATUSES[status]
936 else:
937 status = _common.CONN_NONE
938 try:
939 laddr = Connections.decode_address(laddr, family)
940 raddr = Connections.decode_address(raddr, family)
941 except _Ipv6UnsupportedError:
942 continue
943 yield (fd, family, type_, laddr, raddr, status, pid)
945 @staticmethod
946 def process_unix(file, family, inodes, filter_pid=None):
947 """Parse /proc/net/unix files."""
948 with open_text(file) as f:
949 f.readline() # skip the first line
950 for line in f:
951 tokens = line.split()
952 try:
953 _, _, _, _, type_, _, inode = tokens[0:7]
954 except ValueError:
955 if ' ' not in line:
956 # see: https://github.com/giampaolo/psutil/issues/766
957 continue
958 raise RuntimeError(
959 "error while parsing %s; malformed line %r" % (
960 file, line))
961 if inode in inodes:
962 # With UNIX sockets we can have a single inode
963 # referencing many file descriptors.
964 pairs = inodes[inode]
965 else:
966 pairs = [(None, -1)]
967 for pid, fd in pairs:
968 if filter_pid is not None and filter_pid != pid:
969 continue
970 else:
971 if len(tokens) == 8:
972 path = tokens[-1]
973 else:
974 path = ""
975 type_ = _common.socktype_to_enum(int(type_))
976 # XXX: determining the remote endpoint of a
977 # UNIX socket on Linux is not possible, see:
978 # https://serverfault.com/questions/252723/
979 raddr = ""
980 status = _common.CONN_NONE
981 yield (fd, family, type_, path, raddr, status, pid)
983 def retrieve(self, kind, pid=None):
984 if kind not in self.tmap:
985 raise ValueError("invalid %r kind argument; choose between %s"
986 % (kind, ', '.join([repr(x) for x in self.tmap])))
987 self._procfs_path = get_procfs_path()
988 if pid is not None:
989 inodes = self.get_proc_inodes(pid)
990 if not inodes:
991 # no connections for this process
992 return []
993 else:
994 inodes = self.get_all_inodes()
995 ret = set()
996 for proto_name, family, type_ in self.tmap[kind]:
997 path = "%s/net/%s" % (self._procfs_path, proto_name)
998 if family in (socket.AF_INET, socket.AF_INET6):
999 ls = self.process_inet(
1000 path, family, type_, inodes, filter_pid=pid)
1001 else:
1002 ls = self.process_unix(
1003 path, family, inodes, filter_pid=pid)
1004 for fd, family, type_, laddr, raddr, status, bound_pid in ls:
1005 if pid:
1006 conn = _common.pconn(fd, family, type_, laddr, raddr,
1007 status)
1008 else:
1009 conn = _common.sconn(fd, family, type_, laddr, raddr,
1010 status, bound_pid)
1011 ret.add(conn)
1012 return list(ret)
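# Illustrative sketch (not part of psutil): decoding a little-endian IPv4
# "ip:port" token as found in /proc/net/tcp, mirroring what
# Connections.decode_address() does above. The address is made up.
_example_addr = "0500000A:0016"
_example_ip_hex, _example_port_hex = _example_addr.split(':')
_example_port = int(_example_port_hex, 16)                    # 22
_example_ip = socket.inet_ntop(
    socket.AF_INET, base64.b16decode(_example_ip_hex)[::-1])  # '10.0.0.5'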
1015_connections = Connections()
1018def net_connections(kind='inet'):
1019 """Return system-wide open connections."""
1020 return _connections.retrieve(kind)
1023def net_io_counters():
1024 """Return network I/O statistics for every network interface
1025 installed on the system as a dict of raw tuples.
1026 """
1027 with open_text("%s/net/dev" % get_procfs_path()) as f:
1028 lines = f.readlines()
1029 retdict = {}
1030 for line in lines[2:]:
1031 colon = line.rfind(':')
1032 assert colon > 0, repr(line)
1033 name = line[:colon].strip()
1034 fields = line[colon + 1:].strip().split()
1036 # in
1037 (bytes_recv,
1038 packets_recv,
1039 errin,
1040 dropin,
1041 fifoin, # unused
1042 framein, # unused
1043 compressedin, # unused
1044 multicastin, # unused
1045 # out
1046 bytes_sent,
1047 packets_sent,
1048 errout,
1049 dropout,
1050 fifoout, # unused
1051 collisionsout, # unused
1052 carrierout, # unused
1053 compressedout) = map(int, fields)
1055 retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,
1056 errin, errout, dropin, dropout)
1057 return retdict
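# Illustrative sketch (not part of psutil): how net_io_counters() splits a
# /proc/net/dev line. The interface name and counters are made up.
_example_line = "  eth0: 1000 10 0 0 0 0 0 0 2000 20 0 0 0 0 0 0"
_example_colon = _example_line.rfind(':')
_example_name = _example_line[:_example_colon].strip()              # 'eth0'
_example_fields = list(
    map(int, _example_line[_example_colon + 1:].strip().split()))   # 16 ints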
1060def net_if_stats():
1061 """Get NIC stats (isup, duplex, speed, mtu)."""
1062 duplex_map = {cext.DUPLEX_FULL: NIC_DUPLEX_FULL,
1063 cext.DUPLEX_HALF: NIC_DUPLEX_HALF,
1064 cext.DUPLEX_UNKNOWN: NIC_DUPLEX_UNKNOWN}
1065 names = net_io_counters().keys()
1066 ret = {}
1067 for name in names:
1068 try:
1069 mtu = cext_posix.net_if_mtu(name)
1070 flags = cext_posix.net_if_flags(name)
1071 duplex, speed = cext.net_if_duplex_speed(name)
1072 except OSError as err:
1073 # https://github.com/giampaolo/psutil/issues/1279
1074 if err.errno != errno.ENODEV:
1075 raise
1076 else:
1077 debug(err)
1078 else:
1079 output_flags = ','.join(flags)
1080 isup = 'running' in flags
1081 ret[name] = _common.snicstats(isup, duplex_map[duplex], speed, mtu,
1082 output_flags)
1083 return ret
1086# =====================================================================
1087# --- disks
1088# =====================================================================
1091disk_usage = _psposix.disk_usage
1094def disk_io_counters(perdisk=False):
1095 """Return disk I/O statistics for every disk installed on the
1096 system as a dict of raw tuples.
1097 """
1098 def read_procfs():
1099 # OK, this is a bit confusing. The format of /proc/diskstats can
1100 # have 3 variations.
1101 # On Linux 2.4 each line has always 15 fields, e.g.:
1102 # "3 0 8 hda 8 8 8 8 8 8 8 8 8 8 8"
1103 # On Linux 2.6+ each line *usually* has 14 fields, and the disk
1104 # name is in another position, like this:
1105 # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8"
1106 # ...unless (Linux 2.6) the line refers to a partition instead
1107 # of a disk, in which case the line has fewer fields (7):
1108 # "3 1 hda1 8 8 8 8"
1109 # 4.18+ has 4 fields added:
1110 # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8 0 0 0 0"
1111 # 5.5 has 2 more fields.
1112 # See:
1113 # https://www.kernel.org/doc/Documentation/iostats.txt
1114 # https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
1115 with open_text("%s/diskstats" % get_procfs_path()) as f:
1116 lines = f.readlines()
1117 for line in lines:
1118 fields = line.split()
1119 flen = len(fields)
1120 if flen == 15:
1121 # Linux 2.4
1122 name = fields[3]
1123 reads = int(fields[2])
1124 (reads_merged, rbytes, rtime, writes, writes_merged,
1125 wbytes, wtime, _, busy_time, _) = map(int, fields[4:14])
1126 elif flen == 14 or flen >= 18:
1127 # Linux 2.6+, line referring to a disk
1128 name = fields[2]
1129 (reads, reads_merged, rbytes, rtime, writes, writes_merged,
1130 wbytes, wtime, _, busy_time, _) = map(int, fields[3:14])
1131 elif flen == 7:
1132 # Linux 2.6+, line referring to a partition
1133 name = fields[2]
1134 reads, rbytes, writes, wbytes = map(int, fields[3:])
1135 rtime = wtime = reads_merged = writes_merged = busy_time = 0
1136 else:
1137 raise ValueError("not sure how to interpret line %r" % line)
1138 yield (name, reads, writes, rbytes, wbytes, rtime, wtime,
1139 reads_merged, writes_merged, busy_time)
1141 def read_sysfs():
1142 for block in os.listdir('/sys/block'):
1143 for root, _, files in os.walk(os.path.join('/sys/block', block)):
1144 if 'stat' not in files:
1145 continue
1146 with open_text(os.path.join(root, 'stat')) as f:
1147 fields = f.read().strip().split()
1148 name = os.path.basename(root)
1149 (reads, reads_merged, rbytes, rtime, writes, writes_merged,
1150 wbytes, wtime, _, busy_time) = map(int, fields[:10])
1151 yield (name, reads, writes, rbytes, wbytes, rtime,
1152 wtime, reads_merged, writes_merged, busy_time)
1154 if os.path.exists('%s/diskstats' % get_procfs_path()):
1155 gen = read_procfs()
1156 elif os.path.exists('/sys/block'):
1157 gen = read_sysfs()
1158 else:
1159 raise NotImplementedError(
1160 "%s/diskstats nor /sys/block filesystem are available on this "
1161 "system" % get_procfs_path())
1163 retdict = {}
1164 for entry in gen:
1165 (name, reads, writes, rbytes, wbytes, rtime, wtime, reads_merged,
1166 writes_merged, busy_time) = entry
1167 if not perdisk and not is_storage_device(name):
1168 # perdisk=False means we want to calculate totals so we skip
1169 # partitions (e.g. 'sda1', 'nvme0n1p1') and only include
1170 # base disk devices (e.g. 'sda', 'nvme0n1'). Base disks
1171 # include a total of all their partitions + some extra size
1172 # of their own:
1173 # $ cat /proc/diskstats
1174 # 259 0 sda 10485760 ...
1175 # 259 1 sda1 5186039 ...
1176 # 259 1 sda2 5082039 ...
1177 # See:
1178 # https://github.com/giampaolo/psutil/pull/1313
1179 continue
1181 rbytes *= DISK_SECTOR_SIZE
1182 wbytes *= DISK_SECTOR_SIZE
1183 retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime,
1184 reads_merged, writes_merged, busy_time)
1186 return retdict
1189class RootFsDeviceFinder:
1190 """disk_partitions() may return partitions with device == "/dev/root"
1191 or "rootfs". This container class uses different strategies to try to
1192 obtain the real device path. Resources:
1193 https://bootlin.com/blog/find-root-device/
1194 https://www.systutorials.com/how-to-find-the-disk-where-root-is-on-in-bash-on-linux/
1195 """
1196 __slots__ = ['major', 'minor']
1198 def __init__(self):
1199 dev = os.stat("/").st_dev
1200 self.major = os.major(dev)
1201 self.minor = os.minor(dev)
1203 def ask_proc_partitions(self):
1204 with open_text("%s/partitions" % get_procfs_path()) as f:
1205 for line in f.readlines()[2:]:
1206 fields = line.split()
1207 if len(fields) < 4: # just for extra safety
1208 continue
1209 major = int(fields[0]) if fields[0].isdigit() else None
1210 minor = int(fields[1]) if fields[1].isdigit() else None
1211 name = fields[3]
1212 if major == self.major and minor == self.minor:
1213 if name: # just for extra safety
1214 return "/dev/%s" % name
1216 def ask_sys_dev_block(self):
1217 path = "/sys/dev/block/%s:%s/uevent" % (self.major, self.minor)
1218 with open_text(path) as f:
1219 for line in f:
1220 if line.startswith("DEVNAME="):
1221 name = line.strip().rpartition("DEVNAME=")[2]
1222 if name: # just for extra safety
1223 return "/dev/%s" % name
1225 def ask_sys_class_block(self):
1226 needle = "%s:%s" % (self.major, self.minor)
1227 files = glob.iglob("/sys/class/block/*/dev")
1228 for file in files:
1229 try:
1230 f = open_text(file)
1231 except FileNotFoundError: # race condition
1232 continue
1233 else:
1234 with f:
1235 data = f.read().strip()
1236 if data == needle:
1237 name = os.path.basename(os.path.dirname(file))
1238 return "/dev/%s" % name
1240 def find(self):
1241 path = None
1242 if path is None:
1243 try:
1244 path = self.ask_proc_partitions()
1245 except (IOError, OSError) as err:
1246 debug(err)
1247 if path is None:
1248 try:
1249 path = self.ask_sys_dev_block()
1250 except (IOError, OSError) as err:
1251 debug(err)
1252 if path is None:
1253 try:
1254 path = self.ask_sys_class_block()
1255 except (IOError, OSError) as err:
1256 debug(err)
1257 # We use exists() because the "/dev/*" part of the path is hard
1258 # coded, so we want to be sure.
1259 if path is not None and os.path.exists(path):
1260 return path
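# Illustrative sketch (not part of psutil): the device number pair that
# RootFsDeviceFinder matches against /proc/partitions and sysfs entries in
# order to resolve "/dev/root" to a real device path.
_example_dev = os.stat("/").st_dev
_example_major, _example_minor = os.major(_example_dev), os.minor(_example_dev)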
1263def disk_partitions(all=False):
1264 """Return mounted disk partitions as a list of namedtuples."""
1265 fstypes = set()
1266 procfs_path = get_procfs_path()
1267 if not all:
1268 with open_text("%s/filesystems" % procfs_path) as f:
1269 for line in f:
1270 line = line.strip()
1271 if not line.startswith("nodev"):
1272 fstypes.add(line.strip())
1273 else:
1274 # ignore all lines starting with "nodev" except "nodev zfs"
1275 fstype = line.split("\t")[1]
1276 if fstype == "zfs":
1277 fstypes.add("zfs")
1279 # See: https://github.com/giampaolo/psutil/issues/1307
1280 if procfs_path == "/proc" and os.path.isfile('/etc/mtab'):
1281 mounts_path = os.path.realpath("/etc/mtab")
1282 else:
1283 mounts_path = os.path.realpath("%s/self/mounts" % procfs_path)
1285 retlist = []
1286 partitions = cext.disk_partitions(mounts_path)
1287 for partition in partitions:
1288 device, mountpoint, fstype, opts = partition
1289 if device == 'none':
1290 device = ''
1291 if device in ("/dev/root", "rootfs"):
1292 device = RootFsDeviceFinder().find() or device
1293 if not all:
1294 if device == '' or fstype not in fstypes:
1295 continue
1296 maxfile = maxpath = None # set later
1297 ntuple = _common.sdiskpart(device, mountpoint, fstype, opts,
1298 maxfile, maxpath)
1299 retlist.append(ntuple)
1301 return retlist
1304# =====================================================================
1305# --- sensors
1306# =====================================================================
1309def sensors_temperatures():
1310 """Return hardware (CPU and others) temperatures as a dict
1311 including hardware name, label, current, max and critical
1312 temperatures.
1314 Implementation notes:
1315 - /sys/class/hwmon looks like the most recent interface to
1316 retrieve this info, and this implementation relies on it
1317 only (old distros will probably use something else)
1318 - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
1319 - /sys/class/thermal/thermal_zone* is another one but it's more
1320 difficult to parse
1321 """
1322 ret = collections.defaultdict(list)
1323 basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*')
1324 # CentOS has an intermediate /device directory:
1325 # https://github.com/giampaolo/psutil/issues/971
1326 # https://github.com/nicolargo/glances/issues/1060
1327 basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*'))
1328 basenames = sorted(set([x.split('_')[0] for x in basenames]))
1330 # Only add the coretemp hwmon entries if they're not already in
1331 # /sys/class/hwmon/
1332 # https://github.com/giampaolo/psutil/issues/1708
1333 # https://github.com/giampaolo/psutil/pull/1648
1334 basenames2 = glob.glob(
1335 '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*')
1336 repl = re.compile('/sys/devices/platform/coretemp.*/hwmon/')
1337 for name in basenames2:
1338 altname = repl.sub('/sys/class/hwmon/', name)
1339 if altname not in basenames:
1340 basenames.append(name)
1342 for base in basenames:
1343 try:
1344 path = base + '_input'
1345 current = float(bcat(path)) / 1000.0
1346 path = os.path.join(os.path.dirname(base), 'name')
1347 unit_name = cat(path).strip()
1348 except (IOError, OSError, ValueError):
1349 # A lot of things can go wrong here, so let's just skip the
1350 # whole entry. Sure thing is Linux's /sys/class/hwmon really
1351 # is a stinky broken mess.
1352 # https://github.com/giampaolo/psutil/issues/1009
1353 # https://github.com/giampaolo/psutil/issues/1101
1354 # https://github.com/giampaolo/psutil/issues/1129
1355 # https://github.com/giampaolo/psutil/issues/1245
1356 # https://github.com/giampaolo/psutil/issues/1323
1357 continue
1359 high = bcat(base + '_max', fallback=None)
1360 critical = bcat(base + '_crit', fallback=None)
1361 label = cat(base + '_label', fallback='').strip()
1363 if high is not None:
1364 try:
1365 high = float(high) / 1000.0
1366 except ValueError:
1367 high = None
1368 if critical is not None:
1369 try:
1370 critical = float(critical) / 1000.0
1371 except ValueError:
1372 critical = None
1374 ret[unit_name].append((label, current, high, critical))
1376 # Indication that no sensors were detected in /sys/class/hwmon/
1377 if not basenames:
1378 basenames = glob.glob('/sys/class/thermal/thermal_zone*')
1379 basenames = sorted(set(basenames))
1381 for base in basenames:
1382 try:
1383 path = os.path.join(base, 'temp')
1384 current = float(bcat(path)) / 1000.0
1385 path = os.path.join(base, 'type')
1386 unit_name = cat(path).strip()
1387 except (IOError, OSError, ValueError) as err:
1388 debug(err)
1389 continue
1391 trip_paths = glob.glob(base + '/trip_point*')
1392 trip_points = set(['_'.join(
1393 os.path.basename(p).split('_')[0:3]) for p in trip_paths])
1394 critical = None
1395 high = None
1396 for trip_point in trip_points:
1397 path = os.path.join(base, trip_point + "_type")
1398 trip_type = cat(path, fallback='').strip()
1399 if trip_type == 'critical':
1400 critical = bcat(os.path.join(base, trip_point + "_temp"),
1401 fallback=None)
1402 elif trip_type == 'high':
1403 high = bcat(os.path.join(base, trip_point + "_temp"),
1404 fallback=None)
1406 if high is not None:
1407 try:
1408 high = float(high) / 1000.0
1409 except ValueError:
1410 high = None
1411 if critical is not None:
1412 try:
1413 critical = float(critical) / 1000.0
1414 except ValueError:
1415 critical = None
1417 ret[unit_name].append(('', current, high, critical))
1419 return dict(ret)
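# Illustrative sketch (not part of psutil): hwmon and thermal_zone expose
# temperatures in millidegrees Celsius, so a made-up raw reading of b'49000'
# from a temp*_input file corresponds to 49.0 degrees, as computed above.
_example_millideg = b'49000'
_example_celsius = float(_example_millideg) / 1000.0  # 49.0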
1422def sensors_fans():
1423 """Return hardware fans info (for CPU and other peripherals) as a
1424 dict including hardware label and current speed.
1426 Implementation notes:
1427 - /sys/class/hwmon looks like the most recent interface to
1428 retrieve this info, and this implementation relies on it
1429 only (old distros will probably use something else)
1430 - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
1431 """
1432 ret = collections.defaultdict(list)
1433 basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*')
1434 if not basenames:
1435 # CentOS has an intermediate /device directory:
1436 # https://github.com/giampaolo/psutil/issues/971
1437 basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*')
1439 basenames = sorted(set([x.split('_')[0] for x in basenames]))
1440 for base in basenames:
1441 try:
1442 current = int(bcat(base + '_input'))
1443 except (IOError, OSError) as err:
1444 debug(err)
1445 continue
1446 unit_name = cat(os.path.join(os.path.dirname(base), 'name')).strip()
1447 label = cat(base + '_label', fallback='').strip()
1448 ret[unit_name].append(_common.sfan(label, current))
1450 return dict(ret)
1453def sensors_battery():
1454 """Return battery information.
1455 Implementation note: it appears /sys/class/power_supply/BAT0/
1456 directory structure may vary and provide files with the same
1457 meaning but under different names, see:
1458 https://github.com/giampaolo/psutil/issues/966
1459 """
1460 null = object()
1462 def multi_bcat(*paths):
1463 """Attempt to read the content of multiple files which may
1464 not exist. If none of them exist return None.
1465 """
1466 for path in paths:
1467 ret = bcat(path, fallback=null)
1468 if ret != null:
1469 try:
1470 return int(ret)
1471 except ValueError:
1472 return ret.strip()
1473 return None
1475 bats = [x for x in os.listdir(POWER_SUPPLY_PATH) if x.startswith('BAT') or
1476 'battery' in x.lower()]
1477 if not bats:
1478 return None
1479 # Get the first available battery. Usually this is "BAT0", with
1480 # some rare exceptions:
1481 # https://github.com/giampaolo/psutil/issues/1238
1482 root = os.path.join(POWER_SUPPLY_PATH, sorted(bats)[0])
1484 # Base metrics.
1485 energy_now = multi_bcat(
1486 root + "/energy_now",
1487 root + "/charge_now")
1488 power_now = multi_bcat(
1489 root + "/power_now",
1490 root + "/current_now")
1491 energy_full = multi_bcat(
1492 root + "/energy_full",
1493 root + "/charge_full")
1494 time_to_empty = multi_bcat(root + "/time_to_empty_now")
1496 # Percent. If we have energy_full the percentage will be more
1497 # accurate compared to reading /capacity file (float vs. int).
1498 if energy_full is not None and energy_now is not None:
1499 try:
1500 percent = 100.0 * energy_now / energy_full
1501 except ZeroDivisionError:
1502 percent = 0.0
1503 else:
1504 percent = int(cat(root + "/capacity", fallback=-1))
1505 if percent == -1:
1506 return None
1508 # Is AC power cable plugged in?
1509 # Note: AC0 is not always available and sometimes (e.g. CentOS7)
1510 # it's called "AC".
1511 power_plugged = None
1512 online = multi_bcat(
1513 os.path.join(POWER_SUPPLY_PATH, "AC0/online"),
1514 os.path.join(POWER_SUPPLY_PATH, "AC/online"))
1515 if online is not None:
1516 power_plugged = online == 1
1517 else:
1518 status = cat(root + "/status", fallback="").strip().lower()
1519 if status == "discharging":
1520 power_plugged = False
1521 elif status in ("charging", "full"):
1522 power_plugged = True
1524 # Seconds left.
1525 # Note to self: we may also calculate the charging ETA as per:
1526 # https://github.com/thialfihar/dotfiles/blob/
1527 # 013937745fd9050c30146290e8f963d65c0179e6/bin/battery.py#L55
1528 if power_plugged:
1529 secsleft = _common.POWER_TIME_UNLIMITED
1530 elif energy_now is not None and power_now is not None:
1531 try:
1532 secsleft = int(energy_now / power_now * 3600)
1533 except ZeroDivisionError:
1534 secsleft = _common.POWER_TIME_UNKNOWN
1535 elif time_to_empty is not None:
1536 secsleft = int(time_to_empty * 60)
1537 if secsleft < 0:
1538 secsleft = _common.POWER_TIME_UNKNOWN
1539 else:
1540 secsleft = _common.POWER_TIME_UNKNOWN
1542 return _common.sbattery(percent, secsleft, power_plugged)
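# Illustrative sketch (not part of psutil): the percent and seconds-left
# arithmetic used above, applied to made-up energy_now / energy_full /
# power_now readings (micro-Wh and micro-W respectively).
_example_energy_now, _example_energy_full, _example_power_now = \
    25000000, 50000000, 10000000
_example_percent = 100.0 * _example_energy_now / _example_energy_full  # 50.0
_example_secsleft = int(
    _example_energy_now / _example_power_now * 3600)  # 9000 (2.5 hours)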
1545# =====================================================================
1546# --- other system functions
1547# =====================================================================
1550def users():
1551 """Return currently connected users as a list of namedtuples."""
1552 retlist = []
1553 rawlist = cext.users()
1554 for item in rawlist:
1555 user, tty, hostname, tstamp, user_process, pid = item
1556 # note: the underlying C function includes entries about
1557 # system boot, run level and others. We might want
1558 # to use them in the future.
1559 if not user_process:
1560 continue
1561 if hostname in (':0.0', ':0'):
1562 hostname = 'localhost'
1563 nt = _common.suser(user, tty or None, hostname, tstamp, pid)
1564 retlist.append(nt)
1565 return retlist
1568def boot_time():
1569 """Return the system boot time expressed in seconds since the epoch."""
1570 global BOOT_TIME
1571 path = '%s/stat' % get_procfs_path()
1572 with open_binary(path) as f:
1573 for line in f:
1574 if line.startswith(b'btime'):
1575 ret = float(line.strip().split()[1])
1576 BOOT_TIME = ret
1577 return ret
1578 raise RuntimeError(
1579 "line 'btime' not found in %s" % path)
1582# =====================================================================
1583# --- processes
1584# =====================================================================
1587def pids():
1588 """Returns a list of PIDs currently running on the system."""
1589 return [int(x) for x in os.listdir(b(get_procfs_path())) if x.isdigit()]
1592def pid_exists(pid):
1593 """Check for the existence of a unix PID. Linux TIDs are not
1594 supported (always return False).
1595 """
1596 if not _psposix.pid_exists(pid):
1597 return False
1598 else:
1599 # Linux apparently does not distinguish between PIDs and TIDs
1600 # (thread IDs).
1601 # listdir("/proc") won't show any TID (only PIDs) but
1602 # os.stat("/proc/{tid}") will succeed if {tid} exists.
1603 # os.kill() can also be passed a TID. This is quite confusing.
1604 # In here we want to enforce this distinction and support PIDs
1605 # only, see:
1606 # https://github.com/giampaolo/psutil/issues/687
1607 try:
1608 # Note: already checked that this is faster than using a
1609 # regular expr. Also (a lot) faster than doing
1610 # 'return pid in pids()'
1611 path = "%s/%s/status" % (get_procfs_path(), pid)
1612 with open_binary(path) as f:
1613 for line in f:
1614 if line.startswith(b"Tgid:"):
1615 tgid = int(line.split()[1])
1616 # If tgid and pid are the same then we're
1617 # dealing with a process PID.
1618 return tgid == pid
1619 raise ValueError("'Tgid' line not found in %s" % path)
1620 except (EnvironmentError, ValueError):
1621 return pid in pids()
1624def ppid_map():
1625 """Obtain a {pid: ppid, ...} dict for all running processes in
1626 one shot. Used to speed up Process.children().
1627 """
1628 ret = {}
1629 procfs_path = get_procfs_path()
1630 for pid in pids():
1631 try:
1632 with open_binary("%s/%s/stat" % (procfs_path, pid)) as f:
1633 data = f.read()
1634 except (FileNotFoundError, ProcessLookupError):
1635 # Note: we should be able to access /stat for all processes
1636 # so it's unlikely we'll bump into EPERM, which is good.
1637 pass
1638 else:
1639 rpar = data.rfind(b')')
1640 dset = data[rpar + 2:].split()
1641 ppid = int(dset[1])
1642 ret[pid] = ppid
1643 return ret
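# Illustrative sketch (not part of psutil): extracting the ppid from a
# made-up /proc/<pid>/stat line. The process name may itself contain ')'
# characters, hence the rfind(b')') used above rather than a plain split.
_example_stat = b"1234 (some (weird) name) S 1 1234 1234 0 -1"
_example_rpar = _example_stat.rfind(b')')
_example_ppid = int(_example_stat[_example_rpar + 2:].split()[1])  # 1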
1646def wrap_exceptions(fun):
1647 """Decorator which translates bare OSError and IOError exceptions
1648 into NoSuchProcess and AccessDenied.
1649 """
1650 @functools.wraps(fun)
1651 def wrapper(self, *args, **kwargs):
1652 try:
1653 return fun(self, *args, **kwargs)
1654 except PermissionError:
1655 raise AccessDenied(self.pid, self._name)
1656 except ProcessLookupError:
1657 raise NoSuchProcess(self.pid, self._name)
1658 except FileNotFoundError:
1659 if not os.path.exists("%s/%s" % (self._procfs_path, self.pid)):
1660 raise NoSuchProcess(self.pid, self._name)
1661 # Note: zombies will keep existing under /proc until they're
1662 # gone so there's no way to distinguish them in here.
1663 raise
1664 return wrapper
1667class Process(object):
1668 """Linux process implementation."""
1670 __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"]
1672 def __init__(self, pid):
1673 self.pid = pid
1674 self._name = None
1675 self._ppid = None
1676 self._procfs_path = get_procfs_path()
1678 def _assert_alive(self):
1679 """Raise NSP if the process disappeared on us."""
1680 # For those C functions which do not raise NSP, possibly returning
1681 # an incorrect or incomplete result.
1682 os.stat('%s/%s' % (self._procfs_path, self.pid))
1684 @wrap_exceptions
1685 @memoize_when_activated
1686 def _parse_stat_file(self):
1687 """Parse /proc/{pid}/stat file and return a dict with various
1688 process info.
1689 Using "man proc" as a reference: where "man proc" refers to
1690 position N always subtract 3 (e.g. ppid position 4 in
1691 'man proc' == position 1 in here).
1692 The return value is cached in case oneshot() ctx manager is
1693 in use.
1694 """
1695 data = bcat("%s/%s/stat" % (self._procfs_path, self.pid))
1696 # Process name is between parentheses. It can contain spaces and
1697 # other parentheses. This is taken into account by looking for
1698 # the first occurrence of "(" and the last occurrence of ")".
1699 rpar = data.rfind(b')')
1700 name = data[data.find(b'(') + 1:rpar]
1701 fields = data[rpar + 2:].split()
1703 ret = {}
1704 ret['name'] = name
1705 ret['status'] = fields[0]
1706 ret['ppid'] = fields[1]
1707 ret['ttynr'] = fields[4]
1708 ret['utime'] = fields[11]
1709 ret['stime'] = fields[12]
1710 ret['children_utime'] = fields[13]
1711 ret['children_stime'] = fields[14]
1712 ret['create_time'] = fields[19]
1713 ret['cpu_num'] = fields[36]
1714 ret['blkio_ticks'] = fields[39] # aka 'delayacct_blkio_ticks'
1716 return ret
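# Illustrative sketch (not part of psutil) of the "subtract 3" rule:
# after stripping the pid and the parenthesised name (fields 1-2 in
# "man proc"), the remaining space-separated fields are 0-indexed, so
# e.g. ppid (field 4 in "man proc") lands at index 1:
#
#   with open("/proc/self/stat", "rb") as f:
#       data = f.read()
#   fields = data[data.rfind(b')') + 2:].split()
#   ppid = int(fields[1])  # field 4 in "man proc"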
1718 @wrap_exceptions
1719 @memoize_when_activated
1720 def _read_status_file(self):
1721 """Read /proc/{pid}/stat file and return its content.
1722 The return value is cached in case oneshot() ctx manager is
1723 in use.
1724 """
1725 with open_binary("%s/%s/status" % (self._procfs_path, self.pid)) as f:
1726 return f.read()
1728 @wrap_exceptions
1729 @memoize_when_activated
1730 def _read_smaps_file(self):
1731 with open_binary("%s/%s/smaps" % (self._procfs_path, self.pid)) as f:
1732 return f.read().strip()
1734 def oneshot_enter(self):
1735 self._parse_stat_file.cache_activate(self)
1736 self._read_status_file.cache_activate(self)
1737 self._read_smaps_file.cache_activate(self)
1739 def oneshot_exit(self):
1740 self._parse_stat_file.cache_deactivate(self)
1741 self._read_status_file.cache_deactivate(self)
1742 self._read_smaps_file.cache_deactivate(self)
1744 @wrap_exceptions
1745 def name(self):
1746 name = self._parse_stat_file()['name']
1747 if PY3:
1748 name = decode(name)
1749 # XXX - gets changed later and probably needs refactoring
1750 return name
1752 def exe(self):
1753 try:
1754 return readlink("%s/%s/exe" % (self._procfs_path, self.pid))
1755 except (FileNotFoundError, ProcessLookupError):
1756 # no such file error; may also be raised if the
1757 # path actually exists, for system processes with
1758 # low pids (about 0-20)
1759 if os.path.lexists("%s/%s" % (self._procfs_path, self.pid)):
1760 return ""
1761 else:
1762 if not pid_exists(self.pid):
1763 raise NoSuchProcess(self.pid, self._name)
1764 else:
1765 raise ZombieProcess(self.pid, self._name, self._ppid)
1766 except PermissionError:
1767 raise AccessDenied(self.pid, self._name)
1769 @wrap_exceptions
1770 def cmdline(self):
1771 with open_text("%s/%s/cmdline" % (self._procfs_path, self.pid)) as f:
1772 data = f.read()
1773 if not data:
1774 # may happen in case of zombie process
1775 return []
1776 # 'man proc' states that args are separated by null bytes '\0'
1777 # and the last char is supposed to be a null byte. Nevertheless,
1778 # some processes may change their cmdline after being started
1779 # (via setproctitle() or similar); they are usually not
1780 # compliant with this rule and use spaces instead. The Google
1781 # Chrome process is an example. See:
1782 # https://github.com/giampaolo/psutil/issues/1179
1783 sep = '\x00' if data.endswith('\x00') else ' '
1784 if data.endswith(sep):
1785 data = data[:-1]
1786 cmdline = data.split(sep)
1787 # Sometimes last char is a null byte '\0' but the args are
1788 # separated by spaces, see: https://github.com/giampaolo/psutil/
1789 # issues/1179#issuecomment-552984549
1790 if sep == '\x00' and len(cmdline) == 1 and ' ' in data:
1791 cmdline = data.split(' ')
1792 return cmdline
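# Illustrative sketch (not part of psutil): the separator logic above
# applied to a well-behaved, null-terminated cmdline (hypothetical data):
#
#   data = "/usr/bin/python3\x00script.py\x00"
#   sep = '\x00' if data.endswith('\x00') else ' '
#   data[:-1].split(sep)  # -> ['/usr/bin/python3', 'script.py']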
1794 @wrap_exceptions
1795 def environ(self):
1796 with open_text("%s/%s/environ" % (self._procfs_path, self.pid)) as f:
1797 data = f.read()
1798 return parse_environ_block(data)
1800 @wrap_exceptions
1801 def terminal(self):
1802 tty_nr = int(self._parse_stat_file()['ttynr'])
1803 tmap = _psposix.get_terminal_map()
1804 try:
1805 return tmap[tty_nr]
1806 except KeyError:
1807 return None
1809 # May not be available on old kernels.
1810 if os.path.exists('/proc/%s/io' % os.getpid()):
1811 @wrap_exceptions
1812 def io_counters(self):
1813 fname = "%s/%s/io" % (self._procfs_path, self.pid)
1814 fields = {}
1815 with open_binary(fname) as f:
1816 for line in f:
1817 # https://github.com/giampaolo/psutil/issues/1004
1818 line = line.strip()
1819 if line:
1820 try:
1821 name, value = line.split(b': ')
1822 except ValueError:
1823 # https://github.com/giampaolo/psutil/issues/1004
1824 continue
1825 else:
1826 fields[name] = int(value)
1827 if not fields:
1828 raise RuntimeError("%s file was empty" % fname)
1829 try:
1830 return pio(
1831 fields[b'syscr'], # read syscalls
1832 fields[b'syscw'], # write syscalls
1833 fields[b'read_bytes'], # read bytes
1834 fields[b'write_bytes'], # write bytes
1835 fields[b'rchar'], # read chars
1836 fields[b'wchar'], # write chars
1837 )
1838 except KeyError as err:
1839 raise ValueError("%r field was not found in %s; found fields "
1840 "are %r" % (err.args[0], fname, fields))
1842 @wrap_exceptions
1843 def cpu_times(self):
1844 values = self._parse_stat_file()
1845 utime = float(values['utime']) / CLOCK_TICKS
1846 stime = float(values['stime']) / CLOCK_TICKS
1847 children_utime = float(values['children_utime']) / CLOCK_TICKS
1848 children_stime = float(values['children_stime']) / CLOCK_TICKS
1849 iowait = float(values['blkio_ticks']) / CLOCK_TICKS
1850 return pcputimes(utime, stime, children_utime, children_stime, iowait)
1852 @wrap_exceptions
1853 def cpu_num(self):
1854 """What CPU the process is on."""
1855 return int(self._parse_stat_file()['cpu_num'])
1857 @wrap_exceptions
1858 def wait(self, timeout=None):
1859 return _psposix.wait_pid(self.pid, timeout, self._name)
1861 @wrap_exceptions
1862 def create_time(self):
1863 ctime = float(self._parse_stat_file()['create_time'])
1864 # According to documentation, starttime is in field 21 and the
1865 # unit is jiffies (clock ticks).
1866 # We first divide it by clock ticks and then add the boot time,
1867 # returning seconds since the epoch.
1868 # Also use cached value if available.
1869 bt = BOOT_TIME or boot_time()
1870 return (ctime / CLOCK_TICKS) + bt
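# Illustrative worked example with made-up numbers: with CLOCK_TICKS == 100,
# a starttime of 12345 jiffies and a boot time of 1700000000, the result is
# 1700000000 + 12345 / 100 == 1700000123.45 seconds since the epoch.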
1872 @wrap_exceptions
1873 def memory_info(self):
1874 # ============================================================
1875 # | FIELD | DESCRIPTION | AKA | TOP |
1876 # ============================================================
1877 # | rss | resident set size | | RES |
1878 # | vms | total program size | size | VIRT |
1879 # | shared | shared pages (from shared mappings) | | SHR |
1880 # | text | text ('code') | trs | CODE |
1881 # | lib | library (unused in Linux 2.6) | lrs | |
1882 # | data | data + stack | drs | DATA |
1883 # | dirty | dirty pages (unused in Linux 2.6) | dt | |
1884 # ============================================================
1885 with open_binary("%s/%s/statm" % (self._procfs_path, self.pid)) as f:
1886 vms, rss, shared, text, lib, data, dirty = \
1887 [int(x) * PAGESIZE for x in f.readline().split()[:7]]
1888 return pmem(rss, vms, shared, text, lib, data, dirty)
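# Illustrative sketch (not part of psutil): reading the same statm fields
# for the current process, converting pages to bytes:
#
#   import os
#   page = os.sysconf("SC_PAGE_SIZE")
#   with open("/proc/self/statm") as f:
#       vms, rss, shared, text, lib, data, dirty = \
#           [int(x) * page for x in f.readline().split()[:7]]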
1890 if HAS_PROC_SMAPS_ROLLUP or HAS_PROC_SMAPS:
1892 @wrap_exceptions
1893 def _parse_smaps_rollup(self):
1894 # /proc/pid/smaps_rollup was added to Linux in 2017. Faster
1895 # than /proc/pid/smaps. It reports higher PSS than */smaps
1896 # (from 1k up to 200k higher; tested against all processes).
1897 uss = pss = swap = 0
1898 try:
1899 with open_binary("{}/{}/smaps_rollup".format(
1900 self._procfs_path, self.pid)) as f:
1901 for line in f:
1902 if line.startswith(b"Private_"):
1903 # Private_Clean, Private_Dirty, Private_Hugetlb
1904 uss += int(line.split()[1]) * 1024
1905 elif line.startswith(b"Pss:"):
1906 pss = int(line.split()[1]) * 1024
1907 elif line.startswith(b"Swap:"):
1908 swap = int(line.split()[1]) * 1024
1909 except ProcessLookupError: # happens on readline()
1910 if not pid_exists(self.pid):
1911 raise NoSuchProcess(self.pid, self._name)
1912 else:
1913 raise ZombieProcess(self.pid, self._name, self._ppid)
1914 return (uss, pss, swap)
1916 @wrap_exceptions
1917 def _parse_smaps(
1918 self,
1919 # Gets Private_Clean, Private_Dirty, Private_Hugetlb.
1920 _private_re=re.compile(br"\nPrivate.*:\s+(\d+)"),
1921 _pss_re=re.compile(br"\nPss\:\s+(\d+)"),
1922 _swap_re=re.compile(br"\nSwap\:\s+(\d+)")):
1923 # /proc/pid/smaps does not exist on kernels < 2.6.14 or if
1924 # CONFIG_MMU kernel configuration option is not enabled.
1926 # Note: using 3 regexes is faster than reading the file
1927 # line by line.
1928 # XXX: on Python 3 these regexes are 30% slower than on
1929 # Python 2 though. Figure out why.
1930 #
1931 # You might be tempted to calculate USS by subtracting
1932 # the "shared" value from the "resident" value in
1933 # /proc/<pid>/statm. But at least on Linux, statm's "shared"
1934 # value actually counts pages backed by files, which has
1935 # little to do with whether the pages are actually shared.
1936 # /proc/self/smaps on the other hand appears to give us the
1937 # correct information.
1938 smaps_data = self._read_smaps_file()
1939 # Note: smaps file can be empty for certain processes.
1940 # The code below will not crash though; it will just return zeros.
1941 uss = sum(map(int, _private_re.findall(smaps_data))) * 1024
1942 pss = sum(map(int, _pss_re.findall(smaps_data))) * 1024
1943 swap = sum(map(int, _swap_re.findall(smaps_data))) * 1024
1944 return (uss, pss, swap)
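# Illustrative sketch (not part of psutil): the same regex approach run
# against the current process; smaps reports sizes in kB, hence the * 1024:
#
#   import re
#   with open("/proc/self/smaps", "rb") as f:
#       smaps = f.read()
#   uss = sum(map(int, re.findall(br"\nPrivate.*:\s+(\d+)", smaps))) * 1024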
1946 def memory_full_info(self):
1947 if HAS_PROC_SMAPS_ROLLUP: # faster
1948 uss, pss, swap = self._parse_smaps_rollup()
1949 else:
1950 uss, pss, swap = self._parse_smaps()
1951 basic_mem = self.memory_info()
1952 return pfullmem(*basic_mem + (uss, pss, swap))
1954 else:
1955 memory_full_info = memory_info
1957 if HAS_PROC_SMAPS:
1959 @wrap_exceptions
1960 def memory_maps(self):
1961 """Return process's mapped memory regions as a list of named
1962 tuples. Fields are explained in 'man proc'; here is an updated
1963 (Apr 2012) version: http://goo.gl/fmebo
1965 /proc/{PID}/smaps does not exist on kernels < 2.6.14 or if
1966 CONFIG_MMU kernel configuration option is not enabled.
1967 """
1968 def get_blocks(lines, current_block):
1969 data = {}
1970 for line in lines:
1971 fields = line.split(None, 5)
1972 if not fields[0].endswith(b':'):
1973 # new block section
1974 yield (current_block.pop(), data)
1975 current_block.append(line)
1976 else:
1977 try:
1978 data[fields[0]] = int(fields[1]) * 1024
1979 except ValueError:
1980 if fields[0].startswith(b'VmFlags:'):
1981 # see issue #369
1982 continue
1983 else:
1984 raise ValueError("don't know how to inte"
1985 "rpret line %r" % line)
1986 yield (current_block.pop(), data)
1988 data = self._read_smaps_file()
1989 # Note: smaps file can be empty for certain processes.
1990 if not data:
1991 return []
1992 lines = data.split(b'\n')
1993 ls = []
1994 first_line = lines.pop(0)
1995 current_block = [first_line]
1996 for header, data in get_blocks(lines, current_block):
1997 hfields = header.split(None, 5)
1998 try:
1999 addr, perms, offset, dev, inode, path = hfields
2000 except ValueError:
2001 addr, perms, offset, dev, inode, path = \
2002 hfields + ['']
2003 if not path:
2004 path = '[anon]'
2005 else:
2006 if PY3:
2007 path = decode(path)
2008 path = path.strip()
2009 if (path.endswith(' (deleted)') and not
2010 path_exists_strict(path)):
2011 path = path[:-10]
2012 ls.append((
2013 decode(addr), decode(perms), path,
2014 data.get(b'Rss:', 0),
2015 data.get(b'Size:', 0),
2016 data.get(b'Pss:', 0),
2017 data.get(b'Shared_Clean:', 0),
2018 data.get(b'Shared_Dirty:', 0),
2019 data.get(b'Private_Clean:', 0),
2020 data.get(b'Private_Dirty:', 0),
2021 data.get(b'Referenced:', 0),
2022 data.get(b'Anonymous:', 0),
2023 data.get(b'Swap:', 0)
2024 ))
2025 return ls
2027 @wrap_exceptions
2028 def cwd(self):
2029 try:
2030 return readlink("%s/%s/cwd" % (self._procfs_path, self.pid))
2031 except (FileNotFoundError, ProcessLookupError):
2032 # https://github.com/giampaolo/psutil/issues/986
2033 if not pid_exists(self.pid):
2034 raise NoSuchProcess(self.pid, self._name)
2035 else:
2036 raise ZombieProcess(self.pid, self._name, self._ppid)
2038 @wrap_exceptions
2039 def num_ctx_switches(self,
2040 _ctxsw_re=re.compile(br'ctxt_switches:\t(\d+)')):
2041 data = self._read_status_file()
2042 ctxsw = _ctxsw_re.findall(data)
2043 if not ctxsw:
2044 raise NotImplementedError(
2045 "'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches'"
2046 "lines were not found in %s/%s/status; the kernel is "
2047 "probably older than 2.6.23" % (
2048 self._procfs_path, self.pid))
2049 else:
2050 return _common.pctxsw(int(ctxsw[0]), int(ctxsw[1]))
2052 @wrap_exceptions
2053 def num_threads(self, _num_threads_re=re.compile(br'Threads:\t(\d+)')):
2054 # Note: on Python 3 using a re is faster than iterating over file
2055 # line by line. On Python 2 it is the exact opposite, and iterating
2056 # over a file on Python 3 is slower than on Python 2.
2057 data = self._read_status_file()
2058 return int(_num_threads_re.findall(data)[0])
2060 @wrap_exceptions
2061 def threads(self):
2062 thread_ids = os.listdir("%s/%s/task" % (self._procfs_path, self.pid))
2063 thread_ids.sort()
2064 retlist = []
2065 hit_enoent = False
2066 for thread_id in thread_ids:
2067 fname = "%s/%s/task/%s/stat" % (
2068 self._procfs_path, self.pid, thread_id)
2069 try:
2070 with open_binary(fname) as f:
2071 st = f.read().strip()
2072 except (FileNotFoundError, ProcessLookupError):
2073 # no such file or directory or no such process;
2074 # it means the thread disappeared on us
2075 hit_enoent = True
2076 continue
2077 # ignore the first two values ("pid (exe)")
2078 st = st[st.find(b')') + 2:]
2079 values = st.split(b' ')
2080 utime = float(values[11]) / CLOCK_TICKS
2081 stime = float(values[12]) / CLOCK_TICKS
2082 ntuple = _common.pthread(int(thread_id), utime, stime)
2083 retlist.append(ntuple)
2084 if hit_enoent:
2085 self._assert_alive()
2086 return retlist
2088 @wrap_exceptions
2089 def nice_get(self):
2090 # with open_text('%s/%s/stat' % (self._procfs_path, self.pid)) as f:
2091 # data = f.read()
2092 # return int(data.split()[18])
2094 # Use C implementation
2095 return cext_posix.getpriority(self.pid)
2097 @wrap_exceptions
2098 def nice_set(self, value):
2099 return cext_posix.setpriority(self.pid, value)
2101 # starting from CentOS 6.
2102 if HAS_CPU_AFFINITY:
2104 @wrap_exceptions
2105 def cpu_affinity_get(self):
2106 return cext.proc_cpu_affinity_get(self.pid)
2108 def _get_eligible_cpus(
2109 self, _re=re.compile(br"Cpus_allowed_list:\t(\d+)-(\d+)")):
2110 # See: https://github.com/giampaolo/psutil/issues/956
2111 data = self._read_status_file()
2112 match = _re.findall(data)
2113 if match:
2114 return list(range(int(match[0][0]), int(match[0][1]) + 1))
2115 else:
2116 return list(range(len(per_cpu_times())))
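# Illustrative sketch (not part of psutil): how a single-range
# "Cpus_allowed_list" line (hypothetical value) expands with the logic above:
#
#   import re
#   line = b"Cpus_allowed_list:\t0-3"
#   lo, hi = re.search(br"Cpus_allowed_list:\t(\d+)-(\d+)", line).groups()
#   list(range(int(lo), int(hi) + 1))  # -> [0, 1, 2, 3]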
2118 @wrap_exceptions
2119 def cpu_affinity_set(self, cpus):
2120 try:
2121 cext.proc_cpu_affinity_set(self.pid, cpus)
2122 except (OSError, ValueError) as err:
2123 if isinstance(err, ValueError) or err.errno == errno.EINVAL:
2124 eligible_cpus = self._get_eligible_cpus()
2125 all_cpus = tuple(range(len(per_cpu_times())))
2126 for cpu in cpus:
2127 if cpu not in all_cpus:
2128 raise ValueError(
2129 "invalid CPU number %r; choose between %s" % (
2130 cpu, eligible_cpus))
2131 if cpu not in eligible_cpus:
2132 raise ValueError(
2133 "CPU number %r is not eligible; choose "
2134 "between %s" % (cpu, eligible_cpus))
2135 raise
2137 # only starting from kernel 2.6.13
2138 if HAS_PROC_IO_PRIORITY:
2140 @wrap_exceptions
2141 def ionice_get(self):
2142 ioclass, value = cext.proc_ioprio_get(self.pid)
2143 if enum is not None:
2144 ioclass = IOPriority(ioclass)
2145 return _common.pionice(ioclass, value)
2147 @wrap_exceptions
2148 def ionice_set(self, ioclass, value):
2149 if value is None:
2150 value = 0
2151 if value and ioclass in (IOPRIO_CLASS_IDLE, IOPRIO_CLASS_NONE):
2152 raise ValueError("%r ioclass accepts no value" % ioclass)
2153 if value < 0 or value > 7:
2154 raise ValueError("value not in 0-7 range")
2155 return cext.proc_ioprio_set(self.pid, ioclass, value)
2157 if prlimit is not None:
2159 @wrap_exceptions
2160 def rlimit(self, resource_, limits=None):
2161 # If pid is 0 prlimit() applies to the calling process and
2162 # we don't want that. We should never get here though as
2163 # PID 0 is not supported on Linux.
2164 if self.pid == 0:
2165 raise ValueError("can't use prlimit() against PID 0 process")
2166 try:
2167 if limits is None:
2168 # get
2169 return prlimit(self.pid, resource_)
2170 else:
2171 # set
2172 if len(limits) != 2:
2173 raise ValueError(
2174 "second argument must be a (soft, hard) tuple, "
2175 "got %s" % repr(limits))
2176 prlimit(self.pid, resource_, limits)
2177 except OSError as err:
2178 if err.errno == errno.ENOSYS and pid_exists(self.pid):
2179 # I saw this happening on Travis:
2180 # https://travis-ci.org/giampaolo/psutil/jobs/51368273
2181 raise ZombieProcess(self.pid, self._name, self._ppid)
2182 else:
2183 raise
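# Illustrative usage sketch via the public API (assumes Linux, where
# psutil exposes the RLIMIT_* constants):
#
#   import psutil
#   p = psutil.Process()
#   soft, hard = p.rlimit(psutil.RLIMIT_NOFILE)      # get
#   p.rlimit(psutil.RLIMIT_NOFILE, (soft, hard))     # set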
2185 @wrap_exceptions
2186 def status(self):
2187 letter = self._parse_stat_file()['status']
2188 if PY3:
2189 letter = letter.decode()
2190 # XXX is '?' legit? (we're not supposed to return it anyway)
2191 return PROC_STATUSES.get(letter, '?')
2193 @wrap_exceptions
2194 def open_files(self):
2195 retlist = []
2196 files = os.listdir("%s/%s/fd" % (self._procfs_path, self.pid))
2197 hit_enoent = False
2198 for fd in files:
2199 file = "%s/%s/fd/%s" % (self._procfs_path, self.pid, fd)
2200 try:
2201 path = readlink(file)
2202 except (FileNotFoundError, ProcessLookupError):
2203 # ENOENT == file which is gone in the meantime
2204 hit_enoent = True
2205 continue
2206 except OSError as err:
2207 if err.errno == errno.EINVAL:
2208 # not a link
2209 continue
2210 if err.errno == errno.ENAMETOOLONG:
2211 # file name too long
2212 debug(err)
2213 continue
2214 raise
2215 else:
2216 # If the path is not absolute there's no way to tell
2217 # whether it's a regular file or not, so we skip it.
2218 # A regular file is always supposed to have an
2219 # absolute path though.
2220 if path.startswith('/') and isfile_strict(path):
2221 # Get file position and flags.
2222 file = "%s/%s/fdinfo/%s" % (
2223 self._procfs_path, self.pid, fd)
2224 try:
2225 with open_binary(file) as f:
2226 pos = int(f.readline().split()[1])
2227 flags = int(f.readline().split()[1], 8)
2228 except (FileNotFoundError, ProcessLookupError):
2229 # fd gone in the meantime; process may
2230 # still be alive
2231 hit_enoent = True
2232 else:
2233 mode = file_flags_to_mode(flags)
2234 ntuple = popenfile(
2235 path, int(fd), int(pos), mode, flags)
2236 retlist.append(ntuple)
2237 if hit_enoent:
2238 self._assert_alive()
2239 return retlist
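# For reference, /proc/<pid>/fdinfo/<fd> begins with the two lines parsed
# above (values are made up); "flags" is the octal open(2) flags value,
# hence the int(..., 8):
#
#   pos:    1024
#   flags:  0100002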
2241 @wrap_exceptions
2242 def connections(self, kind='inet'):
2243 ret = _connections.retrieve(kind, self.pid)
2244 self._assert_alive()
2245 return ret
2247 @wrap_exceptions
2248 def num_fds(self):
2249 return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid)))
2251 @wrap_exceptions
2252 def ppid(self):
2253 return int(self._parse_stat_file()['ppid'])
2255 @wrap_exceptions
2256 def uids(self, _uids_re=re.compile(br'Uid:\t(\d+)\t(\d+)\t(\d+)')):
2257 data = self._read_status_file()
2258 real, effective, saved = _uids_re.findall(data)[0]
2259 return _common.puids(int(real), int(effective), int(saved))
2261 @wrap_exceptions
2262 def gids(self, _gids_re=re.compile(br'Gid:\t(\d+)\t(\d+)\t(\d+)')):
2263 data = self._read_status_file()
2264 real, effective, saved = _gids_re.findall(data)[0]
2265 return _common.pgids(int(real), int(effective), int(saved))