Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/psutil/_pslinux.py: 22%
1240 statements
coverage.py v7.0.1, created at 2022-12-25 06:11 +0000
1# Copyright (c) 2009, Giampaolo Rodola'. All rights reserved.
2# Use of this source code is governed by a BSD-style license that can be
3# found in the LICENSE file.
5"""Linux platform implementation."""
7from __future__ import division
9import base64
10import collections
11import errno
12import functools
13import glob
14import os
15import re
16import socket
17import struct
18import sys
19import traceback
20import warnings
21from collections import defaultdict
22from collections import namedtuple
24from . import _common
25from . import _psposix
26from . import _psutil_linux as cext
27from . import _psutil_posix as cext_posix
28from ._common import NIC_DUPLEX_FULL
29from ._common import NIC_DUPLEX_HALF
30from ._common import NIC_DUPLEX_UNKNOWN
31from ._common import AccessDenied
32from ._common import NoSuchProcess
33from ._common import ZombieProcess
34from ._common import bcat
35from ._common import cat
36from ._common import debug
37from ._common import decode
38from ._common import get_procfs_path
39from ._common import isfile_strict
40from ._common import memoize
41from ._common import memoize_when_activated
42from ._common import open_binary
43from ._common import open_text
44from ._common import parse_environ_block
45from ._common import path_exists_strict
46from ._common import supports_ipv6
47from ._common import usage_percent
48from ._compat import PY3
49from ._compat import FileNotFoundError
50from ._compat import PermissionError
51from ._compat import ProcessLookupError
52from ._compat import b
53from ._compat import basestring
56if sys.version_info >= (3, 4):
57 import enum
58else:
59 enum = None
62__extra__all__ = [
63 #
64 'PROCFS_PATH',
65 # io prio constants
66 "IOPRIO_CLASS_NONE", "IOPRIO_CLASS_RT", "IOPRIO_CLASS_BE",
67 "IOPRIO_CLASS_IDLE",
68 # connection status constants
69 "CONN_ESTABLISHED", "CONN_SYN_SENT", "CONN_SYN_RECV", "CONN_FIN_WAIT1",
70 "CONN_FIN_WAIT2", "CONN_TIME_WAIT", "CONN_CLOSE", "CONN_CLOSE_WAIT",
71 "CONN_LAST_ACK", "CONN_LISTEN", "CONN_CLOSING", ]
74# =====================================================================
75# --- globals
76# =====================================================================
79POWER_SUPPLY_PATH = "/sys/class/power_supply"
80HAS_PROC_SMAPS = os.path.exists('/proc/%s/smaps' % os.getpid())
81HAS_PROC_SMAPS_ROLLUP = os.path.exists('/proc/%s/smaps_rollup' % os.getpid())
82HAS_PROC_IO_PRIORITY = hasattr(cext, "proc_ioprio_get")
83HAS_CPU_AFFINITY = hasattr(cext, "proc_cpu_affinity_get")
85# Number of clock ticks per second
86CLOCK_TICKS = os.sysconf("SC_CLK_TCK")
87PAGESIZE = cext_posix.getpagesize()
88BOOT_TIME = None # set later
89LITTLE_ENDIAN = sys.byteorder == 'little'
91# "man iostat" states that sectors are equivalent with blocks and have
92# a size of 512 bytes. Despite this value can be queried at runtime
93# via /sys/block/{DISK}/queue/hw_sector_size and results may vary
94# between 1k, 2k, or 4k... 512 appears to be a magic constant used
95# throughout Linux source code:
96# * https://stackoverflow.com/a/38136179/376587
97# * https://lists.gt.net/linux/kernel/2241060
98# * https://github.com/giampaolo/psutil/issues/1305
99# * https://github.com/torvalds/linux/blob/
100# 4f671fe2f9523a1ea206f63fe60a7c7b3a56d5c7/include/linux/bio.h#L99
101# * https://lkml.org/lkml/2015/8/17/234
102DISK_SECTOR_SIZE = 512
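# A hedged sketch (not part of psutil): the per-disk hardware sector size
# mentioned above could be queried at runtime roughly like this; the helper
# name _hw_sector_size_example() and the default "sda" are illustrative only.
def _hw_sector_size_example(disk="sda"):
    """Return the hw sector size for *disk*, falling back to 512."""
    try:
        with open("/sys/block/%s/queue/hw_sector_size" % disk) as f:
            return int(f.read().strip())
    except (IOError, OSError, ValueError):
        return DISK_SECTOR_SIZE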
104if enum is None:
105 AF_LINK = socket.AF_PACKET
106else:
107 AddressFamily = enum.IntEnum('AddressFamily',
108 {'AF_LINK': int(socket.AF_PACKET)})
109 AF_LINK = AddressFamily.AF_LINK
111# ioprio_* constants http://linux.die.net/man/2/ioprio_get
112if enum is None:
113 IOPRIO_CLASS_NONE = 0
114 IOPRIO_CLASS_RT = 1
115 IOPRIO_CLASS_BE = 2
116 IOPRIO_CLASS_IDLE = 3
117else:
118 class IOPriority(enum.IntEnum):
119 IOPRIO_CLASS_NONE = 0
120 IOPRIO_CLASS_RT = 1
121 IOPRIO_CLASS_BE = 2
122 IOPRIO_CLASS_IDLE = 3
124 globals().update(IOPriority.__members__)
126# See:
127# https://github.com/torvalds/linux/blame/master/fs/proc/array.c
128# ...and (TASK_* constants):
129# https://github.com/torvalds/linux/blob/master/include/linux/sched.h
130PROC_STATUSES = {
131 "R": _common.STATUS_RUNNING,
132 "S": _common.STATUS_SLEEPING,
133 "D": _common.STATUS_DISK_SLEEP,
134 "T": _common.STATUS_STOPPED,
135 "t": _common.STATUS_TRACING_STOP,
136 "Z": _common.STATUS_ZOMBIE,
137 "X": _common.STATUS_DEAD,
138 "x": _common.STATUS_DEAD,
139 "K": _common.STATUS_WAKE_KILL,
140 "W": _common.STATUS_WAKING,
141 "I": _common.STATUS_IDLE,
142 "P": _common.STATUS_PARKED,
143}
145# https://github.com/torvalds/linux/blob/master/include/net/tcp_states.h
146TCP_STATUSES = {
147 "01": _common.CONN_ESTABLISHED,
148 "02": _common.CONN_SYN_SENT,
149 "03": _common.CONN_SYN_RECV,
150 "04": _common.CONN_FIN_WAIT1,
151 "05": _common.CONN_FIN_WAIT2,
152 "06": _common.CONN_TIME_WAIT,
153 "07": _common.CONN_CLOSE,
154 "08": _common.CONN_CLOSE_WAIT,
155 "09": _common.CONN_LAST_ACK,
156 "0A": _common.CONN_LISTEN,
157 "0B": _common.CONN_CLOSING
158}
161# =====================================================================
162# --- named tuples
163# =====================================================================
166# psutil.virtual_memory()
167svmem = namedtuple(
168 'svmem', ['total', 'available', 'percent', 'used', 'free',
169 'active', 'inactive', 'buffers', 'cached', 'shared', 'slab'])
170# psutil.disk_io_counters()
171sdiskio = namedtuple(
172 'sdiskio', ['read_count', 'write_count',
173 'read_bytes', 'write_bytes',
174 'read_time', 'write_time',
175 'read_merged_count', 'write_merged_count',
176 'busy_time'])
177# psutil.Process().open_files()
178popenfile = namedtuple(
179 'popenfile', ['path', 'fd', 'position', 'mode', 'flags'])
180# psutil.Process().memory_info()
181pmem = namedtuple('pmem', 'rss vms shared text lib data dirty')
182# psutil.Process().memory_full_info()
183pfullmem = namedtuple('pfullmem', pmem._fields + ('uss', 'pss', 'swap'))
184# psutil.Process().memory_maps(grouped=True)
185pmmap_grouped = namedtuple(
186 'pmmap_grouped',
187 ['path', 'rss', 'size', 'pss', 'shared_clean', 'shared_dirty',
188 'private_clean', 'private_dirty', 'referenced', 'anonymous', 'swap'])
189# psutil.Process().memory_maps(grouped=False)
190pmmap_ext = namedtuple(
191 'pmmap_ext', 'addr perms ' + ' '.join(pmmap_grouped._fields))
192# psutil.Process.io_counters()
193pio = namedtuple('pio', ['read_count', 'write_count',
194 'read_bytes', 'write_bytes',
195 'read_chars', 'write_chars'])
196# psutil.Process.cpu_times()
197pcputimes = namedtuple('pcputimes',
198 ['user', 'system', 'children_user', 'children_system',
199 'iowait'])
202# =====================================================================
203# --- utils
204# =====================================================================
207def readlink(path):
208 """Wrapper around os.readlink()."""
209 assert isinstance(path, basestring), path
210 path = os.readlink(path)
211 # readlink() might return paths containing null bytes ('\x00')
212 # resulting in "TypeError: must be encoded string without NULL
213 # bytes, not str" errors when the string is passed to other
214 # fs-related functions (os.*, open(), ...).
215 # Apparently everything after '\x00' is garbage (we can have
216 # ' (deleted)', 'new' and possibly others), see:
217 # https://github.com/giampaolo/psutil/issues/717
218 path = path.split('\x00')[0]
219 # Certain paths have ' (deleted)' appended. Usually this is
220 # bogus as the file actually exists. Even if it doesn't we
221 # don't care.
222 if path.endswith(' (deleted)') and not path_exists_strict(path):
223 path = path[:-10]
224 return path
227def file_flags_to_mode(flags):
228 """Convert file's open() flags into a readable string.
229 Used by Process.open_files().
230 """
231 modes_map = {os.O_RDONLY: 'r', os.O_WRONLY: 'w', os.O_RDWR: 'w+'}
232 mode = modes_map[flags & (os.O_RDONLY | os.O_WRONLY | os.O_RDWR)]
233 if flags & os.O_APPEND:
234 mode = mode.replace('w', 'a', 1)
235 mode = mode.replace('w+', 'r+')
236 # possible values: r, w, a, r+, a+
237 return mode
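# Worked examples for file_flags_to_mode() above (a sketch, not part of
# psutil); the helper below just spells out the expected mappings and is
# never called anywhere.
def _file_flags_to_mode_examples():
    assert file_flags_to_mode(os.O_RDONLY) == 'r'
    assert file_flags_to_mode(os.O_WRONLY) == 'w'
    assert file_flags_to_mode(os.O_WRONLY | os.O_APPEND) == 'a'
    assert file_flags_to_mode(os.O_RDWR) == 'r+'
    assert file_flags_to_mode(os.O_RDWR | os.O_APPEND) == 'a+'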
240def is_storage_device(name):
241 """Return True if the given name refers to a root device (e.g.
242 "sda", "nvme0n1") as opposed to a logical partition (e.g. "sda1",
243 "nvme0n1p1"). If name is a virtual device (e.g. "loop1", "ram")
244 return True.
245 """
246 # Re-adapted from iostat source code, see:
247 # https://github.com/sysstat/sysstat/blob/
248 # 97912938cd476645b267280069e83b1c8dc0e1c7/common.c#L208
249 # Some devices may have a slash in their name (e.g. cciss/c0d0...).
250 name = name.replace('/', '!')
251 including_virtual = True
252 if including_virtual:
253 path = "/sys/block/%s" % name
254 else:
255 path = "/sys/block/%s/device" % name
256 return os.access(path, os.F_OK)
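# Illustrative behavior of is_storage_device() (depends on what /sys/block
# exposes on the host, so these are typical values rather than guarantees):
#   is_storage_device("sda")       -> True   (/sys/block/sda exists)
#   is_storage_device("sda1")      -> False  (partitions live under sda/)
#   is_storage_device("nvme0n1p1") -> False
#   is_storage_device("loop0")     -> True   (virtual devices are included)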
259@memoize
260def set_scputimes_ntuple(procfs_path):
261 """Set a namedtuple of variable fields depending on the CPU times
262 available on this Linux kernel version which may be:
263 (user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
264 [guest_nice]]])
265 Used by cpu_times() function.
266 """
267 global scputimes
268 with open_binary('%s/stat' % procfs_path) as f:
269 values = f.readline().split()[1:]
270 fields = ['user', 'nice', 'system', 'idle', 'iowait', 'irq', 'softirq']
271 vlen = len(values)
272 if vlen >= 8:
273 # Linux >= 2.6.11
274 fields.append('steal')
275 if vlen >= 9:
276 # Linux >= 2.6.24
277 fields.append('guest')
278 if vlen >= 10:
279 # Linux >= 3.2.0
280 fields.append('guest_nice')
281 scputimes = namedtuple('scputimes', fields)
284try:
285 set_scputimes_ntuple("/proc")
286except Exception: # pragma: no cover
287 # Don't want to crash at import time.
288 traceback.print_exc()
289 scputimes = namedtuple('scputimes', 'user system idle')(0.0, 0.0, 0.0)
292# =====================================================================
293# --- prlimit
294# =====================================================================
296# Backport of resource.prlimit() for Python 2. Originally this was done
297 # in C, but CentOS-6, which we use to create manylinux wheels, is too old
298 # and does not support the prlimit() syscall. As such the resulting wheel
299# would not include prlimit(), even when installed on newer systems.
300# This is the only part of psutil using ctypes.
302prlimit = None
303try:
304 from resource import prlimit # python >= 3.4
305except ImportError:
306 import ctypes
308 libc = ctypes.CDLL(None, use_errno=True)
310 if hasattr(libc, "prlimit"):
312 def prlimit(pid, resource_, limits=None):
313 class StructRlimit(ctypes.Structure):
314 _fields_ = [('rlim_cur', ctypes.c_longlong),
315 ('rlim_max', ctypes.c_longlong)]
317 current = StructRlimit()
318 if limits is None:
319 # get
320 ret = libc.prlimit(pid, resource_, None, ctypes.byref(current))
321 else:
322 # set
323 new = StructRlimit()
324 new.rlim_cur = limits[0]
325 new.rlim_max = limits[1]
326 ret = libc.prlimit(
327 pid, resource_, ctypes.byref(new), ctypes.byref(current))
329 if ret != 0:
330 errno = ctypes.get_errno()
331 raise OSError(errno, os.strerror(errno))
332 return (current.rlim_cur, current.rlim_max)
335if prlimit is not None:
336 __extra__all__.extend(
337 [x for x in dir(cext) if x.startswith('RLIM') and x.isupper()])
340# =====================================================================
341# --- system memory
342# =====================================================================
345def calculate_avail_vmem(mems):
346 """Fallback for kernels < 3.14 where /proc/meminfo does not provide
347 "MemAvailable:" column, see:
348 https://blog.famzah.net/2014/09/24/
349 This code reimplements the algorithm outlined here:
350 https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/
351 commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
353 XXX: on recent kernels this calculation differs by ~1.5% from
354 "MemAvailable:" as it's calculated slightly differently, see:
355 https://gitlab.com/procps-ng/procps/issues/42
356 https://github.com/famzah/linux-memavailable-procfs/issues/2
357 It is still way more realistic than doing (free + cached) though.
358 """
359 # Fallback for very old distros. According to
360 # https://git.kernel.org/cgit/linux/kernel/git/torvalds/linux.git/
361 # commit/?id=34e431b0ae398fc54ea69ff85ec700722c9da773
362 # ...long ago "avail" was calculated as (free + cached).
363 # We might fallback in such cases:
364 # "Active(file)" not available: 2.6.28 / Dec 2008
365 # "Inactive(file)" not available: 2.6.28 / Dec 2008
366 # "SReclaimable:" not available: 2.6.19 / Nov 2006
367 # /proc/zoneinfo not available: 2.6.13 / Aug 2005
368 free = mems[b'MemFree:']
369 fallback = free + mems.get(b"Cached:", 0)
370 try:
371 lru_active_file = mems[b'Active(file):']
372 lru_inactive_file = mems[b'Inactive(file):']
373 slab_reclaimable = mems[b'SReclaimable:']
374 except KeyError:
375 return fallback
376 try:
377 f = open_binary('%s/zoneinfo' % get_procfs_path())
378 except IOError:
379 return fallback # kernel 2.6.13
381 watermark_low = 0
382 with f:
383 for line in f:
384 line = line.strip()
385 if line.startswith(b'low'):
386 watermark_low += int(line.split()[1])
387 watermark_low *= PAGESIZE
389 avail = free - watermark_low
390 pagecache = lru_active_file + lru_inactive_file
391 pagecache -= min(pagecache / 2, watermark_low)
392 avail += pagecache
393 avail += slab_reclaimable - min(slab_reclaimable / 2.0, watermark_low)
394 return int(avail)
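# In short, the fallback above estimates (a summary of the code, mirroring
# the kernel commit linked in the docstring):
#
#   pagecache = Active(file) + Inactive(file)
#   avail     = MemFree - watermark_low
#               + pagecache    - min(pagecache / 2,    watermark_low)
#               + SReclaimable - min(SReclaimable / 2, watermark_low)
#
# where watermark_low is the sum of all "low" watermarks in /proc/zoneinfo
# converted to bytes (pages * PAGESIZE).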
397def virtual_memory():
398 """Report virtual memory stats.
399 This implementation matches "free" and "vmstat -s" cmdline
400 utility values and procps-ng-3.3.12 source was used as a reference
401 (2016-09-18):
402 https://gitlab.com/procps-ng/procps/blob/
403 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c
404 For reference, procps-ng-3.3.10 is the version available on Ubuntu
405 16.04.
407 Note about "available" memory: up until psutil 4.3 it was
408 calculated as "avail = (free + buffers + cached)". Now
409 "MemAvailable:" column (kernel 3.14) from /proc/meminfo is used as
410 it's more accurate.
411 That matches "available" column in newer versions of "free".
412 """
413 missing_fields = []
414 mems = {}
415 with open_binary('%s/meminfo' % get_procfs_path()) as f:
416 for line in f:
417 fields = line.split()
418 mems[fields[0]] = int(fields[1]) * 1024
420 # /proc doc states that the available fields in /proc/meminfo vary
421 # by architecture and compile options, but these 3 values are also
422 # returned by sysinfo(2); as such we assume they are always there.
423 total = mems[b'MemTotal:']
424 free = mems[b'MemFree:']
425 try:
426 buffers = mems[b'Buffers:']
427 except KeyError:
428 # https://github.com/giampaolo/psutil/issues/1010
429 buffers = 0
430 missing_fields.append('buffers')
431 try:
432 cached = mems[b"Cached:"]
433 except KeyError:
434 cached = 0
435 missing_fields.append('cached')
436 else:
437 # "free" cmdline utility sums reclaimable to cached.
438 # Older versions of procps used to add slab memory instead.
439 # This got changed in:
440 # https://gitlab.com/procps-ng/procps/commit/
441 # 05d751c4f076a2f0118b914c5e51cfbb4762ad8e
442 cached += mems.get(b"SReclaimable:", 0) # since kernel 2.6.19
444 try:
445 shared = mems[b'Shmem:'] # since kernel 2.6.32
446 except KeyError:
447 try:
448 shared = mems[b'MemShared:'] # kernels 2.4
449 except KeyError:
450 shared = 0
451 missing_fields.append('shared')
453 try:
454 active = mems[b"Active:"]
455 except KeyError:
456 active = 0
457 missing_fields.append('active')
459 try:
460 inactive = mems[b"Inactive:"]
461 except KeyError:
462 try:
463 inactive = \
464 mems[b"Inact_dirty:"] + \
465 mems[b"Inact_clean:"] + \
466 mems[b"Inact_laundry:"]
467 except KeyError:
468 inactive = 0
469 missing_fields.append('inactive')
471 try:
472 slab = mems[b"Slab:"]
473 except KeyError:
474 slab = 0
476 used = total - free - cached - buffers
477 if used < 0:
478 # May be symptomatic of running within an LXC container where such
479 # values will be dramatically distorted compared to those of the host.
480 used = total - free
482 # - starting from 4.4.0 we match free's "available" column.
483 # Before 4.4.0 we calculated it as (free + buffers + cached)
484 # which matched htop.
485 # - free and htop available memory differs as per:
486 # http://askubuntu.com/a/369589
487 # http://unix.stackexchange.com/a/65852/168884
488 # - MemAvailable has been introduced in kernel 3.14
489 try:
490 avail = mems[b'MemAvailable:']
491 except KeyError:
492 avail = calculate_avail_vmem(mems)
494 if avail < 0:
495 avail = 0
496 missing_fields.append('available')
498 # If avail is greater than total or our calculation overflows,
499 # that's symptomatic of running within an LXC container where such
500 # values will be dramatically distorted compared to those of the host.
501 # https://gitlab.com/procps-ng/procps/blob/
502 # 24fd2605c51fccc375ab0287cec33aa767f06718/proc/sysinfo.c#L764
503 if avail > total:
504 avail = free
506 percent = usage_percent((total - avail), total, round_=1)
508 # Warn about missing metrics which are set to 0.
509 if missing_fields:
510 msg = "%s memory stats couldn't be determined and %s set to 0" % (
511 ", ".join(missing_fields),
512 "was" if len(missing_fields) == 1 else "were")
513 warnings.warn(msg, RuntimeWarning)
515 return svmem(total, avail, percent, used, free,
516 active, inactive, buffers, cached, shared, slab)
519def swap_memory():
520 """Return swap memory metrics."""
521 mems = {}
522 with open_binary('%s/meminfo' % get_procfs_path()) as f:
523 for line in f:
524 fields = line.split()
525 mems[fields[0]] = int(fields[1]) * 1024
526 # We prefer /proc/meminfo over sysinfo() syscall so that
527 # psutil.PROCFS_PATH can be used in order to allow retrieval
528 # for linux containers, see:
529 # https://github.com/giampaolo/psutil/issues/1015
530 try:
531 total = mems[b'SwapTotal:']
532 free = mems[b'SwapFree:']
533 except KeyError:
534 _, _, _, _, total, free, unit_multiplier = cext.linux_sysinfo()
535 total *= unit_multiplier
536 free *= unit_multiplier
538 used = total - free
539 percent = usage_percent(used, total, round_=1)
540 # get pgin/pgouts
541 try:
542 f = open_binary("%s/vmstat" % get_procfs_path())
543 except IOError as err:
544 # see https://github.com/giampaolo/psutil/issues/722
545 msg = "'sin' and 'sout' swap memory stats couldn't " \
546 "be determined and were set to 0 (%s)" % str(err)
547 warnings.warn(msg, RuntimeWarning)
548 sin = sout = 0
549 else:
550 with f:
551 sin = sout = None
552 for line in f:
553 # values are expressed in 4 KiB units; we want
554 # bytes instead
555 if line.startswith(b'pswpin'):
556 sin = int(line.split(b' ')[1]) * 4 * 1024
557 elif line.startswith(b'pswpout'):
558 sout = int(line.split(b' ')[1]) * 4 * 1024
559 if sin is not None and sout is not None:
560 break
561 else:
562 # we might get here when dealing with exotic Linux
563 # flavors, see:
564 # https://github.com/giampaolo/psutil/issues/313
565 msg = "'sin' and 'sout' swap memory stats couldn't " \
566 "be determined and were set to 0"
567 warnings.warn(msg, RuntimeWarning)
568 sin = sout = 0
569 return _common.sswap(total, used, free, percent, sin, sout)
572# =====================================================================
573# --- CPU
574# =====================================================================
577def cpu_times():
578 """Return a named tuple representing the following system-wide
579 CPU times:
580 (user, nice, system, idle, iowait, irq, softirq, [steal, [guest,
581 [guest_nice]]])
582 The last 3 fields may not be available on all Linux kernel versions.
583 """
584 procfs_path = get_procfs_path()
585 set_scputimes_ntuple(procfs_path)
586 with open_binary('%s/stat' % procfs_path) as f:
587 values = f.readline().split()
588 fields = values[1:len(scputimes._fields) + 1]
589 fields = [float(x) / CLOCK_TICKS for x in fields]
590 return scputimes(*fields)
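# Unit note (illustrative): /proc/stat expresses CPU times in clock ticks
# (jiffies). With CLOCK_TICKS == 100, a raw field of 12345 ticks becomes
# 12345 / 100 = 123.45 seconds in the namedtuple returned above.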
593def per_cpu_times():
594 """Return a list of namedtuple representing the CPU times
595 for every CPU available on the system.
596 """
597 procfs_path = get_procfs_path()
598 set_scputimes_ntuple(procfs_path)
599 cpus = []
600 with open_binary('%s/stat' % procfs_path) as f:
601 # get rid of the first line which refers to system wide CPU stats
602 f.readline()
603 for line in f:
604 if line.startswith(b'cpu'):
605 values = line.split()
606 fields = values[1:len(scputimes._fields) + 1]
607 fields = [float(x) / CLOCK_TICKS for x in fields]
608 entry = scputimes(*fields)
609 cpus.append(entry)
610 return cpus
613def cpu_count_logical():
614 """Return the number of logical CPUs in the system."""
615 try:
616 return os.sysconf("SC_NPROCESSORS_ONLN")
617 except ValueError:
618 # as a second fallback we try to parse /proc/cpuinfo
619 num = 0
620 with open_binary('%s/cpuinfo' % get_procfs_path()) as f:
621 for line in f:
622 if line.lower().startswith(b'processor'):
623 num += 1
625 # unknown format (e.g. armel/sparc architectures), see:
626 # https://github.com/giampaolo/psutil/issues/200
627 # try to parse /proc/stat as a last resort
628 if num == 0:
629 search = re.compile(r'cpu\d')
630 with open_text('%s/stat' % get_procfs_path()) as f:
631 for line in f:
632 line = line.split(' ')[0]
633 if search.match(line):
634 num += 1
636 if num == 0:
637 # mimic os.cpu_count()
638 return None
639 return num
642def cpu_count_cores():
643 """Return the number of CPU cores in the system."""
644 # Method #1
645 ls = set()
646 # These 2 files are the same but */core_cpus_list is newer while
647 # */thread_siblings_list is deprecated and may disappear in the future.
648 # https://www.kernel.org/doc/Documentation/admin-guide/cputopology.rst
649 # https://github.com/giampaolo/psutil/pull/1727#issuecomment-707624964
650 # https://lkml.org/lkml/2019/2/26/41
651 p1 = "/sys/devices/system/cpu/cpu[0-9]*/topology/core_cpus_list"
652 p2 = "/sys/devices/system/cpu/cpu[0-9]*/topology/thread_siblings_list"
653 for path in glob.glob(p1) or glob.glob(p2):
654 with open_binary(path) as f:
655 ls.add(f.read().strip())
656 result = len(ls)
657 if result != 0:
658 return result
660 # Method #2
661 mapping = {}
662 current_info = {}
663 with open_binary('%s/cpuinfo' % get_procfs_path()) as f:
664 for line in f:
665 line = line.strip().lower()
666 if not line:
667 # new section
668 try:
669 mapping[current_info[b'physical id']] = \
670 current_info[b'cpu cores']
671 except KeyError:
672 pass
673 current_info = {}
674 else:
675 # ongoing section
676 if line.startswith((b'physical id', b'cpu cores')):
677 key, value = line.split(b'\t:', 1)
678 current_info[key] = int(value)
680 result = sum(mapping.values())
681 return result or None # mimic os.cpu_count()
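# Example of Method #1 above (hypothetical 4-core / 8-thread box): the eight
# per-CPU core_cpus_list files would contain "0,4", "1,5", "2,6", "3,7",
# "0,4", "1,5", "2,6", "3,7"; collecting them into a set() collapses the
# duplicates to 4 unique entries, i.e. 4 physical cores.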
684def cpu_stats():
685 """Return various CPU stats as a named tuple."""
686 with open_binary('%s/stat' % get_procfs_path()) as f:
687 ctx_switches = None
688 interrupts = None
689 soft_interrupts = None
690 for line in f:
691 if line.startswith(b'ctxt'):
692 ctx_switches = int(line.split()[1])
693 elif line.startswith(b'intr'):
694 interrupts = int(line.split()[1])
695 elif line.startswith(b'softirq'):
696 soft_interrupts = int(line.split()[1])
697 if ctx_switches is not None and soft_interrupts is not None \
698 and interrupts is not None:
699 break
700 syscalls = 0
701 return _common.scpustats(
702 ctx_switches, interrupts, soft_interrupts, syscalls)
705def _cpu_get_cpuinfo_freq():
706 """Return current CPU frequency from cpuinfo if available.
707 """
708 ret = []
709 with open_binary('%s/cpuinfo' % get_procfs_path()) as f:
710 for line in f:
711 if line.lower().startswith(b'cpu mhz'):
712 ret.append(float(line.split(b':', 1)[1]))
713 return ret
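# Unit note for cpu_freq() below (a summary of the conversions in the code):
# "cpu MHz" values from /proc/cpuinfo are already MHz and get multiplied by
# 1000 to normalize them to kHz, while the sysfs scaling_*_freq files are
# natively expressed in kHz; everything is then divided by 1000 so the
# returned numbers are MHz, e.g. scaling_cur_freq == 1797233 -> 1797.233 MHz.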
716if os.path.exists("/sys/devices/system/cpu/cpufreq/policy0") or \
717 os.path.exists("/sys/devices/system/cpu/cpu0/cpufreq"):
718 def cpu_freq():
719 """Return frequency metrics for all CPUs.
720 Contrary to other OSes, Linux updates these values in
721 real-time.
722 """
723 cpuinfo_freqs = _cpu_get_cpuinfo_freq()
724 paths = \
725 glob.glob("/sys/devices/system/cpu/cpufreq/policy[0-9]*") or \
726 glob.glob("/sys/devices/system/cpu/cpu[0-9]*/cpufreq")
727 paths.sort(key=lambda x: int(re.search(r"[0-9]+", x).group()))
728 ret = []
729 pjoin = os.path.join
730 for i, path in enumerate(paths):
731 if len(paths) == len(cpuinfo_freqs):
732 # take cached value from cpuinfo if available, see:
733 # https://github.com/giampaolo/psutil/issues/1851
734 curr = cpuinfo_freqs[i] * 1000
735 else:
736 curr = bcat(pjoin(path, "scaling_cur_freq"), fallback=None)
737 if curr is None:
738 # Likely an old RedHat, see:
739 # https://github.com/giampaolo/psutil/issues/1071
740 curr = bcat(pjoin(path, "cpuinfo_cur_freq"), fallback=None)
741 if curr is None:
742 raise NotImplementedError(
743 "can't find current frequency file")
744 curr = int(curr) / 1000
745 max_ = int(bcat(pjoin(path, "scaling_max_freq"))) / 1000
746 min_ = int(bcat(pjoin(path, "scaling_min_freq"))) / 1000
747 ret.append(_common.scpufreq(curr, min_, max_))
748 return ret
750else:
751 def cpu_freq():
752 """Alternate implementation using /proc/cpuinfo.
753 min and max frequencies are not available and are set to None.
754 """
755 return [_common.scpufreq(x, 0., 0.) for x in _cpu_get_cpuinfo_freq()]
758# =====================================================================
759# --- network
760# =====================================================================
763net_if_addrs = cext_posix.net_if_addrs
766class _Ipv6UnsupportedError(Exception):
767 pass
770class Connections:
771 """A wrapper on top of /proc/net/* files, retrieving per-process
772 and system-wide open connections (TCP, UDP, UNIX) similarly to
773 "netstat -an".
775 Note: in case of UNIX sockets we're only able to determine the
776 local endpoint/path, not the one it's connected to.
777 According to [1] it would be possible, but not easy.
779 [1] http://serverfault.com/a/417946
780 """
782 def __init__(self):
783 # The string represents the basename of the corresponding
784 # /proc/net/{proto_name} file.
785 tcp4 = ("tcp", socket.AF_INET, socket.SOCK_STREAM)
786 tcp6 = ("tcp6", socket.AF_INET6, socket.SOCK_STREAM)
787 udp4 = ("udp", socket.AF_INET, socket.SOCK_DGRAM)
788 udp6 = ("udp6", socket.AF_INET6, socket.SOCK_DGRAM)
789 unix = ("unix", socket.AF_UNIX, None)
790 self.tmap = {
791 "all": (tcp4, tcp6, udp4, udp6, unix),
792 "tcp": (tcp4, tcp6),
793 "tcp4": (tcp4,),
794 "tcp6": (tcp6,),
795 "udp": (udp4, udp6),
796 "udp4": (udp4,),
797 "udp6": (udp6,),
798 "unix": (unix,),
799 "inet": (tcp4, tcp6, udp4, udp6),
800 "inet4": (tcp4, udp4),
801 "inet6": (tcp6, udp6),
802 }
803 self._procfs_path = None
805 def get_proc_inodes(self, pid):
806 inodes = defaultdict(list)
807 for fd in os.listdir("%s/%s/fd" % (self._procfs_path, pid)):
808 try:
809 inode = readlink("%s/%s/fd/%s" % (self._procfs_path, pid, fd))
810 except (FileNotFoundError, ProcessLookupError):
811 # ENOENT == file which is gone in the meantime;
812 # os.stat('/proc/%s' % self.pid) will be done later
813 # to force NSP (if it's the case)
814 continue
815 except OSError as err:
816 if err.errno == errno.EINVAL:
817 # not a link
818 continue
819 if err.errno == errno.ENAMETOOLONG:
820 # file name too long
821 debug(err)
822 continue
823 raise
824 else:
825 if inode.startswith('socket:['):
826 # the process is using a socket
827 inode = inode[8:][:-1]
828 inodes[inode].append((pid, int(fd)))
829 return inodes
831 def get_all_inodes(self):
832 inodes = {}
833 for pid in pids():
834 try:
835 inodes.update(self.get_proc_inodes(pid))
836 except (FileNotFoundError, ProcessLookupError, PermissionError):
837 # os.listdir() is gonna raise a lot of access denied
838 # exceptions in case of unprivileged user; that's fine
839 # as we'll just end up returning a connection with PID
840 # and fd set to None anyway.
841 # Both netstat -an and lsof do the same, so it's
842 # unlikely we can do any better.
843 # ENOENT just means a PID disappeared on us.
844 continue
845 return inodes
847 @staticmethod
848 def decode_address(addr, family):
849 """Accept an "ip:port" address as displayed in /proc/net/*
850 and convert it into a human readable form, like:
852 "0500000A:0016" -> ("10.0.0.5", 22)
853 "0000000000000000FFFF00000100007F:9E49" -> ("::ffff:127.0.0.1", 40521)
855 The IP address portion is a hex-encoded number in host byte
856 order; on little-endian machines the least significant byte is
857 listed first, so we need to reverse the byte order to convert it
858 to an IP address.
859 The port is represented as a two-byte hexadecimal number.
861 Reference:
862 http://linuxdevcenter.com/pub/a/linux/2000/11/16/LinuxAdmin.html
863 """
864 ip, port = addr.split(':')
865 port = int(port, 16)
866 # this usually refers to a local socket in listen mode with
867 # no end-points connected
868 if not port:
869 return ()
870 if PY3:
871 ip = ip.encode('ascii')
872 if family == socket.AF_INET:
873 # see: https://github.com/giampaolo/psutil/issues/201
874 if LITTLE_ENDIAN:
875 ip = socket.inet_ntop(family, base64.b16decode(ip)[::-1])
876 else:
877 ip = socket.inet_ntop(family, base64.b16decode(ip))
878 else: # IPv6
879 ip = base64.b16decode(ip)
880 try:
881 # see: https://github.com/giampaolo/psutil/issues/201
882 if LITTLE_ENDIAN:
883 ip = socket.inet_ntop(
884 socket.AF_INET6,
885 struct.pack('>4I', *struct.unpack('<4I', ip)))
886 else:
887 ip = socket.inet_ntop(
888 socket.AF_INET6,
889 struct.pack('<4I', *struct.unpack('<4I', ip)))
890 except ValueError:
891 # see: https://github.com/giampaolo/psutil/issues/623
892 if not supports_ipv6():
893 raise _Ipv6UnsupportedError
894 else:
895 raise
896 return _common.addr(ip, port)
898 @staticmethod
899 def process_inet(file, family, type_, inodes, filter_pid=None):
900 """Parse /proc/net/tcp* and /proc/net/udp* files."""
901 if file.endswith('6') and not os.path.exists(file):
902 # IPv6 not supported
903 return
904 with open_text(file) as f:
905 f.readline() # skip the first line
906 for lineno, line in enumerate(f, 1):
907 try:
908 _, laddr, raddr, status, _, _, _, _, _, inode = \
909 line.split()[:10]
910 except ValueError:
911 raise RuntimeError(
912 "error while parsing %s; malformed line %s %r" % (
913 file, lineno, line))
914 if inode in inodes:
915 # # We assume inet sockets are unique, so we error
916 # # out if there are multiple references to the
917 # # same inode. We won't do this for UNIX sockets.
918 # if len(inodes[inode]) > 1 and family != socket.AF_UNIX:
919 # raise ValueError("ambiguous inode with multiple "
920 # "PIDs references")
921 pid, fd = inodes[inode][0]
922 else:
923 pid, fd = None, -1
924 if filter_pid is not None and filter_pid != pid:
925 continue
926 else:
927 if type_ == socket.SOCK_STREAM:
928 status = TCP_STATUSES[status]
929 else:
930 status = _common.CONN_NONE
931 try:
932 laddr = Connections.decode_address(laddr, family)
933 raddr = Connections.decode_address(raddr, family)
934 except _Ipv6UnsupportedError:
935 continue
936 yield (fd, family, type_, laddr, raddr, status, pid)
938 @staticmethod
939 def process_unix(file, family, inodes, filter_pid=None):
940 """Parse /proc/net/unix files."""
941 with open_text(file) as f:
942 f.readline() # skip the first line
943 for line in f:
944 tokens = line.split()
945 try:
946 _, _, _, _, type_, _, inode = tokens[0:7]
947 except ValueError:
948 if ' ' not in line:
949 # see: https://github.com/giampaolo/psutil/issues/766
950 continue
951 raise RuntimeError(
952 "error while parsing %s; malformed line %r" % (
953 file, line))
954 if inode in inodes:
955 # With UNIX sockets we can have a single inode
956 # referencing many file descriptors.
957 pairs = inodes[inode]
958 else:
959 pairs = [(None, -1)]
960 for pid, fd in pairs:
961 if filter_pid is not None and filter_pid != pid:
962 continue
963 else:
964 if len(tokens) == 8:
965 path = tokens[-1]
966 else:
967 path = ""
968 type_ = _common.socktype_to_enum(int(type_))
969 # XXX: determining the remote endpoint of a
970 # UNIX socket on Linux is not possible, see:
971 # https://serverfault.com/questions/252723/
972 raddr = ""
973 status = _common.CONN_NONE
974 yield (fd, family, type_, path, raddr, status, pid)
976 def retrieve(self, kind, pid=None):
977 if kind not in self.tmap:
978 raise ValueError("invalid %r kind argument; choose between %s"
979 % (kind, ', '.join([repr(x) for x in self.tmap])))
980 self._procfs_path = get_procfs_path()
981 if pid is not None:
982 inodes = self.get_proc_inodes(pid)
983 if not inodes:
984 # no connections for this process
985 return []
986 else:
987 inodes = self.get_all_inodes()
988 ret = set()
989 for proto_name, family, type_ in self.tmap[kind]:
990 path = "%s/net/%s" % (self._procfs_path, proto_name)
991 if family in (socket.AF_INET, socket.AF_INET6):
992 ls = self.process_inet(
993 path, family, type_, inodes, filter_pid=pid)
994 else:
995 ls = self.process_unix(
996 path, family, inodes, filter_pid=pid)
997 for fd, family, type_, laddr, raddr, status, bound_pid in ls:
998 if pid:
999 conn = _common.pconn(fd, family, type_, laddr, raddr,
1000 status)
1001 else:
1002 conn = _common.sconn(fd, family, type_, laddr, raddr,
1003 status, bound_pid)
1004 ret.add(conn)
1005 return list(ret)
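# Worked example of Connections.decode_address() (a sketch, never called;
# the hex strings are hypothetical /proc/net/tcp values and the first
# assertion assumes a little-endian machine):
def _decode_address_example():
    # "0500000A" reversed byte-wise is 0A.00.00.05 == "10.0.0.5";
    # the port is plain hex, 0x0016 == 22.
    assert Connections.decode_address("0500000A:0016", socket.AF_INET) == \
        ("10.0.0.5", 22)
    # A zero port means an unconnected endpoint and yields an empty tuple.
    assert Connections.decode_address("00000000:0000", socket.AF_INET) == ()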
1008_connections = Connections()
1011def net_connections(kind='inet'):
1012 """Return system-wide open connections."""
1013 return _connections.retrieve(kind)
1016def net_io_counters():
1017 """Return network I/O statistics for every network interface
1018 installed on the system as a dict of raw tuples.
1019 """
1020 with open_text("%s/net/dev" % get_procfs_path()) as f:
1021 lines = f.readlines()
1022 retdict = {}
1023 for line in lines[2:]:
1024 colon = line.rfind(':')
1025 assert colon > 0, repr(line)
1026 name = line[:colon].strip()
1027 fields = line[colon + 1:].strip().split()
1029 # in
1030 (bytes_recv,
1031 packets_recv,
1032 errin,
1033 dropin,
1034 fifoin, # unused
1035 framein, # unused
1036 compressedin, # unused
1037 multicastin, # unused
1038 # out
1039 bytes_sent,
1040 packets_sent,
1041 errout,
1042 dropout,
1043 fifoout, # unused
1044 collisionsout, # unused
1045 carrierout, # unused
1046 compressedout) = map(int, fields)
1048 retdict[name] = (bytes_sent, bytes_recv, packets_sent, packets_recv,
1049 errin, errout, dropin, dropout)
1050 return retdict
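# For reference, each /proc/net/dev data line parsed above looks roughly
# like this (illustrative values):
#   eth0: 9386731 10421 0 0 0 0 0 0 1697899 9212 0 0 0 0 0 0
# i.e. 8 receive columns followed by 8 transmit columns, which is why
# exactly 16 integers are unpacked per interface.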
1053def net_if_stats():
1054 """Get NIC stats (isup, duplex, speed, mtu)."""
1055 duplex_map = {cext.DUPLEX_FULL: NIC_DUPLEX_FULL,
1056 cext.DUPLEX_HALF: NIC_DUPLEX_HALF,
1057 cext.DUPLEX_UNKNOWN: NIC_DUPLEX_UNKNOWN}
1058 names = net_io_counters().keys()
1059 ret = {}
1060 for name in names:
1061 try:
1062 mtu = cext_posix.net_if_mtu(name)
1063 flags = cext_posix.net_if_flags(name)
1064 duplex, speed = cext.net_if_duplex_speed(name)
1065 except OSError as err:
1066 # https://github.com/giampaolo/psutil/issues/1279
1067 if err.errno != errno.ENODEV:
1068 raise
1069 else:
1070 debug(err)
1071 else:
1072 output_flags = ','.join(flags)
1073 isup = 'running' in flags
1074 ret[name] = _common.snicstats(isup, duplex_map[duplex], speed, mtu,
1075 output_flags)
1076 return ret
1079# =====================================================================
1080# --- disks
1081# =====================================================================
1084disk_usage = _psposix.disk_usage
1087def disk_io_counters(perdisk=False):
1088 """Return disk I/O statistics for every disk installed on the
1089 system as a dict of raw tuples.
1090 """
1091 def read_procfs():
1092 # OK, this is a bit confusing. The format of /proc/diskstats can
1093 # have 3 variations.
1094 # On Linux 2.4 each line has always 15 fields, e.g.:
1095 # "3 0 8 hda 8 8 8 8 8 8 8 8 8 8 8"
1096 # On Linux 2.6+ each line *usually* has 14 fields, and the disk
1097 # name is in another position, like this:
1098 # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8"
1099 # ...unless (Linux 2.6) the line refers to a partition instead
1100 # of a disk, in which case the line has fewer fields (7):
1101 # "3 1 hda1 8 8 8 8"
1102 # 4.18+ has 4 fields added:
1103 # "3 0 hda 8 8 8 8 8 8 8 8 8 8 8 0 0 0 0"
1104 # 5.5 has 2 more fields.
1105 # See:
1106 # https://www.kernel.org/doc/Documentation/iostats.txt
1107 # https://www.kernel.org/doc/Documentation/ABI/testing/procfs-diskstats
1108 with open_text("%s/diskstats" % get_procfs_path()) as f:
1109 lines = f.readlines()
1110 for line in lines:
1111 fields = line.split()
1112 flen = len(fields)
1113 if flen == 15:
1114 # Linux 2.4
1115 name = fields[3]
1116 reads = int(fields[2])
1117 (reads_merged, rbytes, rtime, writes, writes_merged,
1118 wbytes, wtime, _, busy_time, _) = map(int, fields[4:14])
1119 elif flen == 14 or flen >= 18:
1120 # Linux 2.6+, line referring to a disk
1121 name = fields[2]
1122 (reads, reads_merged, rbytes, rtime, writes, writes_merged,
1123 wbytes, wtime, _, busy_time, _) = map(int, fields[3:14])
1124 elif flen == 7:
1125 # Linux 2.6+, line referring to a partition
1126 name = fields[2]
1127 reads, rbytes, writes, wbytes = map(int, fields[3:])
1128 rtime = wtime = reads_merged = writes_merged = busy_time = 0
1129 else:
1130 raise ValueError("not sure how to interpret line %r" % line)
1131 yield (name, reads, writes, rbytes, wbytes, rtime, wtime,
1132 reads_merged, writes_merged, busy_time)
1134 def read_sysfs():
1135 for block in os.listdir('/sys/block'):
1136 for root, _, files in os.walk(os.path.join('/sys/block', block)):
1137 if 'stat' not in files:
1138 continue
1139 with open_text(os.path.join(root, 'stat')) as f:
1140 fields = f.read().strip().split()
1141 name = os.path.basename(root)
1142 (reads, reads_merged, rbytes, rtime, writes, writes_merged,
1143 wbytes, wtime, _, busy_time) = map(int, fields[:10])
1144 yield (name, reads, writes, rbytes, wbytes, rtime,
1145 wtime, reads_merged, writes_merged, busy_time)
1147 if os.path.exists('%s/diskstats' % get_procfs_path()):
1148 gen = read_procfs()
1149 elif os.path.exists('/sys/block'):
1150 gen = read_sysfs()
1151 else:
1152 raise NotImplementedError(
1153 "%s/diskstats nor /sys/block filesystem are available on this "
1154 "system" % get_procfs_path())
1156 retdict = {}
1157 for entry in gen:
1158 (name, reads, writes, rbytes, wbytes, rtime, wtime, reads_merged,
1159 writes_merged, busy_time) = entry
1160 if not perdisk and not is_storage_device(name):
1161 # perdisk=False means we want to calculate totals so we skip
1162 # partitions (e.g. 'sda1', 'nvme0n1p1') and only include
1163 # base disk devices (e.g. 'sda', 'nvme0n1'). Base disks
1164 # include a total of all their partitions + some extra size
1165 # of their own:
1166 # $ cat /proc/diskstats
1167 # 259 0 sda 10485760 ...
1168 # 259 1 sda1 5186039 ...
1169 # 259 1 sda2 5082039 ...
1170 # See:
1171 # https://github.com/giampaolo/psutil/pull/1313
1172 continue
1174 rbytes *= DISK_SECTOR_SIZE
1175 wbytes *= DISK_SECTOR_SIZE
1176 retdict[name] = (reads, writes, rbytes, wbytes, rtime, wtime,
1177 reads_merged, writes_merged, busy_time)
1179 return retdict
1182class RootFsDeviceFinder:
1183 """disk_partitions() may return partitions with device == "/dev/root"
1184 or "rootfs". This container class uses different strategies to try to
1185 obtain the real device path. Resources:
1186 https://bootlin.com/blog/find-root-device/
1187 https://www.systutorials.com/how-to-find-the-disk-where-root-is-on-in-bash-on-linux/
1188 """
1189 __slots__ = ['major', 'minor']
1191 def __init__(self):
1192 dev = os.stat("/").st_dev
1193 self.major = os.major(dev)
1194 self.minor = os.minor(dev)
1196 def ask_proc_partitions(self):
1197 with open_text("%s/partitions" % get_procfs_path()) as f:
1198 for line in f.readlines()[2:]:
1199 fields = line.split()
1200 if len(fields) < 4: # just for extra safety
1201 continue
1202 major = int(fields[0]) if fields[0].isdigit() else None
1203 minor = int(fields[1]) if fields[1].isdigit() else None
1204 name = fields[3]
1205 if major == self.major and minor == self.minor:
1206 if name: # just for extra safety
1207 return "/dev/%s" % name
1209 def ask_sys_dev_block(self):
1210 path = "/sys/dev/block/%s:%s/uevent" % (self.major, self.minor)
1211 with open_text(path) as f:
1212 for line in f:
1213 if line.startswith("DEVNAME="):
1214 name = line.strip().rpartition("DEVNAME=")[2]
1215 if name: # just for extra safety
1216 return "/dev/%s" % name
1218 def ask_sys_class_block(self):
1219 needle = "%s:%s" % (self.major, self.minor)
1220 files = glob.iglob("/sys/class/block/*/dev")
1221 for file in files:
1222 try:
1223 f = open_text(file)
1224 except FileNotFoundError: # race condition
1225 continue
1226 else:
1227 with f:
1228 data = f.read().strip()
1229 if data == needle:
1230 name = os.path.basename(os.path.dirname(file))
1231 return "/dev/%s" % name
1233 def find(self):
1234 path = None
1235 if path is None:
1236 try:
1237 path = self.ask_proc_partitions()
1238 except (IOError, OSError) as err:
1239 debug(err)
1240 if path is None:
1241 try:
1242 path = self.ask_sys_dev_block()
1243 except (IOError, OSError) as err:
1244 debug(err)
1245 if path is None:
1246 try:
1247 path = self.ask_sys_class_block()
1248 except (IOError, OSError) as err:
1249 debug(err)
1250 # We use exists() because the "/dev/*" part of the path is hard
1251 # coded, so we want to be sure.
1252 if path is not None and os.path.exists(path):
1253 return path
1256def disk_partitions(all=False):
1257 """Return mounted disk partitions as a list of namedtuples."""
1258 fstypes = set()
1259 procfs_path = get_procfs_path()
1260 with open_text("%s/filesystems" % procfs_path) as f:
1261 for line in f:
1262 line = line.strip()
1263 if not line.startswith("nodev"):
1264 fstypes.add(line.strip())
1265 else:
1266 # ignore all lines starting with "nodev" except "nodev zfs"
1267 fstype = line.split("\t")[1]
1268 if fstype == "zfs":
1269 fstypes.add("zfs")
1271 # See: https://github.com/giampaolo/psutil/issues/1307
1272 if procfs_path == "/proc" and os.path.isfile('/etc/mtab'):
1273 mounts_path = os.path.realpath("/etc/mtab")
1274 else:
1275 mounts_path = os.path.realpath("%s/self/mounts" % procfs_path)
1277 retlist = []
1278 partitions = cext.disk_partitions(mounts_path)
1279 for partition in partitions:
1280 device, mountpoint, fstype, opts = partition
1281 if device == 'none':
1282 device = ''
1283 if device in ("/dev/root", "rootfs"):
1284 device = RootFsDeviceFinder().find() or device
1285 if not all:
1286 if device == '' or fstype not in fstypes:
1287 continue
1288 maxfile = maxpath = None # set later
1289 ntuple = _common.sdiskpart(device, mountpoint, fstype, opts,
1290 maxfile, maxpath)
1291 retlist.append(ntuple)
1293 return retlist
1296# =====================================================================
1297# --- sensors
1298# =====================================================================
1301def sensors_temperatures():
1302 """Return hardware (CPU and others) temperatures as a dict
1303 including hardware name, label, current, max and critical
1304 temperatures.
1306 Implementation notes:
1307 - /sys/class/hwmon looks like the most recent interface to
1308 retrieve this info, and this implementation relies on it
1309 only (old distros will probably use something else)
1310 - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
1311 - /sys/class/thermal/thermal_zone* is another one but it's more
1312 difficult to parse
1313 """
1314 ret = collections.defaultdict(list)
1315 basenames = glob.glob('/sys/class/hwmon/hwmon*/temp*_*')
1316 # CentOS has an intermediate /device directory:
1317 # https://github.com/giampaolo/psutil/issues/971
1318 # https://github.com/nicolargo/glances/issues/1060
1319 basenames.extend(glob.glob('/sys/class/hwmon/hwmon*/device/temp*_*'))
1320 basenames = sorted(set([x.split('_')[0] for x in basenames]))
1322 # Only add the coretemp hwmon entries if they're not already in
1323 # /sys/class/hwmon/
1324 # https://github.com/giampaolo/psutil/issues/1708
1325 # https://github.com/giampaolo/psutil/pull/1648
1326 basenames2 = glob.glob(
1327 '/sys/devices/platform/coretemp.*/hwmon/hwmon*/temp*_*')
1328 repl = re.compile('/sys/devices/platform/coretemp.*/hwmon/')
1329 for name in basenames2:
1330 altname = repl.sub('/sys/class/hwmon/', name)
1331 if altname not in basenames:
1332 basenames.append(name)
1334 for base in basenames:
1335 try:
1336 path = base + '_input'
1337 current = float(bcat(path)) / 1000.0
1338 path = os.path.join(os.path.dirname(base), 'name')
1339 unit_name = cat(path).strip()
1340 except (IOError, OSError, ValueError):
1341 # A lot of things can go wrong here, so let's just skip the
1342 # whole entry. Sure thing is Linux's /sys/class/hwmon really
1343 # is a stinky broken mess.
1344 # https://github.com/giampaolo/psutil/issues/1009
1345 # https://github.com/giampaolo/psutil/issues/1101
1346 # https://github.com/giampaolo/psutil/issues/1129
1347 # https://github.com/giampaolo/psutil/issues/1245
1348 # https://github.com/giampaolo/psutil/issues/1323
1349 continue
1351 high = bcat(base + '_max', fallback=None)
1352 critical = bcat(base + '_crit', fallback=None)
1353 label = cat(base + '_label', fallback='').strip()
1355 if high is not None:
1356 try:
1357 high = float(high) / 1000.0
1358 except ValueError:
1359 high = None
1360 if critical is not None:
1361 try:
1362 critical = float(critical) / 1000.0
1363 except ValueError:
1364 critical = None
1366 ret[unit_name].append((label, current, high, critical))
1368 # Indication that no sensors were detected in /sys/class/hwmon/
1369 if not basenames:
1370 basenames = glob.glob('/sys/class/thermal/thermal_zone*')
1371 basenames = sorted(set(basenames))
1373 for base in basenames:
1374 try:
1375 path = os.path.join(base, 'temp')
1376 current = float(bcat(path)) / 1000.0
1377 path = os.path.join(base, 'type')
1378 unit_name = cat(path).strip()
1379 except (IOError, OSError, ValueError) as err:
1380 debug(err)
1381 continue
1383 trip_paths = glob.glob(base + '/trip_point*')
1384 trip_points = set(['_'.join(
1385 os.path.basename(p).split('_')[0:3]) for p in trip_paths])
1386 critical = None
1387 high = None
1388 for trip_point in trip_points:
1389 path = os.path.join(base, trip_point + "_type")
1390 trip_type = cat(path, fallback='').strip()
1391 if trip_type == 'critical':
1392 critical = bcat(os.path.join(base, trip_point + "_temp"),
1393 fallback=None)
1394 elif trip_type == 'high':
1395 high = bcat(os.path.join(base, trip_point + "_temp"),
1396 fallback=None)
1398 if high is not None:
1399 try:
1400 high = float(high) / 1000.0
1401 except ValueError:
1402 high = None
1403 if critical is not None:
1404 try:
1405 critical = float(critical) / 1000.0
1406 except ValueError:
1407 critical = None
1409 ret[unit_name].append(('', current, high, critical))
1411 return dict(ret)
1414def sensors_fans():
1415 """Return hardware fans info (for CPU and other peripherals) as a
1416 dict including hardware label and current speed.
1418 Implementation notes:
1419 - /sys/class/hwmon looks like the most recent interface to
1420 retrieve this info, and this implementation relies on it
1421 only (old distros will probably use something else)
1422 - lm-sensors on Ubuntu 16.04 relies on /sys/class/hwmon
1423 """
1424 ret = collections.defaultdict(list)
1425 basenames = glob.glob('/sys/class/hwmon/hwmon*/fan*_*')
1426 if not basenames:
1427 # CentOS has an intermediate /device directory:
1428 # https://github.com/giampaolo/psutil/issues/971
1429 basenames = glob.glob('/sys/class/hwmon/hwmon*/device/fan*_*')
1431 basenames = sorted(set([x.split('_')[0] for x in basenames]))
1432 for base in basenames:
1433 try:
1434 current = int(bcat(base + '_input'))
1435 except (IOError, OSError) as err:
1436 debug(err)
1437 continue
1438 unit_name = cat(os.path.join(os.path.dirname(base), 'name')).strip()
1439 label = cat(base + '_label', fallback='').strip()
1440 ret[unit_name].append(_common.sfan(label, current))
1442 return dict(ret)
1445def sensors_battery():
1446 """Return battery information.
1447 Implementation note: it appears /sys/class/power_supply/BAT0/
1448 directory structure may vary and provide files with the same
1449 meaning but under different names, see:
1450 https://github.com/giampaolo/psutil/issues/966
1451 """
1452 null = object()
1454 def multi_bcat(*paths):
1455 """Attempt to read the content of multiple files which may
1456 not exist. If none of them exist return None.
1457 """
1458 for path in paths:
1459 ret = bcat(path, fallback=null)
1460 if ret != null:
1461 try:
1462 return int(ret)
1463 except ValueError:
1464 return ret.strip()
1465 return None
1467 bats = [x for x in os.listdir(POWER_SUPPLY_PATH) if x.startswith('BAT') or
1468 'battery' in x.lower()]
1469 if not bats:
1470 return None
1471 # Get the first available battery. Usually this is "BAT0", except
1472 # some rare exceptions:
1473 # https://github.com/giampaolo/psutil/issues/1238
1474 root = os.path.join(POWER_SUPPLY_PATH, sorted(bats)[0])
1476 # Base metrics.
1477 energy_now = multi_bcat(
1478 root + "/energy_now",
1479 root + "/charge_now")
1480 power_now = multi_bcat(
1481 root + "/power_now",
1482 root + "/current_now")
1483 energy_full = multi_bcat(
1484 root + "/energy_full",
1485 root + "/charge_full")
1486 time_to_empty = multi_bcat(root + "/time_to_empty_now")
1488 # Percent. If we have energy_full the percentage will be more
1489 # accurate compared to reading /capacity file (float vs. int).
1490 if energy_full is not None and energy_now is not None:
1491 try:
1492 percent = 100.0 * energy_now / energy_full
1493 except ZeroDivisionError:
1494 percent = 0.0
1495 else:
1496 percent = int(cat(root + "/capacity", fallback=-1))
1497 if percent == -1:
1498 return None
1500 # Is AC power cable plugged in?
1501 # Note: AC0 is not always available and sometimes (e.g. CentOS7)
1502 # it's called "AC".
1503 power_plugged = None
1504 online = multi_bcat(
1505 os.path.join(POWER_SUPPLY_PATH, "AC0/online"),
1506 os.path.join(POWER_SUPPLY_PATH, "AC/online"))
1507 if online is not None:
1508 power_plugged = online == 1
1509 else:
1510 status = cat(root + "/status", fallback="").strip().lower()
1511 if status == "discharging":
1512 power_plugged = False
1513 elif status in ("charging", "full"):
1514 power_plugged = True
1516 # Seconds left.
1517 # Note to self: we may also calculate the charging ETA as per:
1518 # https://github.com/thialfihar/dotfiles/blob/
1519 # 013937745fd9050c30146290e8f963d65c0179e6/bin/battery.py#L55
1520 if power_plugged:
1521 secsleft = _common.POWER_TIME_UNLIMITED
1522 elif energy_now is not None and power_now is not None:
1523 try:
1524 secsleft = int(energy_now / power_now * 3600)
1525 except ZeroDivisionError:
1526 secsleft = _common.POWER_TIME_UNKNOWN
1527 elif time_to_empty is not None:
1528 secsleft = int(time_to_empty * 60)
1529 if secsleft < 0:
1530 secsleft = _common.POWER_TIME_UNKNOWN
1531 else:
1532 secsleft = _common.POWER_TIME_UNKNOWN
1534 return _common.sbattery(percent, secsleft, power_plugged)
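# Worked example for the "seconds left" estimate above (hypothetical
# values): with energy_now == 24000000 (uWh) and power_now == 8000000 (uW)
# the battery drains in 24 / 8 = 3 hours, i.e.
# secsleft = int(24000000 / 8000000 * 3600) = 10800 seconds.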
1537# =====================================================================
1538# --- other system functions
1539# =====================================================================
1542def users():
1543 """Return currently connected users as a list of namedtuples."""
1544 retlist = []
1545 rawlist = cext.users()
1546 for item in rawlist:
1547 user, tty, hostname, tstamp, user_process, pid = item
1548 # note: the underlying C function includes entries about
1549 # system boot, run level and others. We might want
1550 # to use them in the future.
1551 if not user_process:
1552 continue
1553 if hostname in (':0.0', ':0'):
1554 hostname = 'localhost'
1555 nt = _common.suser(user, tty or None, hostname, tstamp, pid)
1556 retlist.append(nt)
1557 return retlist
1560def boot_time():
1561 """Return the system boot time expressed in seconds since the epoch."""
1562 global BOOT_TIME
1563 path = '%s/stat' % get_procfs_path()
1564 with open_binary(path) as f:
1565 for line in f:
1566 if line.startswith(b'btime'):
1567 ret = float(line.strip().split()[1])
1568 BOOT_TIME = ret
1569 return ret
1570 raise RuntimeError(
1571 "line 'btime' not found in %s" % path)
1574# =====================================================================
1575# --- processes
1576# =====================================================================
1579def pids():
1580 """Returns a list of PIDs currently running on the system."""
1581 return [int(x) for x in os.listdir(b(get_procfs_path())) if x.isdigit()]
1584def pid_exists(pid):
1585 """Check for the existence of a unix PID. Linux TIDs are not
1586 supported (always return False).
1587 """
1588 if not _psposix.pid_exists(pid):
1589 return False
1590 else:
1591 # Linux apparently does not distinguish between PIDs and TIDs
1592 # (thread IDs).
1593 # listdir("/proc") won't show any TID (only PIDs) but
1594 # os.stat("/proc/{tid}") will succeed if {tid} exists.
1595 # os.kill() can also be passed a TID. This is quite confusing.
1596 # In here we want to enforce this distinction and support PIDs
1597 # only, see:
1598 # https://github.com/giampaolo/psutil/issues/687
1599 try:
1600 # Note: already checked that this is faster than using a
1601 # regular expr. Also (a lot) faster than doing
1602 # 'return pid in pids()'
1603 path = "%s/%s/status" % (get_procfs_path(), pid)
1604 with open_binary(path) as f:
1605 for line in f:
1606 if line.startswith(b"Tgid:"):
1607 tgid = int(line.split()[1])
1608 # If tgid and pid are the same then we're
1609 # dealing with a process PID.
1610 return tgid == pid
1611 raise ValueError("'Tgid' line not found in %s" % path)
1612 except (EnvironmentError, ValueError):
1613 return pid in pids()
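# Example of the Tgid check above (hypothetical values): for a thread with
# TID 1235 belonging to process 1234, /proc/1235/status contains
# "Tgid:   1234"; tgid (1234) != pid (1235), so pid_exists(1235) is False,
# while pid_exists(1234) returns True.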
1616def ppid_map():
1617 """Obtain a {pid: ppid, ...} dict for all running processes in
1618 one shot. Used to speed up Process.children().
1619 """
1620 ret = {}
1621 procfs_path = get_procfs_path()
1622 for pid in pids():
1623 try:
1624 with open_binary("%s/%s/stat" % (procfs_path, pid)) as f:
1625 data = f.read()
1626 except (FileNotFoundError, ProcessLookupError):
1627 # Note: we should be able to access /stat for all processes
1628 # aka it's unlikely we'll bump into EPERM, which is good.
1629 pass
1630 else:
1631 rpar = data.rfind(b')')
1632 dset = data[rpar + 2:].split()
1633 ppid = int(dset[1])
1634 ret[pid] = ppid
1635 return ret
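# Illustrative note on the /proc/[pid]/stat parsing used above (and by
# Process._parse_stat_file() further below): the process name is wrapped in
# parentheses and may itself contain spaces or ")", so the code splits on
# the *last* ")" rather than naively on whitespace. A sketch, never called:
def _stat_fields_after_name_example():
    data = b"1234 (my (weird) name) S 1 1234 1234 0 -1"
    rpar = data.rfind(b')')
    fields = data[rpar + 2:].split()
    assert fields[0] == b"S"      # process state
    assert int(fields[1]) == 1    # ppid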
1638def wrap_exceptions(fun):
1639 """Decorator which translates bare OSError and IOError exceptions
1640 into NoSuchProcess and AccessDenied.
1641 """
1642 @functools.wraps(fun)
1643 def wrapper(self, *args, **kwargs):
1644 try:
1645 return fun(self, *args, **kwargs)
1646 except PermissionError:
1647 raise AccessDenied(self.pid, self._name)
1648 except ProcessLookupError:
1649 raise NoSuchProcess(self.pid, self._name)
1650 except FileNotFoundError:
1651 if not os.path.exists("%s/%s" % (self._procfs_path, self.pid)):
1652 raise NoSuchProcess(self.pid, self._name)
1653 # Note: zombie processes keep existing under /proc until they are
1654 # reaped, so there's no way to distinguish them in here.
1655 raise
1656 return wrapper
1659class Process(object):
1660 """Linux process implementation."""
1662 __slots__ = ["pid", "_name", "_ppid", "_procfs_path", "_cache"]
1664 def __init__(self, pid):
1665 self.pid = pid
1666 self._name = None
1667 self._ppid = None
1668 self._procfs_path = get_procfs_path()
1670 def _assert_alive(self):
1671 """Raise NSP if the process disappeared on us."""
1672 # For those C functions which do not raise NSP, possibly returning
1673 # an incorrect or incomplete result.
1674 os.stat('%s/%s' % (self._procfs_path, self.pid))
1676 @wrap_exceptions
1677 @memoize_when_activated
1678 def _parse_stat_file(self):
1679 """Parse /proc/{pid}/stat file and return a dict with various
1680 process info.
1681 Using "man proc" as a reference: where "man proc" refers to
1682 position N, always subtract 3 (e.g. ppid is position 4 in
1683 'man proc' == position 1 here).
1684 The return value is cached in case oneshot() ctx manager is
1685 in use.
1686 """
1687 data = bcat("%s/%s/stat" % (self._procfs_path, self.pid))
1688 # Process name is between parentheses. It can contain spaces and
1689 # other parentheses. This is taken into account by looking for
1690 # the first occurrence of "(" and the last occurrence of ")".
1691 rpar = data.rfind(b')')
1692 name = data[data.find(b'(') + 1:rpar]
1693 fields = data[rpar + 2:].split()
1695 ret = {}
1696 ret['name'] = name
1697 ret['status'] = fields[0]
1698 ret['ppid'] = fields[1]
1699 ret['ttynr'] = fields[4]
1700 ret['utime'] = fields[11]
1701 ret['stime'] = fields[12]
1702 ret['children_utime'] = fields[13]
1703 ret['children_stime'] = fields[14]
1704 ret['create_time'] = fields[19]
1705 ret['cpu_num'] = fields[36]
1706 ret['blkio_ticks'] = fields[39] # aka 'delayacct_blkio_ticks'
1708 return ret
1710 @wrap_exceptions
1711 @memoize_when_activated
1712 def _read_status_file(self):
1713 """Read /proc/{pid}/stat file and return its content.
1714 The return value is cached in case oneshot() ctx manager is
1715 in use.
1716 """
1717 with open_binary("%s/%s/status" % (self._procfs_path, self.pid)) as f:
1718 return f.read()
1720 @wrap_exceptions
1721 @memoize_when_activated
1722 def _read_smaps_file(self):
1723 with open_binary("%s/%s/smaps" % (self._procfs_path, self.pid)) as f:
1724 return f.read().strip()
1726 def oneshot_enter(self):
1727 self._parse_stat_file.cache_activate(self)
1728 self._read_status_file.cache_activate(self)
1729 self._read_smaps_file.cache_activate(self)
1731 def oneshot_exit(self):
1732 self._parse_stat_file.cache_deactivate(self)
1733 self._read_status_file.cache_deactivate(self)
1734 self._read_smaps_file.cache_deactivate(self)
1736 @wrap_exceptions
1737 def name(self):
1738 name = self._parse_stat_file()['name']
1739 if PY3:
1740 name = decode(name)
1741 # XXX - gets changed later and probably needs refactoring
1742 return name
1744 def exe(self):
1745 try:
1746 return readlink("%s/%s/exe" % (self._procfs_path, self.pid))
1747 except (FileNotFoundError, ProcessLookupError):
1748 # no such file error; may also be raised if the
1749 # path actually exists for system processes with
1750 # low pids (about 0-20)
1751 if os.path.lexists("%s/%s" % (self._procfs_path, self.pid)):
1752 return ""
1753 else:
1754 if not pid_exists(self.pid):
1755 raise NoSuchProcess(self.pid, self._name)
1756 else:
1757 raise ZombieProcess(self.pid, self._name, self._ppid)
1758 except PermissionError:
1759 raise AccessDenied(self.pid, self._name)
1761 @wrap_exceptions
1762 def cmdline(self):
1763 with open_text("%s/%s/cmdline" % (self._procfs_path, self.pid)) as f:
1764 data = f.read()
1765 if not data:
1766 # may happen in case of zombie process
1767 return []
1768 # 'man proc' states that args are separated by null bytes '\0'
1769 # and the last char is supposed to be a null byte. Nevertheless
1770 # some processes may change their cmdline after being started
1771 # (via setproctitle() or similar); they are usually not
1772 # compliant with this rule and use spaces instead. The Google
1773 # Chrome process is an example. See:
1774 # https://github.com/giampaolo/psutil/issues/1179
1775 sep = '\x00' if data.endswith('\x00') else ' '
1776 if data.endswith(sep):
1777 data = data[:-1]
1778 cmdline = data.split(sep)
1779 # Sometimes the last char is a null byte '\0' but the args are
1780 # separated by spaces, see: https://github.com/giampaolo/psutil/
1781 # issues/1179#issuecomment-552984549
1782 if sep == '\x00' and len(cmdline) == 1 and ' ' in data:
1783 cmdline = data.split(' ')
1784 return cmdline
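
# --- Illustrative sketch (not part of the original module) -----------------
# The separator heuristic used by cmdline() above, extracted into a
# standalone helper and exercised on two made-up inputs: a compliant
# cmdline with NUL separators, and a setproctitle()-style one that uses
# spaces with only a trailing NUL.
def _demo_split_cmdline(data):
    sep = '\x00' if data.endswith('\x00') else ' '
    if data.endswith(sep):
        data = data[:-1]
    cmdline = data.split(sep)
    if sep == '\x00' and len(cmdline) == 1 and ' ' in data:
        cmdline = data.split(' ')
    return cmdline
# _demo_split_cmdline('vim\x00+10\x00file.txt\x00')
#     -> ['vim', '+10', 'file.txt']
# _demo_split_cmdline('chrome --type=renderer\x00')
#     -> ['chrome', '--type=renderer']
# ---------------------------------------------------------------------------
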
1786 @wrap_exceptions
1787 def environ(self):
1788 with open_text("%s/%s/environ" % (self._procfs_path, self.pid)) as f:
1789 data = f.read()
1790 return parse_environ_block(data)
1792 @wrap_exceptions
1793 def terminal(self):
1794 tty_nr = int(self._parse_stat_file()['ttynr'])
1795 tmap = _psposix.get_terminal_map()
1796 try:
1797 return tmap[tty_nr]
1798 except KeyError:
1799 return None
1801 # May not be available on old kernels.
1802 if os.path.exists('/proc/%s/io' % os.getpid()):
1803 @wrap_exceptions
1804 def io_counters(self):
1805 fname = "%s/%s/io" % (self._procfs_path, self.pid)
1806 fields = {}
1807 with open_binary(fname) as f:
1808 for line in f:
1809 # https://github.com/giampaolo/psutil/issues/1004
1810 line = line.strip()
1811 if line:
1812 try:
1813 name, value = line.split(b': ')
1814 except ValueError:
1815 # https://github.com/giampaolo/psutil/issues/1004
1816 continue
1817 else:
1818 fields[name] = int(value)
1819 if not fields:
1820 raise RuntimeError("%s file was empty" % fname)
1821 try:
1822 return pio(
1823 fields[b'syscr'], # read syscalls
1824 fields[b'syscw'], # write syscalls
1825 fields[b'read_bytes'], # read bytes
1826 fields[b'write_bytes'], # write bytes
1827 fields[b'rchar'], # read chars
1828 fields[b'wchar'], # write chars
1829 )
1830 except KeyError as err:
1831 raise ValueError("%r field was not found in %s; found fields "
1832 "are %r" % (err[0], fname, fields))
1834 @wrap_exceptions
1835 def cpu_times(self):
1836 values = self._parse_stat_file()
1837 utime = float(values['utime']) / CLOCK_TICKS
1838 stime = float(values['stime']) / CLOCK_TICKS
1839 children_utime = float(values['children_utime']) / CLOCK_TICKS
1840 children_stime = float(values['children_stime']) / CLOCK_TICKS
1841 iowait = float(values['blkio_ticks']) / CLOCK_TICKS
1842 return pcputimes(utime, stime, children_utime, children_stime, iowait)
1844 @wrap_exceptions
1845 def cpu_num(self):
1846 """What CPU the process is on."""
1847 return int(self._parse_stat_file()['cpu_num'])
1849 @wrap_exceptions
1850 def wait(self, timeout=None):
1851 return _psposix.wait_pid(self.pid, timeout, self._name)
1853 @wrap_exceptions
1854 def create_time(self):
1855 ctime = float(self._parse_stat_file()['create_time'])
1856 # According to documentation, starttime is in field 21 and the
1857 # unit is jiffies (clock ticks).
1858 # We first divide it by clock ticks and then add the boot time,
1859 # returning seconds since the epoch.
1860 # Also use the cached BOOT_TIME value if available.
1861 bt = BOOT_TIME or boot_time()
1862 return (ctime / CLOCK_TICKS) + bt
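
# --- Illustrative sketch (not part of the original module) -----------------
# Worked example of the conversion above, with made-up numbers: a starttime
# field of 250000 jiffies with CLOCK_TICKS == 100 means the process started
# 2500 seconds after boot; adding a boot time of 1700000000.0 gives its
# create time as seconds since the epoch.
def _demo_create_time(starttime_ticks=250000.0, clock_ticks=100,
                      boot_time_epoch=1700000000.0):
    return (starttime_ticks / clock_ticks) + boot_time_epoch  # 1700002500.0
# ---------------------------------------------------------------------------
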
1864 @wrap_exceptions
1865 def memory_info(self):
1866 # ============================================================
1867 # | FIELD | DESCRIPTION | AKA | TOP |
1868 # ============================================================
1869 # | rss | resident set size | | RES |
1870 # | vms | total program size | size | VIRT |
1871 # | shared | shared pages (from shared mappings) | | SHR |
1872 # | text | text ('code') | trs | CODE |
1873 # | lib | library (unused in Linux 2.6) | lrs | |
1874 # | data | data + stack | drs | DATA |
1875 # | dirty | dirty pages (unused in Linux 2.6) | dt | |
1876 # ============================================================
1877 with open_binary("%s/%s/statm" % (self._procfs_path, self.pid)) as f:
1878 vms, rss, shared, text, lib, data, dirty = \
1879 [int(x) * PAGESIZE for x in f.readline().split()[:7]]
1880 return pmem(rss, vms, shared, text, lib, data, dirty)
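
# --- Illustrative sketch (not part of the original module) -----------------
# /proc/<pid>/statm reports seven whitespace-separated values expressed in
# pages; memory_info() above multiplies each by PAGESIZE to get bytes. The
# sample line is made up; with a 4096-byte page the first value (vms) would
# be 5000 * 4096 = 20480000 bytes.
def _demo_parse_statm(line=b"5000 1200 300 150 0 900 0", pagesize=4096):
    vms, rss, shared, text, lib, data, dirty = \
        [int(x) * pagesize for x in line.split()[:7]]
    return vms, rss, shared, text, lib, data, dirty
# ---------------------------------------------------------------------------
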
1882 if HAS_PROC_SMAPS_ROLLUP or HAS_PROC_SMAPS:
1884 @wrap_exceptions
1885 def _parse_smaps_rollup(self):
1886 # /proc/pid/smaps_rollup was added to Linux in 2017. Faster
1887 # than /proc/pid/smaps. It reports higher PSS than */smaps
1888 # (from 1k up to 200k higher; tested against all processes).
1889 uss = pss = swap = 0
1890 try:
1891 with open_binary("{}/{}/smaps_rollup".format(
1892 self._procfs_path, self.pid)) as f:
1893 for line in f:
1894 if line.startswith(b"Private_"):
1895 # Private_Clean, Private_Dirty, Private_Hugetlb
1896 uss += int(line.split()[1]) * 1024
1897 elif line.startswith(b"Pss:"):
1898 pss = int(line.split()[1]) * 1024
1899 elif line.startswith(b"Swap:"):
1900 swap = int(line.split()[1]) * 1024
1901 except ProcessLookupError: # happens on readline()
1902 if not pid_exists(self.pid):
1903 raise NoSuchProcess(self.pid, self._name)
1904 else:
1905 raise ZombieProcess(self.pid, self._name, self._ppid)
1906 return (uss, pss, swap)
1908 @wrap_exceptions
1909 def _parse_smaps(
1910 self,
1911 # Gets Private_Clean, Private_Dirty, Private_Hugetlb.
1912 _private_re=re.compile(br"\nPrivate.*:\s+(\d+)"),
1913 _pss_re=re.compile(br"\nPss\:\s+(\d+)"),
1914 _swap_re=re.compile(br"\nSwap\:\s+(\d+)")):
1915 # /proc/pid/smaps does not exist on kernels < 2.6.14 or if
1916 # CONFIG_MMU kernel configuration option is not enabled.
1918 # Note: using 3 regexes is faster than reading the file
1919 # line by line.
1920 # XXX: on Python 3 the 3 regexes are 30% slower than on
1921 # Python 2 though. Figure out why.
1922 #
1923 # You might be tempted to calculate USS by subtracting
1924 # the "shared" value from the "resident" value in
1925 # /proc/<pid>/statm. But at least on Linux, statm's "shared"
1926 # value actually counts pages backed by files, which has
1927 # little to do with whether the pages are actually shared.
1928 # /proc/self/smaps on the other hand appears to give us the
1929 # correct information.
1930 smaps_data = self._read_smaps_file()
1931 # Note: smaps file can be empty for certain processes.
1932 # The code below will not crash though and will result in 0.
1933 uss = sum(map(int, _private_re.findall(smaps_data))) * 1024
1934 pss = sum(map(int, _pss_re.findall(smaps_data))) * 1024
1935 swap = sum(map(int, _swap_re.findall(smaps_data))) * 1024
1936 return (uss, pss, swap)
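
# --- Illustrative sketch (not part of the original module) -----------------
# The three regexes used by _parse_smaps() above, applied to a tiny made-up
# smaps fragment: every "Private_*", "Pss:" and "Swap:" value (in kB) is
# summed and converted to bytes. Relies on the module-level "re" import;
# standalone helper for illustration only.
def _demo_sum_smaps(blob=b"\nPss: 12 kB\nPrivate_Clean: 4 kB"
                         b"\nPrivate_Dirty: 8 kB\nSwap: 0 kB\n"):
    private_re = re.compile(br"\nPrivate.*:\s+(\d+)")
    pss_re = re.compile(br"\nPss\:\s+(\d+)")
    swap_re = re.compile(br"\nSwap\:\s+(\d+)")
    uss = sum(map(int, private_re.findall(blob))) * 1024   # 12288
    pss = sum(map(int, pss_re.findall(blob))) * 1024       # 12288
    swap = sum(map(int, swap_re.findall(blob))) * 1024     # 0
    return uss, pss, swap
# ---------------------------------------------------------------------------
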
1938 def memory_full_info(self):
1939 if HAS_PROC_SMAPS_ROLLUP: # faster
1940 uss, pss, swap = self._parse_smaps_rollup()
1941 else:
1942 uss, pss, swap = self._parse_smaps()
1943 basic_mem = self.memory_info()
1944 return pfullmem(*basic_mem + (uss, pss, swap))
1946 else:
1947 memory_full_info = memory_info
1949 if HAS_PROC_SMAPS:
1951 @wrap_exceptions
1952 def memory_maps(self):
1953 """Return process's mapped memory regions as a list of named
1954 tuples. Fields are explained in 'man proc'; here is an updated
1955 (Apr 2012) version: http://goo.gl/fmebo
1957 /proc/{PID}/smaps does not exist on kernels < 2.6.14 or if
1958 CONFIG_MMU kernel configuration option is not enabled.
1959 """
1960 def get_blocks(lines, current_block):
1961 data = {}
1962 for line in lines:
1963 fields = line.split(None, 5)
1964 if not fields[0].endswith(b':'):
1965 # new block section
1966 yield (current_block.pop(), data)
1967 current_block.append(line)
1968 else:
1969 try:
1970 data[fields[0]] = int(fields[1]) * 1024
1971 except ValueError:
1972 if fields[0].startswith(b'VmFlags:'):
1973 # see issue #369
1974 continue
1975 else:
1976 raise ValueError("don't know how to inte"
1977 "rpret line %r" % line)
1978 yield (current_block.pop(), data)
1980 data = self._read_smaps_file()
1981 # Note: smaps file can be empty for certain processes.
1982 if not data:
1983 return []
1984 lines = data.split(b'\n')
1985 ls = []
1986 first_line = lines.pop(0)
1987 current_block = [first_line]
1988 for header, data in get_blocks(lines, current_block):
1989 hfields = header.split(None, 5)
1990 try:
1991 addr, perms, offset, dev, inode, path = hfields
1992 except ValueError:
1993 addr, perms, offset, dev, inode, path = \
1994 hfields + ['']
1995 if not path:
1996 path = '[anon]'
1997 else:
1998 if PY3:
1999 path = decode(path)
2000 path = path.strip()
2001 if (path.endswith(' (deleted)') and not
2002 path_exists_strict(path)):
2003 path = path[:-10]
2004 ls.append((
2005 decode(addr), decode(perms), path,
2006 data.get(b'Rss:', 0),
2007 data.get(b'Size:', 0),
2008 data.get(b'Pss:', 0),
2009 data.get(b'Shared_Clean:', 0),
2010 data.get(b'Shared_Dirty:', 0),
2011 data.get(b'Private_Clean:', 0),
2012 data.get(b'Private_Dirty:', 0),
2013 data.get(b'Referenced:', 0),
2014 data.get(b'Anonymous:', 0),
2015 data.get(b'Swap:', 0)
2016 ))
2017 return ls
2019 @wrap_exceptions
2020 def cwd(self):
2021 try:
2022 return readlink("%s/%s/cwd" % (self._procfs_path, self.pid))
2023 except (FileNotFoundError, ProcessLookupError):
2024 # https://github.com/giampaolo/psutil/issues/986
2025 if not pid_exists(self.pid):
2026 raise NoSuchProcess(self.pid, self._name)
2027 else:
2028 raise ZombieProcess(self.pid, self._name, self._ppid)
2030 @wrap_exceptions
2031 def num_ctx_switches(self,
2032 _ctxsw_re=re.compile(br'ctxt_switches:\t(\d+)')):
2033 data = self._read_status_file()
2034 ctxsw = _ctxsw_re.findall(data)
2035 if not ctxsw:
2036 raise NotImplementedError(
2037 "'voluntary_ctxt_switches' and 'nonvoluntary_ctxt_switches'"
2038 "lines were not found in %s/%s/status; the kernel is "
2039 "probably older than 2.6.23" % (
2040 self._procfs_path, self.pid))
2041 else:
2042 return _common.pctxsw(int(ctxsw[0]), int(ctxsw[1]))
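
# --- Illustrative sketch (not part of the original module) -----------------
# The ctxt_switches regex used above, run against a made-up
# /proc/<pid>/status fragment: the first match is the voluntary count,
# the second the non-voluntary one. Standalone helper for illustration.
def _demo_ctx_switches(blob=b"voluntary_ctxt_switches:\t150\n"
                            b"nonvoluntary_ctxt_switches:\t3\n"):
    matches = re.findall(br'ctxt_switches:\t(\d+)', blob)
    return int(matches[0]), int(matches[1])    # (150, 3)
# ---------------------------------------------------------------------------
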
2044 @wrap_exceptions
2045 def num_threads(self, _num_threads_re=re.compile(br'Threads:\t(\d+)')):
2046 # Note: on Python 3 using a regex is faster than iterating over the
2047 # file line by line. On Python 2 it is the exact opposite, and
2048 # iterating over a file on Python 3 is slower than on Python 2.
2049 data = self._read_status_file()
2050 return int(_num_threads_re.findall(data)[0])
2052 @wrap_exceptions
2053 def threads(self):
2054 thread_ids = os.listdir("%s/%s/task" % (self._procfs_path, self.pid))
2055 thread_ids.sort()
2056 retlist = []
2057 hit_enoent = False
2058 for thread_id in thread_ids:
2059 fname = "%s/%s/task/%s/stat" % (
2060 self._procfs_path, self.pid, thread_id)
2061 try:
2062 with open_binary(fname) as f:
2063 st = f.read().strip()
2064 except (FileNotFoundError, ProcessLookupError):
2065 # no such file or directory, or no such process;
2066 # it means the thread disappeared on us
2067 hit_enoent = True
2068 continue
2069 # ignore the first two values ("pid (exe)")
2070 st = st[st.find(b')') + 2:]
2071 values = st.split(b' ')
2072 utime = float(values[11]) / CLOCK_TICKS
2073 stime = float(values[12]) / CLOCK_TICKS
2074 ntuple = _common.pthread(int(thread_id), utime, stime)
2075 retlist.append(ntuple)
2076 if hit_enoent:
2077 self._assert_alive()
2078 return retlist
2080 @wrap_exceptions
2081 def nice_get(self):
2082 # with open_text('%s/%s/stat' % (self._procfs_path, self.pid)) as f:
2083 # data = f.read()
2084 # return int(data.split()[18])
2086 # Use C implementation
2087 return cext_posix.getpriority(self.pid)
2089 @wrap_exceptions
2090 def nice_set(self, value):
2091 return cext_posix.setpriority(self.pid, value)
2093 # CPU affinity is supported starting from CentOS 6.
2094 if HAS_CPU_AFFINITY:
2096 @wrap_exceptions
2097 def cpu_affinity_get(self):
2098 return cext.proc_cpu_affinity_get(self.pid)
2100 def _get_eligible_cpus(
2101 self, _re=re.compile(br"Cpus_allowed_list:\t(\d+)-(\d+)")):
2102 # See: https://github.com/giampaolo/psutil/issues/956
2103 data = self._read_status_file()
2104 match = _re.findall(data)
2105 if match:
2106 return list(range(int(match[0][0]), int(match[0][1]) + 1))
2107 else:
2108 return list(range(len(per_cpu_times())))
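
# --- Illustrative sketch (not part of the original module) -----------------
# The Cpus_allowed_list regex used by _get_eligible_cpus() above, run
# against a made-up status fragment: a "0-3" range expands to the eligible
# CPUs [0, 1, 2, 3]. Standalone helper for illustration only.
def _demo_eligible_cpus(blob=b"Cpus_allowed_list:\t0-3\n"):
    match = re.findall(br"Cpus_allowed_list:\t(\d+)-(\d+)", blob)
    return list(range(int(match[0][0]), int(match[0][1]) + 1))  # [0, 1, 2, 3]
# ---------------------------------------------------------------------------
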
2110 @wrap_exceptions
2111 def cpu_affinity_set(self, cpus):
2112 try:
2113 cext.proc_cpu_affinity_set(self.pid, cpus)
2114 except (OSError, ValueError) as err:
2115 if isinstance(err, ValueError) or err.errno == errno.EINVAL:
2116 eligible_cpus = self._get_eligible_cpus()
2117 all_cpus = tuple(range(len(per_cpu_times())))
2118 for cpu in cpus:
2119 if cpu not in all_cpus:
2120 raise ValueError(
2121 "invalid CPU number %r; choose between %s" % (
2122 cpu, eligible_cpus))
2123 if cpu not in eligible_cpus:
2124 raise ValueError(
2125 "CPU number %r is not eligible; choose "
2126 "between %s" % (cpu, eligible_cpus))
2127 raise
2129 # only starting from kernel 2.6.13
2130 if HAS_PROC_IO_PRIORITY:
2132 @wrap_exceptions
2133 def ionice_get(self):
2134 ioclass, value = cext.proc_ioprio_get(self.pid)
2135 if enum is not None:
2136 ioclass = IOPriority(ioclass)
2137 return _common.pionice(ioclass, value)
2139 @wrap_exceptions
2140 def ionice_set(self, ioclass, value):
2141 if value is None:
2142 value = 0
2143 if value and ioclass in (IOPRIO_CLASS_IDLE, IOPRIO_CLASS_NONE):
2144 raise ValueError("%r ioclass accepts no value" % ioclass)
2145 if value < 0 or value > 7:
2146 raise ValueError("value not in 0-7 range")
2147 return cext.proc_ioprio_set(self.pid, ioclass, value)
2149 if prlimit is not None:
2151 @wrap_exceptions
2152 def rlimit(self, resource_, limits=None):
2153 # If pid is 0 prlimit() applies to the calling process and
2154 # we don't want that. We should never get here though as
2155 # PID 0 is not supported on Linux.
2156 if self.pid == 0:
2157 raise ValueError("can't use prlimit() against PID 0 process")
2158 try:
2159 if limits is None:
2160 # get
2161 return prlimit(self.pid, resource_)
2162 else:
2163 # set
2164 if len(limits) != 2:
2165 raise ValueError(
2166 "second argument must be a (soft, hard) tuple, "
2167 "got %s" % repr(limits))
2168 prlimit(self.pid, resource_, limits)
2169 except OSError as err:
2170 if err.errno == errno.ENOSYS and pid_exists(self.pid):
2171 # I saw this happening on Travis:
2172 # https://travis-ci.org/giampaolo/psutil/jobs/51368273
2173 raise ZombieProcess(self.pid, self._name, self._ppid)
2174 else:
2175 raise
2177 @wrap_exceptions
2178 def status(self):
2179 letter = self._parse_stat_file()['status']
2180 if PY3:
2181 letter = letter.decode()
2182 # XXX is '?' legit? (we're not supposed to return it anyway)
2183 return PROC_STATUSES.get(letter, '?')
2185 @wrap_exceptions
2186 def open_files(self):
2187 retlist = []
2188 files = os.listdir("%s/%s/fd" % (self._procfs_path, self.pid))
2189 hit_enoent = False
2190 for fd in files:
2191 file = "%s/%s/fd/%s" % (self._procfs_path, self.pid, fd)
2192 try:
2193 path = readlink(file)
2194 except (FileNotFoundError, ProcessLookupError):
2195 # ENOENT == file which is gone in the meantime
2196 hit_enoent = True
2197 continue
2198 except OSError as err:
2199 if err.errno == errno.EINVAL:
2200 # not a link
2201 continue
2202 if err.errno == errno.ENAMETOOLONG:
2203 # file name too long
2204 debug(err)
2205 continue
2206 raise
2207 else:
2208 # If the path is not absolute there's no way to tell
2209 # whether it's a regular file or not, so we skip it.
2210 # A regular file is always supposed to have an
2211 # absolute path though.
2212 if path.startswith('/') and isfile_strict(path):
2213 # Get file position and flags.
2214 file = "%s/%s/fdinfo/%s" % (
2215 self._procfs_path, self.pid, fd)
2216 try:
2217 with open_binary(file) as f:
2218 pos = int(f.readline().split()[1])
2219 flags = int(f.readline().split()[1], 8)
2220 except (FileNotFoundError, ProcessLookupError):
2221 # fd gone in the meantime; process may
2222 # still be alive
2223 hit_enoent = True
2224 else:
2225 mode = file_flags_to_mode(flags)
2226 ntuple = popenfile(
2227 path, int(fd), int(pos), mode, flags)
2228 retlist.append(ntuple)
2229 if hit_enoent:
2230 self._assert_alive()
2231 return retlist
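
# --- Illustrative sketch (not part of the original module) -----------------
# Typical /proc/<pid>/fdinfo/<fd> content (values made up): the first line
# carries the file offset, the second the open flags in octal, which is why
# open_files() above parses that field with base 8. Standalone helper for
# illustration only.
def _demo_parse_fdinfo(blob=b"pos:\t4096\nflags:\t0100002\nmnt_id:\t28\n"):
    lines = blob.splitlines()
    pos = int(lines[0].split()[1])         # 4096
    flags = int(lines[1].split()[1], 8)    # 0o100002 == 32770
    return pos, flags
# ---------------------------------------------------------------------------
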
2233 @wrap_exceptions
2234 def connections(self, kind='inet'):
2235 ret = _connections.retrieve(kind, self.pid)
2236 self._assert_alive()
2237 return ret
2239 @wrap_exceptions
2240 def num_fds(self):
2241 return len(os.listdir("%s/%s/fd" % (self._procfs_path, self.pid)))
2243 @wrap_exceptions
2244 def ppid(self):
2245 return int(self._parse_stat_file()['ppid'])
2247 @wrap_exceptions
2248 def uids(self, _uids_re=re.compile(br'Uid:\t(\d+)\t(\d+)\t(\d+)')):
2249 data = self._read_status_file()
2250 real, effective, saved = _uids_re.findall(data)[0]
2251 return _common.puids(int(real), int(effective), int(saved))
2253 @wrap_exceptions
2254 def gids(self, _gids_re=re.compile(br'Gid:\t(\d+)\t(\d+)\t(\d+)')):
2255 data = self._read_status_file()
2256 real, effective, saved = _gids_re.findall(data)[0]
2257 return _common.pgids(int(real), int(effective), int(saved))
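
# --- Illustrative sketch (not part of the original module) -----------------
# The Uid:/Gid: regexes used above, run against a made-up status fragment;
# the captured columns are real, effective and saved ids (the fourth,
# filesystem id, is ignored). Standalone helper for illustration only.
def _demo_parse_uids(blob=b"Uid:\t1000\t1000\t1000\t1000\n"):
    real, effective, saved = re.findall(
        br'Uid:\t(\d+)\t(\d+)\t(\d+)', blob)[0]
    return int(real), int(effective), int(saved)    # (1000, 1000, 1000)
# ---------------------------------------------------------------------------
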