# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.

import functools
import os
import re
import subprocess
import sys
import sysconfig
import types
from collections import defaultdict
from collections.abc import Iterable
from enum import IntEnum
from functools import lru_cache, reduce
from os import sep
from pathlib import Path
from typing import TYPE_CHECKING, Optional

from hypothesis._settings import Phase, Verbosity
from hypothesis.internal.compat import PYPY
from hypothesis.internal.escalation import is_hypothesis_file

if TYPE_CHECKING:
    from typing import TypeAlias

Location: "TypeAlias" = tuple[str, int]
Branch: "TypeAlias" = tuple[Optional[Location], Location]
Trace: "TypeAlias" = set[Branch]
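# For example (hypothetical values), a Branch recorded by the tracer might be
# (("myproject/core.py", 10), ("myproject/core.py", 12)); the first branch of
# each trace has None as its source location.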


@functools.cache
def should_trace_file(fname: str) -> bool:
    # fname.startswith("<") indicates runtime code-generation via compile,
    # e.g. compile("def ...", "<string>", "exec") in e.g. attrs methods.
    return not (is_hypothesis_file(fname) or fname.startswith("<"))
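    # For example, should_trace_file("<string>") is False, as is any filename
    # inside Hypothesis itself; a user's own test file is normally traced.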


# where possible, we'll use 3.12's new sys.monitoring module for low-overhead
# coverage instrumentation; on older python versions we'll use sys.settrace.
# tool_id = 1 is designated for coverage, but we intentionally choose a
# non-reserved tool id so we can co-exist with coverage tools.
MONITORING_TOOL_ID = 3
if hasattr(sys, "monitoring"):
    MONITORING_EVENTS = {sys.monitoring.events.LINE: "trace_line"}


class Tracer:
    """A super-simple branch coverage tracer."""
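
    # Usage sketch (illustrative only):
    #
    #   with Tracer(should_trace=True) as t:
    #       ...  # run the code under test
    #   t.branches  # set of (previous_location, current_location) pairs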

    __slots__ = (
        "_previous_location",
        "_should_trace",
        "_tried_and_failed_to_trace",
        "branches",
    )

    def __init__(self, *, should_trace: bool) -> None:
        self.branches: Trace = set()
        self._previous_location: Optional[Location] = None
        self._tried_and_failed_to_trace = False
        self._should_trace = should_trace and self.can_trace()

    @staticmethod
    def can_trace() -> bool:
        if PYPY:
            return False
        if hasattr(sys, "monitoring"):
            return sys.monitoring.get_tool(MONITORING_TOOL_ID) is None
        return sys.gettrace() is None

    def trace(self, frame, event, arg):
        try:
            if event == "call":
                return self.trace
            elif event == "line":
                fname = frame.f_code.co_filename
                if should_trace_file(fname):
                    current_location = (fname, frame.f_lineno)
                    self.branches.add((self._previous_location, current_location))
                    self._previous_location = current_location
        except RecursionError:
            pass

    def trace_line(self, code: types.CodeType, line_number: int) -> None:
        fname = code.co_filename
        if not should_trace_file(fname):
            # this function is only called on 3.12+, but we want to avoid an
            # assertion to that effect for performance.
            return sys.monitoring.DISABLE  # type: ignore

        current_location = (fname, line_number)
        self.branches.add((self._previous_location, current_location))
        self._previous_location = current_location

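    # Entering the context installs trace_line via sys.monitoring on Python 3.12+,
    # or falls back to installing trace() with sys.settrace(); exiting undoes
    # whichever registration succeeded.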
    def __enter__(self):
        self._tried_and_failed_to_trace = False

        if not self._should_trace:
            return self

        if not hasattr(sys, "monitoring"):
            sys.settrace(self.trace)
            return self

        try:
            sys.monitoring.use_tool_id(MONITORING_TOOL_ID, "scrutineer")
        except ValueError:
            # another thread may have registered a tool for MONITORING_TOOL_ID
            # since we checked in can_trace.
            self._tried_and_failed_to_trace = True
            return self

        for event, callback_name in MONITORING_EVENTS.items():
            sys.monitoring.set_events(MONITORING_TOOL_ID, event)
            callback = getattr(self, callback_name)
            sys.monitoring.register_callback(MONITORING_TOOL_ID, event, callback)

        return self

    def __exit__(self, *args, **kwargs):
        if not self._should_trace:
            return

        if not hasattr(sys, "monitoring"):
            sys.settrace(None)
            return

        if self._tried_and_failed_to_trace:
            return

        sys.monitoring.free_tool_id(MONITORING_TOOL_ID)
        for event in MONITORING_EVENTS:
            sys.monitoring.register_callback(MONITORING_TOOL_ID, event, None)


UNHELPFUL_LOCATIONS = (
    # There's a branch which is only taken when an exception is active while exiting
    # a contextmanager; this is probably after the fault has been triggered.
    # Similar reasoning applies to a few other standard-library modules: even
    # if the fault was later, these still aren't useful locations to report!
    # Note: The list is post-processed, so use plain "/" as the separator here.
    "/contextlib.py",
    "/inspect.py",
    "/re.py",
    "/re/__init__.py",  # refactored in Python 3.11
    "/warnings.py",
    # Quite rarely, the first AFNP line is in Pytest's internals.
    "/_pytest/**",
    "/pluggy/_*.py",
    # used by pytest for failure formatting in the terminal.
    # seen: pygments/lexer.py, pygments/formatters/, pygments/filter.py.
    "/pygments/*",
    # used by pytest for failure formatting
    "/difflib.py",
    "/reprlib.py",
    "/typing.py",
    "/conftest.py",
    "/pprint.py",
)


def _glob_to_re(locs: Iterable[str]) -> str:
    """Translate a list of glob patterns to a combined regular expression.
    Only the * and ** wildcards are supported, and patterns including special
    characters will only work by chance."""
    # fnmatch.translate is not an option since its "*" consumes path sep
    return "|".join(
        loc.replace(".", re.escape("."))
        .replace("**", r".+")
        .replace("*", r"[^/]+")
        .replace("/", re.escape(sep))
        + r"\Z"  # right anchored
        for loc in locs
    )
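    # For example (on POSIX, where os.sep == "/"):
    #   _glob_to_re(["/contextlib.py"]) == r"/contextlib\.py\Z"
    #   _glob_to_re(["/pygments/*"]) == r"/pygments/[^/]+\Z"
    # while "**" becomes ".+" and so can span path separators.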


def get_explaining_locations(traces):
    # Traces is a dict[interesting_origin | None, set[frozenset[tuple[str, int]]]]
    # Each trace in the set might later become a Counter instead of frozenset.
    if not traces:
        return {}

    unions = {origin: set().union(*values) for origin, values in traces.items()}
    seen_passing = {None}.union(*unions.pop(None, set()))

    always_failing_never_passing = {
        origin: reduce(set.intersection, [set().union(*v) for v in values])
        - seen_passing
        for origin, values in traces.items()
        if origin is not None
    }
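    # i.e. for each failing origin, the locations hit by every failing trace but
    # by no passing trace; e.g. (hypothetically) the line inside the branch that
    # raises the exception.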

    # Build the observed parts of the control-flow graph for each origin
    cf_graphs = {origin: defaultdict(set) for origin in unions}
    for origin, seen_arcs in unions.items():
        for src, dst in seen_arcs:
            cf_graphs[origin][src].add(dst)
        assert cf_graphs[origin][None], "Expected start node with >=1 successor"

    # For each origin, our explanation is the always_failing_never_passing lines
    # which are reachable from the start node (None) without passing through another
    # AFNP line. So here's a whatever-first search with early stopping:
    explanations = defaultdict(set)
    for origin in unions:
        queue = {None}
        seen = set()
        while queue:
            assert queue.isdisjoint(seen), f"Intersection: {queue & seen}"
            src = queue.pop()
            seen.add(src)
            if src in always_failing_never_passing[origin]:
                explanations[origin].add(src)
            else:
                queue.update(cf_graphs[origin][src] - seen)

    # The last step is to filter out explanations that we know would be uninformative.
    # When this is the first AFNP location, we conclude that Scrutineer missed the
    # real divergence (earlier in the trace) and drop that unhelpful explanation.
    filter_regex = re.compile(_glob_to_re(UNHELPFUL_LOCATIONS))
    return {
        origin: {loc for loc in afnp_locs if not filter_regex.search(loc[0])}
        for origin, afnp_locs in explanations.items()
    }
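    # The returned mapping has the shape {origin: {("path/to/file.py", lineno), ...}}
    # (illustrative values); make_report() below renders it as human-readable lines.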


# see e.g. https://docs.python.org/3/library/sysconfig.html#posix-user
# for examples of these path schemes
STDLIB_DIRS = {
    Path(sysconfig.get_path("platstdlib")).resolve(),
    Path(sysconfig.get_path("stdlib")).resolve(),
}
SITE_PACKAGES_DIRS = {
    Path(sysconfig.get_path("purelib")).resolve(),
    Path(sysconfig.get_path("platlib")).resolve(),
}

EXPLANATION_STUB = (
    "Explanation:",
    "    These lines were always and only run by failing examples:",
)


class ModuleLocation(IntEnum):
    LOCAL = 0
    SITE_PACKAGES = 1
    STDLIB = 2

    @classmethod
    @lru_cache(1024)
    def from_path(cls, path: str) -> "ModuleLocation":
        path = Path(path).resolve()
        # site-packages may be a subdir of stdlib or platlib, so it's important to
        # check is_relative_to for this before the stdlib.
        if any(path.is_relative_to(p) for p in SITE_PACKAGES_DIRS):
            return cls.SITE_PACKAGES
        if any(path.is_relative_to(p) for p in STDLIB_DIRS):
            return cls.STDLIB
        return cls.LOCAL


# show local files first, then site-packages, then stdlib
def _sort_key(path: str, lineno: int) -> tuple[int, str, int]:
    return (ModuleLocation.from_path(path), path, lineno)
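    # e.g. (illustrative paths): "src/mylib/core.py" (LOCAL) sorts before
    # ".../site-packages/attr/_make.py" (SITE_PACKAGES), which sorts before
    # ".../lib/python3.12/enum.py" (STDLIB), since IntEnum members compare as ints.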


def make_report(explanations, *, cap_lines_at=5):
    report = defaultdict(list)
    for origin, locations in explanations.items():
        locations = list(locations)
        locations.sort(key=lambda v: _sort_key(v[0], v[1]))
        report_lines = [f"        {fname}:{lineno}" for fname, lineno in locations]
        if len(report_lines) > cap_lines_at + 1:
            msg = "        (and {} more with settings.verbosity >= verbose)"
            report_lines[cap_lines_at:] = [msg.format(len(report_lines[cap_lines_at:]))]
        if report_lines:  # We might have filtered out every location as uninformative.
            report[origin] = list(EXPLANATION_STUB) + report_lines
    return report
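    # A report entry might look like (hypothetical path):
    #   {origin: ["Explanation:",
    #             "    These lines were always and only run by failing examples:",
    #             "        /path/to/mycode.py:17"]}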


def explanatory_lines(traces, settings):
    if Phase.explain in settings.phases and sys.gettrace() and not traces:
        return defaultdict(list)
    # Return human-readable report lines summarising the traces
    explanations = get_explaining_locations(traces)
    max_lines = 5 if settings.verbosity <= Verbosity.normal else float("inf")
    return make_report(explanations, cap_lines_at=max_lines)


# beware the code below; we're using some heuristics to make a nicer report...


@functools.lru_cache
def _get_git_repo_root() -> Path:
    try:
        where = subprocess.run(
            ["git", "rev-parse", "--show-toplevel"],
            check=True,
            timeout=10,
            capture_output=True,
            text=True,
            encoding="utf-8",
        ).stdout.strip()
    except Exception:  # pragma: no cover
        return Path().absolute().parents[-1]
    else:
        return Path(where)


def tractable_coverage_report(trace: Trace) -> dict[str, list[int]]:
    """Report a simple coverage map which is (probably most of) the user's code."""
    coverage: dict = {}
    t = dict(trace)
    for file, line in set(t.keys()).union(t.values()) - {None}:  # type: ignore
        # On Python <= 3.11, we can use coverage.py xor Hypothesis' tracer,
        # so the trace will be empty and this line never run under coverage.
        coverage.setdefault(file, set()).add(line)  # pragma: no cover
    stdlib_fragment = f"{os.sep}lib{os.sep}python3.{sys.version_info.minor}{os.sep}"
    return {
        k: sorted(v)
        for k, v in coverage.items()
        if stdlib_fragment not in k
        and (p := Path(k)).is_relative_to(_get_git_repo_root())
        and "site-packages" not in p.parts
    }
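    # Example result (hypothetical): {"/repo/src/mylib/core.py": [3, 4, 7]};
    # only files under the git repo root, excluding the stdlib and site-packages.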