Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/hypothesis/core.py: 35%
1# This file is part of Hypothesis, which may be found at
2# https://github.com/HypothesisWorks/hypothesis/
3#
4# Copyright the Hypothesis Authors.
5# Individual contributors are listed in AUTHORS.rst and the git log.
6#
7# This Source Code Form is subject to the terms of the Mozilla Public License,
8# v. 2.0. If a copy of the MPL was not distributed with this file, You can
9# obtain one at https://mozilla.org/MPL/2.0/.
11"""This module provides the core primitives of Hypothesis, such as given."""
13import base64
14import contextlib
15import dataclasses
16import datetime
17import inspect
18import io
19import math
20import os
21import sys
22import threading
23import time
24import traceback
25import types
26import unittest
27import warnings
28import zlib
29from collections import defaultdict
30from collections.abc import Callable, Coroutine, Generator, Hashable, Iterable, Sequence
31from dataclasses import dataclass, field
32from functools import partial
33from inspect import Parameter
34from random import Random
35from threading import Lock
36from types import EllipsisType
37from typing import (
38 Any,
39 BinaryIO,
40 TypeVar,
41 overload,
42)
43from unittest import TestCase
45from hypothesis import strategies as st
46from hypothesis._settings import (
47 HealthCheck,
48 Phase,
49 Verbosity,
50 all_settings,
51 local_settings,
52 settings as Settings,
53)
54from hypothesis.control import BuildContext, currently_in_test_context
55from hypothesis.database import choices_from_bytes, choices_to_bytes
56from hypothesis.errors import (
57 BackendCannotProceed,
58 DeadlineExceeded,
59 DidNotReproduce,
60 FailedHealthCheck,
61 FlakyFailure,
62 FlakyReplay,
63 Found,
64 Frozen,
65 HypothesisException,
66 HypothesisWarning,
67 InvalidArgument,
68 NoSuchExample,
69 StopTest,
70 Unsatisfiable,
71 UnsatisfiedAssumption,
72)
73from hypothesis.internal import observability
74from hypothesis.internal.compat import (
75 PYPY,
76 BaseExceptionGroup,
77 add_note,
78 bad_django_TestCase,
79 get_type_hints,
80 int_from_bytes,
81)
82from hypothesis.internal.conjecture.choice import ChoiceT
83from hypothesis.internal.conjecture.data import ConjectureData, Status
84from hypothesis.internal.conjecture.engine import BUFFER_SIZE, ConjectureRunner
85from hypothesis.internal.conjecture.junkdrawer import (
86 ensure_free_stackframes,
87 gc_cumulative_time,
88)
89from hypothesis.internal.conjecture.providers import (
90 BytestringProvider,
91 PrimitiveProvider,
92)
93from hypothesis.internal.conjecture.shrinker import sort_key
94from hypothesis.internal.entropy import deterministic_PRNG
95from hypothesis.internal.escalation import (
96 InterestingOrigin,
97 current_pytest_item,
98 format_exception,
99 get_trimmed_traceback,
100 is_hypothesis_file,
101)
102from hypothesis.internal.healthcheck import fail_health_check
103from hypothesis.internal.observability import (
104 InfoObservation,
105 InfoObservationType,
106 deliver_observation,
107 make_testcase,
108 observability_enabled,
109)
110from hypothesis.internal.reflection import (
111 convert_positional_arguments,
112 define_function_signature,
113 function_digest,
114 get_pretty_function_description,
115 get_signature,
116 impersonate,
117 is_mock,
118 nicerepr,
119 proxies,
120 repr_call,
121)
122from hypothesis.internal.scrutineer import (
123 MONITORING_TOOL_ID,
124 Trace,
125 Tracer,
126 explanatory_lines,
127 tractable_coverage_report,
128)
129from hypothesis.internal.validation import check_type
130from hypothesis.reporting import (
131 current_verbosity,
132 report,
133 verbose_report,
134 with_reporter,
135)
136from hypothesis.statistics import describe_statistics, describe_targets, note_statistics
137from hypothesis.strategies._internal.misc import NOTHING
138from hypothesis.strategies._internal.strategies import (
139 Ex,
140 SearchStrategy,
141 check_strategy,
142)
143from hypothesis.utils.conventions import not_set
144from hypothesis.utils.threading import ThreadLocal
145from hypothesis.vendor.pretty import RepresentationPrinter
146from hypothesis.version import __version__
148TestFunc = TypeVar("TestFunc", bound=Callable)
151running_under_pytest = False
152pytest_shows_exceptiongroups = True
153global_force_seed = None
154# this variable stores "engine-global" constants, which are global relative to a
155# ConjectureRunner instance (roughly speaking). Since only one conjecture runner
156# instance can be active per thread, making engine constants thread-local prevents
157# the ConjectureRunner instances of concurrent threads from treading on each other.
158threadlocal = ThreadLocal(_hypothesis_global_random=lambda: None)
161@dataclass(slots=True, frozen=False)
162class Example:
163 args: Any
164 kwargs: Any
165 # Plus two optional arguments for .xfail()
166 raises: Any = field(default=None)
167 reason: Any = field(default=None)
170@dataclass(slots=True, frozen=True)
171class ReportableError:
172 fragments: list[str]
173 exception: BaseException
176# TODO_DOCS link to not-yet-existent patch-dumping docs
179class example:
180 """
181 Add an explicit input to a Hypothesis test, which Hypothesis will always
182 try before generating random inputs. This combines the randomized nature of
183 Hypothesis generation with a traditional parametrized test.
185 For example:
187 .. code-block:: python
189 @example("Hello world")
190 @example("some string with special significance")
191 @given(st.text())
192 def test_strings(s):
193 pass
195 will call ``test_strings("Hello world")`` and
196 ``test_strings("some string with special significance")`` before generating
197 any random inputs. |@example| may be placed in any order relative to |@given|
198 and |@settings|.
200 Explicit inputs from |@example| are run in the |Phase.explicit| phase.
201 Explicit inputs do not count towards |settings.max_examples|. Note that
202 explicit inputs added by |@example| do not shrink. If an explicit input
203 fails, Hypothesis will stop and report the failure without generating any
204 random inputs.
206 |@example| can also be used to easily reproduce a failure. For instance, if
207 Hypothesis reports that ``f(n=[0, math.nan])`` fails, you can add
208 ``@example(n=[0, math.nan])`` to your test to quickly reproduce that failure.
210 Arguments to ``@example``
211 -------------------------
213 Arguments to |@example| have the same behavior and restrictions as arguments
214 to |@given|. This means they may be either positional or keyword arguments
215 (but not both in the same |@example|):
217 .. code-block:: python
219 @example(1, 2)
220 @example(x=1, y=2)
221 @given(st.integers(), st.integers())
222 def test(x, y):
223 pass
225 Note that while arguments to |@given| are strategies (like |st.integers|),
226 arguments to |@example| are values instead (like ``1``).
228 See the :ref:`given-arguments` section for full details.
229 """
231 def __init__(self, *args: Any, **kwargs: Any) -> None:
232 if args and kwargs:
233 raise InvalidArgument(
234 "Cannot mix positional and keyword arguments for examples"
235 )
236 if not (args or kwargs):
237 raise InvalidArgument("An example must provide at least one argument")
239 self.hypothesis_explicit_examples: list[Example] = []
240 self._this_example = Example(tuple(args), kwargs)
242 def __call__(self, test: TestFunc) -> TestFunc:
243 if not hasattr(test, "hypothesis_explicit_examples"):
244 test.hypothesis_explicit_examples = self.hypothesis_explicit_examples # type: ignore
245 test.hypothesis_explicit_examples.append(self._this_example) # type: ignore
246 return test
248 def xfail(
249 self,
250 condition: bool = True, # noqa: FBT002
251 *,
252 reason: str = "",
253 raises: type[BaseException] | tuple[type[BaseException], ...] = BaseException,
254 ) -> "example":
255 """Mark this example as an expected failure, similarly to
256 :obj:`pytest.mark.xfail(strict=True) <pytest.mark.xfail>`.
258 Expected-failing examples allow you to check that your test does fail on
259 some examples, and therefore build confidence that *passing* tests are
260 because your code is working, not because the test is missing something.
262 .. code-block:: python
264 @example(...).xfail()
265 @example(...).xfail(reason="Prices must be non-negative")
266 @example(...).xfail(raises=(KeyError, ValueError))
267 @example(...).xfail(sys.version_info[:2] >= (3, 12), reason="needs py 3.12")
268 @example(...).xfail(condition=sys.platform != "linux", raises=OSError)
269 def test(x):
270 pass
272 .. note::
274 Expected-failing examples are handled separately from those generated
275 by strategies, so you should usually ensure that there is no overlap.
277 .. code-block:: python
279 @example(x=1, y=0).xfail(raises=ZeroDivisionError)
280 @given(x=st.just(1), y=st.integers()) # Missing `.filter(bool)`!
281 def test_fraction(x, y):
282 # This test will try the explicit example and see it fail as
283 # expected, then go on to generate more examples from the
284 # strategy. If we happen to generate y=0, the test will fail
285 # because only the explicit example is treated as xfailing.
286 x / y
287 """
288 check_type(bool, condition, "condition")
289 check_type(str, reason, "reason")
290 if not (
291 isinstance(raises, type) and issubclass(raises, BaseException)
292 ) and not (
293 isinstance(raises, tuple)
294 and raises # () -> expected to fail with no error, which is impossible
295 and all(
296 isinstance(r, type) and issubclass(r, BaseException) for r in raises
297 )
298 ):
299 raise InvalidArgument(
300 f"{raises=} must be an exception type or tuple of exception types"
301 )
302 if condition:
303 self._this_example = dataclasses.replace(
304 self._this_example, raises=raises, reason=reason
305 )
306 return self
308 def via(self, whence: str, /) -> "example":
309 """Attach a machine-readable label noting what the origin of this example
310 was. |example.via| is completely optional and does not change runtime
311 behavior.
313 |example.via| is intended to support self-documenting behavior, as well as
314 tooling which might add (or remove) |@example| decorators automatically.
315 For example:
317 .. code-block:: python
319 # Annotating examples is optional and does not change runtime behavior
320 @example(...)
321 @example(...).via("regression test for issue #42")
322 @example(...).via("discovered failure")
323 def test(x):
324 pass
326 .. note::
328 `HypoFuzz <https://hypofuzz.com/>`_ uses |example.via| to tag examples
329 in the patch of its high-coverage set of explicit inputs, on
330 `the patches page <https://hypofuzz.com/example-dashboard/#/patches>`_.
331 """
332 if not isinstance(whence, str):
333 raise InvalidArgument(".via() must be passed a string")
334 # This is deliberately a no-op at runtime; the tools operate on source code.
335 return self
338def seed(seed: Hashable) -> Callable[[TestFunc], TestFunc]:
339 """
340 Seed the randomness for this test.
342 ``seed`` may be any hashable object. No exact meaning for ``seed`` is provided
343 other than that for a fixed seed value Hypothesis will produce the same
344 examples (assuming that there are no other sources of nondeterminism, such
345 as timing, hash randomization, or external state).
347 For example, the following test function and |RuleBasedStateMachine| will
348 each generate the same series of examples each time they are executed:
350 .. code-block:: python
352 @seed(1234)
353 @given(st.integers())
354 def test(n): ...
356 @seed(6789)
357 class MyMachine(RuleBasedStateMachine): ...
359 If using pytest, you can alternatively pass ``--hypothesis-seed`` on the
360 command line.
362 Setting a seed overrides |settings.derandomize|, which is designed to enable
363 deterministic CI tests rather than reproducing observed failures.
365 Hypothesis will only print the seed which would reproduce a failure if a test
366 fails in an unexpected way, for instance inside Hypothesis internals.
367 """
369 def accept(test):
370 test._hypothesis_internal_use_seed = seed
371 current_settings = getattr(test, "_hypothesis_internal_use_settings", None)
372 test._hypothesis_internal_use_settings = Settings(
373 current_settings, database=None
374 )
375 return test
377 return accept
380# TODO_DOCS: link to /explanation/choice-sequence
383def reproduce_failure(version: str, blob: bytes) -> Callable[[TestFunc], TestFunc]:
384 """
385 Run the example corresponding to the binary ``blob`` in order to reproduce a
386 failure. ``blob`` is a serialized version of the internal input representation
387 of Hypothesis.
389 A test decorated with |@reproduce_failure| always runs exactly one example,
390 which is expected to cause a failure. If the provided ``blob`` does not
391 cause a failure, Hypothesis will raise |DidNotReproduce|.
393 Hypothesis will print an |@reproduce_failure| decorator if
394 |settings.print_blob| is ``True`` (which is the default in CI).
396 |@reproduce_failure| is intended to be temporarily added to your test suite in
397 order to reproduce a failure. It is not intended to be a permanent addition to
398 your test suite. Because of this, no compatibility guarantees are made across
399 Hypothesis versions, and |@reproduce_failure| will error if used on a different
400 Hypothesis version than it was created for.
402 .. seealso::
404 See also the :doc:`/tutorial/replaying-failures` tutorial.
405 """
407 def accept(test):
408 test._hypothesis_internal_use_reproduce_failure = (version, blob)
409 return test
411 return accept
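# Illustrative sketch (not part of core.py): what a pasted @reproduce_failure
# decorator looks like in practice. The version string and blob below are
# hypothetical placeholders; in real use you copy both verbatim from the
# failure report that Hypothesis prints.
#
#   @reproduce_failure("6.135.0", b"AXicY2BgYAAAAAQAAQ==")
#   @given(st.integers())
#   def test_ints(n): ...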
414def reproduction_decorator(choices: Iterable[ChoiceT]) -> str:
415 return f"@reproduce_failure({__version__!r}, {encode_failure(choices)!r})"
418def encode_failure(choices: Iterable[ChoiceT]) -> bytes:
419 blob = choices_to_bytes(choices)
420 compressed = zlib.compress(blob)
421 if len(compressed) < len(blob):
422 blob = b"\1" + compressed
423 else:
424 blob = b"\0" + blob
425 return base64.b64encode(blob)
428def decode_failure(blob: bytes) -> Sequence[ChoiceT]:
429 try:
430 decoded = base64.b64decode(blob)
431 except Exception:
432 raise InvalidArgument(f"Invalid base64 encoded string: {blob!r}") from None
434 prefix = decoded[:1]
435 if prefix == b"\0":
436 decoded = decoded[1:]
437 elif prefix == b"\1":
438 try:
439 decoded = zlib.decompress(decoded[1:])
440 except zlib.error as err:
441 raise InvalidArgument(
442 f"Invalid zlib compression for blob {blob!r}"
443 ) from err
444 else:
445 raise InvalidArgument(
446 f"Could not decode blob {blob!r}: Invalid start byte {prefix!r}"
447 )
449 choices = choices_from_bytes(decoded)
450 if choices is None:
451 raise InvalidArgument(f"Invalid serialized choice sequence for blob {blob!r}")
453 return choices
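# Illustrative sketch (not part of core.py): the framing scheme used by
# encode_failure()/decode_failure() above, shown with a plain bytes payload
# instead of a serialized choice sequence. A leading b"\x01" byte marks a
# zlib-compressed payload, b"\x00" marks a raw payload; either way the result
# is base64-encoded so it can be pasted into an @reproduce_failure decorator.
# The _sketch_* helper names are invented for this example.


def _sketch_frame(payload: bytes) -> bytes:
    # Compress only when it actually helps, mirroring encode_failure().
    compressed = zlib.compress(payload)
    body = b"\1" + compressed if len(compressed) < len(payload) else b"\0" + payload
    return base64.b64encode(body)


def _sketch_unframe(blob: bytes) -> bytes:
    # Mirror of decode_failure(), minus the error handling.
    decoded = base64.b64decode(blob)
    return zlib.decompress(decoded[1:]) if decoded[:1] == b"\1" else decoded[1:]


# Round trip: _sketch_unframe(_sketch_frame(b"some payload")) == b"some payload"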
456def _invalid(message, *, exc=InvalidArgument, test, given_kwargs):
457 @impersonate(test)
458 def wrapped_test(*arguments, **kwargs): # pragma: no cover # coverage limitation
459 raise exc(message)
461 wrapped_test.is_hypothesis_test = True
462 wrapped_test.hypothesis = HypothesisHandle(
463 inner_test=test,
464 _get_fuzz_target=wrapped_test,
465 _given_kwargs=given_kwargs,
466 )
467 return wrapped_test
470def is_invalid_test(test, original_sig, given_arguments, given_kwargs):
471 """Check the arguments to ``@given`` for basic usage constraints.
473 Most errors are not raised immediately; instead we return a dummy test
474 function that will raise the appropriate error if it is actually called.
475 When the user runs a subset of tests (e.g. via ``pytest -k``), errors will
476 only be reported for tests that actually ran.
477 """
478 invalid = partial(_invalid, test=test, given_kwargs=given_kwargs)
480 if not (given_arguments or given_kwargs):
481 return invalid("given must be called with at least one argument")
483 params = list(original_sig.parameters.values())
484 pos_params = [p for p in params if p.kind is p.POSITIONAL_OR_KEYWORD]
485 kwonly_params = [p for p in params if p.kind is p.KEYWORD_ONLY]
486 if given_arguments and params != pos_params:
487 return invalid(
488 "positional arguments to @given are not supported with varargs, "
489 "varkeywords, positional-only, or keyword-only arguments"
490 )
492 if len(given_arguments) > len(pos_params):
493 return invalid(
494 f"Too many positional arguments for {test.__name__}() were passed to "
495 f"@given - expected at most {len(pos_params)} "
496 f"arguments, but got {len(given_arguments)} {given_arguments!r}"
497 )
499 if ... in given_arguments:
500 return invalid(
501 "... was passed as a positional argument to @given, but may only be "
502 "passed as a keyword argument or as the sole argument of @given"
503 )
505 if given_arguments and given_kwargs:
506 return invalid("cannot mix positional and keyword arguments to @given")
507 extra_kwargs = [
508 k for k in given_kwargs if k not in {p.name for p in pos_params + kwonly_params}
509 ]
510 if extra_kwargs and (params == [] or params[-1].kind is not params[-1].VAR_KEYWORD):
511 arg = extra_kwargs[0]
512 extra = ""
513 if arg in all_settings:
514 extra = f". Did you mean @settings({arg}={given_kwargs[arg]!r})?"
515 return invalid(
516 f"{test.__name__}() got an unexpected keyword argument {arg!r}, "
517 f"from `{arg}={given_kwargs[arg]!r}` in @given{extra}"
518 )
519 if any(p.default is not p.empty for p in params):
520 return invalid("Cannot apply @given to a function with defaults.")
522 # This case would raise Unsatisfiable *anyway*, but by detecting it here we can
523 # provide a much more helpful error message for people e.g. using the Ghostwriter.
524 empty = [
525 f"{s!r} (arg {idx})" for idx, s in enumerate(given_arguments) if s is NOTHING
526 ] + [f"{name}={s!r}" for name, s in given_kwargs.items() if s is NOTHING]
527 if empty:
528 strats = "strategies" if len(empty) > 1 else "strategy"
529 return invalid(
530 f"Cannot generate examples from empty {strats}: " + ", ".join(empty),
531 exc=Unsatisfiable,
532 )
535def execute_explicit_examples(state, wrapped_test, arguments, kwargs, original_sig):
536 assert isinstance(state, StateForActualGivenExecution)
537 posargs = [
538 p.name
539 for p in original_sig.parameters.values()
540 if p.kind is p.POSITIONAL_OR_KEYWORD
541 ]
543 for example in reversed(getattr(wrapped_test, "hypothesis_explicit_examples", ())):
544 assert isinstance(example, Example)
545 # All of this validation is to check that @example() got "the same" arguments
546 # as @given, i.e. corresponding to the same parameters, even though they might
547 # be any mixture of positional and keyword arguments.
548 if example.args:
549 assert not example.kwargs
550 if any(
551 p.kind is p.POSITIONAL_ONLY for p in original_sig.parameters.values()
552 ):
553 raise InvalidArgument(
554 "Cannot pass positional arguments to @example() when decorating "
555 "a test function which has positional-only parameters."
556 )
557 if len(example.args) > len(posargs):
558 raise InvalidArgument(
559 "example has too many arguments for test. Expected at most "
560 f"{len(posargs)} but got {len(example.args)}"
561 )
562 example_kwargs = dict(
563 zip(posargs[-len(example.args) :], example.args, strict=True)
564 )
565 else:
566 example_kwargs = dict(example.kwargs)
567 given_kws = ", ".join(
568 repr(k) for k in sorted(wrapped_test.hypothesis._given_kwargs)
569 )
570 example_kws = ", ".join(repr(k) for k in sorted(example_kwargs))
571 if given_kws != example_kws:
572 raise InvalidArgument(
573 f"Inconsistent args: @given() got strategies for {given_kws}, "
574 f"but @example() got arguments for {example_kws}"
575 ) from None
577 # This is certainly true because the example_kwargs exactly match the params
578 # reserved by @given(), which are then removed from the function signature.
579 assert set(example_kwargs).isdisjoint(kwargs)
580 example_kwargs.update(kwargs)
582 if Phase.explicit not in state.settings.phases:
583 continue
585 with local_settings(state.settings):
586 fragments_reported = []
587 empty_data = ConjectureData.for_choices([])
588 try:
589 execute_example = partial(
590 state.execute_once,
591 empty_data,
592 is_final=True,
593 print_example=True,
594 example_kwargs=example_kwargs,
595 )
596 with with_reporter(fragments_reported.append):
597 if example.raises is None:
598 execute_example()
599 else:
600 # @example(...).xfail(...)
601 bits = ", ".join(nicerepr(x) for x in arguments) + ", ".join(
602 f"{k}={nicerepr(v)}" for k, v in example_kwargs.items()
603 )
604 try:
605 execute_example()
606 except failure_exceptions_to_catch() as err:
607 if not isinstance(err, example.raises):
608 raise
609 # Save a string form of this example; we'll warn if it's
610 # ever generated by the strategy (which can't be xfailed)
611 state.xfail_example_reprs.add(
612 repr_call(state.test, arguments, example_kwargs)
613 )
614 except example.raises as err:
615 # We'd usually check this as early as possible, but it's
616 # possible for failure_exceptions_to_catch() to grow when
617 # e.g. pytest is imported between import- and test-time.
618 raise InvalidArgument(
619 f"@example({bits}) raised an expected {err!r}, "
620 "but Hypothesis does not treat this as a test failure"
621 ) from err
622 else:
623 # Unexpectedly passing; always raise an error in this case.
624 reason = f" because {example.reason}" * bool(example.reason)
625 if example.raises is BaseException:
626 name = "exception" # special-case no raises= arg
627 elif not isinstance(example.raises, tuple):
628 name = example.raises.__name__
629 elif len(example.raises) == 1:
630 name = example.raises[0].__name__
631 else:
632 name = (
633 ", ".join(ex.__name__ for ex in example.raises[:-1])
634 + f", or {example.raises[-1].__name__}"
635 )
636 vowel = name.upper()[0] in "AEIOU"
637 raise AssertionError(
638 f"Expected a{'n' * vowel} {name} from @example({bits})"
639 f"{reason}, but no exception was raised."
640 )
641 except UnsatisfiedAssumption:
642 # Odd though it seems, we deliberately support explicit examples that
643 # are then rejected by a call to `assume()`. As well as iterative
644 # development, this is rather useful to replay Hypothesis' part of
645 # a saved failure when other arguments are supplied by e.g. pytest.
646 # See https://github.com/HypothesisWorks/hypothesis/issues/2125
647 with contextlib.suppress(StopTest):
648 empty_data.conclude_test(Status.INVALID)
649 except BaseException as err:
650 # In order to support reporting of multiple failing examples, we yield
651 # each of the (report text, error) pairs we find back to the top-level
652 # runner. This also ensures that user-facing stack traces have as few
653 # frames of Hypothesis internals as possible.
654 err = err.with_traceback(get_trimmed_traceback())
656 # One user error - whether misunderstanding or typo - we've seen a few
657 # times is to pass strategies to @example() where values are expected.
658 # Checking is easy, and false-positives not much of a problem, so:
659 if isinstance(err, failure_exceptions_to_catch()) and any(
660 isinstance(arg, SearchStrategy)
661 for arg in example.args + tuple(example.kwargs.values())
662 ):
663 new = HypothesisWarning(
664 "The @example() decorator expects to be passed values, but "
665 "you passed strategies instead. See https://hypothesis."
666 "readthedocs.io/en/latest/reference/api.html#hypothesis"
667 ".example for details."
668 )
669 new.__cause__ = err
670 err = new
672 with contextlib.suppress(StopTest):
673 empty_data.conclude_test(Status.INVALID)
674 yield ReportableError(fragments_reported, err)
675 if (
676 state.settings.report_multiple_bugs
677 and pytest_shows_exceptiongroups
678 and isinstance(err, failure_exceptions_to_catch())
679 and not isinstance(err, skip_exceptions_to_reraise())
680 ):
681 continue
682 break
683 finally:
684 if fragments_reported:
685 assert fragments_reported[0].startswith("Falsifying example")
686 fragments_reported[0] = fragments_reported[0].replace(
687 "Falsifying example", "Falsifying explicit example", 1
688 )
690 empty_data.freeze()
691 if observability_enabled():
692 tc = make_testcase(
693 run_start=state._start_timestamp,
694 property=state.test_identifier,
695 data=empty_data,
696 how_generated="explicit example",
697 representation=state._string_repr,
698 timing=state._timing_features,
699 )
700 deliver_observation(tc)
702 if fragments_reported:
703 verbose_report(fragments_reported[0].replace("Falsifying", "Trying", 1))
704 for f in fragments_reported[1:]:
705 verbose_report(f)
708def get_random_for_wrapped_test(test, wrapped_test):
709 settings = wrapped_test._hypothesis_internal_use_settings
710 wrapped_test._hypothesis_internal_use_generated_seed = None
712 if wrapped_test._hypothesis_internal_use_seed is not None:
713 return Random(wrapped_test._hypothesis_internal_use_seed)
715 if settings.derandomize:
716 return Random(int_from_bytes(function_digest(test)))
718 if global_force_seed is not None:
719 return Random(global_force_seed)
721 if threadlocal._hypothesis_global_random is None: # pragma: no cover
722 threadlocal._hypothesis_global_random = Random()
723 seed = threadlocal._hypothesis_global_random.getrandbits(128)
724 wrapped_test._hypothesis_internal_use_generated_seed = seed
725 return Random(seed)
728@dataclass(slots=True, frozen=False)
729class Stuff:
730 selfy: Any
731 args: tuple
732 kwargs: dict
733 given_kwargs: dict
736def process_arguments_to_given(
737 wrapped_test: Any,
738 arguments: Sequence[object],
739 kwargs: dict[str, object],
740 given_kwargs: dict[str, SearchStrategy],
741 params: dict[str, Parameter],
742) -> tuple[Sequence[object], dict[str, object], Stuff]:
743 selfy = None
744 arguments, kwargs = convert_positional_arguments(wrapped_test, arguments, kwargs)
746 # If the test function is a method of some kind, the bound object
747 # will be the first named argument if there are any, otherwise the
748 # first vararg (if any).
749 posargs = [p.name for p in params.values() if p.kind is p.POSITIONAL_OR_KEYWORD]
750 if posargs:
751 selfy = kwargs.get(posargs[0])
752 elif arguments:
753 selfy = arguments[0]
755 # Ensure that we don't mistake mocks for self here.
756 # This can cause the mock to be used as the test runner.
757 if is_mock(selfy):
758 selfy = None
760 arguments = tuple(arguments)
762 with ensure_free_stackframes():
763 for k, s in given_kwargs.items():
764 check_strategy(s, name=k)
765 s.validate()
767 stuff = Stuff(selfy=selfy, args=arguments, kwargs=kwargs, given_kwargs=given_kwargs)
769 return arguments, kwargs, stuff
772def skip_exceptions_to_reraise():
773 """Return a tuple of exceptions meaning 'skip this test', to re-raise.
775 This is intended to cover most common test runners; if you would
776 like another to be added please open an issue or pull request adding
777 it to this function and to tests/cover/test_lazy_import.py
778 """
779 # This is a set in case any library simply re-exports another's Skip exception
780 exceptions = set()
781 # We use this sys.modules trick to avoid importing libraries -
782 # you can't be an instance of a type from an unimported module!
783 # This is fast enough that we don't need to cache the result,
784 # and more importantly it avoids possible side-effects :-)
785 if "unittest" in sys.modules:
786 exceptions.add(sys.modules["unittest"].SkipTest)
787 if "_pytest.outcomes" in sys.modules:
788 exceptions.add(sys.modules["_pytest.outcomes"].Skipped)
789 return tuple(sorted(exceptions, key=str))
792def failure_exceptions_to_catch() -> tuple[type[BaseException], ...]:
793 """Return a tuple of exceptions meaning 'this test has failed', to catch.
795 This is intended to cover most common test runners; if you would
796 like another to be added please open an issue or pull request.
797 """
798 # While SystemExit and GeneratorExit are instances of BaseException, we also
799 # expect them to be deterministic - unlike KeyboardInterrupt - and so we treat
800 # them as standard exceptions, check for flakiness, etc.
801 # See https://github.com/HypothesisWorks/hypothesis/issues/2223 for details.
802 exceptions = [Exception, SystemExit, GeneratorExit]
803 if "_pytest.outcomes" in sys.modules:
804 exceptions.append(sys.modules["_pytest.outcomes"].Failed)
805 return tuple(exceptions)
808def new_given_signature(original_sig, given_kwargs):
809 """Make an updated signature for the wrapped test."""
810 return original_sig.replace(
811 parameters=[
812 p
813 for p in original_sig.parameters.values()
814 if not (
815 p.name in given_kwargs
816 and p.kind in (p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY)
817 )
818 ],
819 return_annotation=None,
820 )
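# Illustrative sketch (not part of core.py): new_given_signature() drops every
# parameter that @given supplies, so the wrapped test only exposes what the
# caller must still pass. With an assumed test function:
#
#   def test(self, x, y): ...
#
#   new_given_signature(get_signature(test), {"x": ..., "y": ...})
#   # -> a signature containing only `self`, i.e. the parameters @given does
#   #    not provide.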
823def default_executor(data, function):
824 return function(data)
827def get_executor(runner):
828 try:
829 execute_example = runner.execute_example
830 except AttributeError:
831 pass
832 else:
833 return lambda data, function: execute_example(partial(function, data))
835 if hasattr(runner, "setup_example") or hasattr(runner, "teardown_example"):
836 setup = getattr(runner, "setup_example", None) or (lambda: None)
837 teardown = getattr(runner, "teardown_example", None) or (lambda ex: None)
839 def execute(data, function):
840 token = None
841 try:
842 token = setup()
843 return function(data)
844 finally:
845 teardown(token)
847 return execute
849 return default_executor
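# Illustrative sketch (not part of core.py): the hooks that get_executor()
# looks for on the object a @given test method is bound to. Both hook styles
# are optional, and the class name and bodies here are assumptions for
# illustration only.
#
#   class ExecutorExample(TestCase):
#       def setup_example(self):
#           ...  # runs before each example; its return value is passed to
#                # teardown_example as the single `token` argument
#
#       def teardown_example(self, token):
#           ...  # runs after each example, even if the test body raised
#
#       # Alternatively, take full control of each call; `f` is a
#       # zero-argument callable wrapping the test body:
#       # def execute_example(self, f):
#       #     return f()
#
#       @given(st.integers())
#       def test_something(self, n):
#           ...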
852# This function is a crude solution, a better way of resolving it would probably
853# be to rewrite a bunch of exception handlers to use except*.
854T = TypeVar("T", bound=BaseException)
857def _flatten_group(excgroup: BaseExceptionGroup[T]) -> list[T]:
858 found_exceptions: list[T] = []
859 for exc in excgroup.exceptions:
860 if isinstance(exc, BaseExceptionGroup):
861 found_exceptions.extend(_flatten_group(exc))
862 else:
863 found_exceptions.append(exc)
864 return found_exceptions
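# Illustrative sketch (not part of core.py): _flatten_group() recursively
# unwraps nested groups into a flat list of leaf exceptions, e.g.
#
#   group = BaseExceptionGroup(
#       "g", [ValueError("a"), BaseExceptionGroup("h", [KeyError("b")])]
#   )
#   _flatten_group(group)  # -> [ValueError("a"), KeyError("b")]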
867@contextlib.contextmanager
868def unwrap_markers_from_group() -> Generator[None, None, None]:
869 try:
870 yield
871 except BaseExceptionGroup as excgroup:
872 _frozen_exceptions, non_frozen_exceptions = excgroup.split(Frozen)
874 # group only contains Frozen, reraise the group
875 # it doesn't matter what we raise, since any exceptions get disregarded
876 # and reraised as StopTest if data got frozen.
877 if non_frozen_exceptions is None:
878 raise
879 # in all other cases they are discarded
881 # Can RewindRecursive end up in this group?
882 _, user_exceptions = non_frozen_exceptions.split(
883 lambda e: isinstance(e, (StopTest, HypothesisException))
884 )
886 # this might contain marker exceptions, or internal errors, but not frozen.
887 if user_exceptions is not None:
888 raise
890 # single marker exception - reraise it
891 flattened_non_frozen_exceptions: list[BaseException] = _flatten_group(
892 non_frozen_exceptions
893 )
894 if len(flattened_non_frozen_exceptions) == 1:
895 e = flattened_non_frozen_exceptions[0]
896 # preserve the cause of the original exception to not hinder debugging
897 # note that __context__ is still lost though
898 raise e from e.__cause__
900 # multiple marker exceptions. If we re-raise the whole group we break
901 # a bunch of logic so ....?
902 stoptests, non_stoptests = non_frozen_exceptions.split(StopTest)
904 # TODO: stoptest+hypothesisexception ...? Is it possible? If so, what do?
906 if non_stoptests:
907 # TODO: multiple marker exceptions is easy to produce, but the logic in the
908 # engine does not handle it... so we just reraise the first one for now.
909 e = _flatten_group(non_stoptests)[0]
910 raise e from e.__cause__
911 assert stoptests is not None
913 # multiple stoptests: raising the one with the lowest testcounter
914 raise min(_flatten_group(stoptests), key=lambda s_e: s_e.testcounter)
917class StateForActualGivenExecution:
918 def __init__(
919 self,
920 stuff: Stuff,
921 test: Callable[..., Any],
922 settings: Settings,
923 random: Random,
924 wrapped_test: Any,
925 *,
926 thread_overlap: dict[int, bool] | None = None,
927 ):
928 self.stuff = stuff
929 self.test = test
930 self.settings = settings
931 self.random = random
932 self.wrapped_test = wrapped_test
933 self.thread_overlap = {} if thread_overlap is None else thread_overlap
935 self.test_runner = get_executor(stuff.selfy)
936 self.print_given_args = getattr(
937 wrapped_test, "_hypothesis_internal_print_given_args", True
938 )
940 self.last_exception = None
941 self.falsifying_examples = ()
942 self.ever_executed = False
943 self.xfail_example_reprs: set[str] = set()
944 self.failed_normally = False
945 self.failed_due_to_deadline = False
947 self.explain_traces: dict[None | InterestingOrigin, set[Trace]] = defaultdict(
948 set
949 )
950 self._start_timestamp = time.time()
951 self._string_repr = ""
952 self._timing_features: dict[str, float] = {}
954 self._runner: ConjectureRunner | None = None
956 @property
957 def test_identifier(self) -> str:
958 return getattr(
959 current_pytest_item.value, "nodeid", None
960 ) or get_pretty_function_description(self.wrapped_test)
962 def _should_trace(self):
963 # NOTE: we explicitly support monkeypatching this. Keep the namespace
964 # access intact.
965 _trace_obs = (
966 observability_enabled() and observability.OBSERVABILITY_COLLECT_COVERAGE
967 )
968 _trace_failure = (
969 self.failed_normally
970 and not self.failed_due_to_deadline
971 and {Phase.shrink, Phase.explain}.issubset(self.settings.phases)
972 )
973 return _trace_obs or _trace_failure
975 def execute_once(
976 self,
977 data,
978 *,
979 print_example=False,
980 is_final=False,
981 expected_failure=None,
982 example_kwargs=None,
983 ):
984 """Run the test function once, using ``data`` as input.
986 If the test raises an exception, it will propagate through to the
987 caller of this method. Depending on its type, this could represent
988 an ordinary test failure, or a fatal error, or a control exception.
990 If this method returns normally, the test might have passed, or
991 it might have placed ``data`` in an unsuccessful state and then
992 swallowed the corresponding control exception.
993 """
995 self.ever_executed = True
997 self._string_repr = ""
998 text_repr = None
999 if self.settings.deadline is None and not observability_enabled():
1001 @proxies(self.test)
1002 def test(*args, **kwargs):
1003 with unwrap_markers_from_group(), ensure_free_stackframes():
1004 return self.test(*args, **kwargs)
1006 else:
1008 @proxies(self.test)
1009 def test(*args, **kwargs):
1010 arg_drawtime = math.fsum(data.draw_times.values())
1011 arg_stateful = math.fsum(data._stateful_run_times.values())
1012 arg_gctime = gc_cumulative_time()
1013 with unwrap_markers_from_group(), ensure_free_stackframes():
1014 start = time.perf_counter()
1015 try:
1016 result = self.test(*args, **kwargs)
1017 finally:
1018 finish = time.perf_counter()
1019 in_drawtime = math.fsum(data.draw_times.values()) - arg_drawtime
1020 in_stateful = (
1021 math.fsum(data._stateful_run_times.values()) - arg_stateful
1022 )
1023 in_gctime = gc_cumulative_time() - arg_gctime
1024 runtime = finish - start - in_drawtime - in_stateful - in_gctime
1025 self._timing_features = {
1026 "execute:test": runtime,
1027 "overall:gc": in_gctime,
1028 **data.draw_times,
1029 **data._stateful_run_times,
1030 }
1032 if (
1033 (current_deadline := self.settings.deadline) is not None
1034 # we disable the deadline check under concurrent threads, since
1035 # cpython may switch away from a thread for arbitrarily long.
1036 and not self.thread_overlap.get(threading.get_ident(), False)
1037 ):
1038 if not is_final:
1039 current_deadline = (current_deadline // 4) * 5
1040 if runtime >= current_deadline.total_seconds():
1041 raise DeadlineExceeded(
1042 datetime.timedelta(seconds=runtime), self.settings.deadline
1043 )
1044 return result
1046 def run(data: ConjectureData) -> None:
1047 # Set up dynamic context needed by a single test run.
1048 if self.stuff.selfy is not None:
1049 data.hypothesis_runner = self.stuff.selfy
1050 # Generate all arguments to the test function.
1051 args = self.stuff.args
1052 kwargs = dict(self.stuff.kwargs)
1053 if example_kwargs is None:
1054 kw, argslices = context.prep_args_kwargs_from_strategies(
1055 self.stuff.given_kwargs
1056 )
1057 else:
1058 kw = example_kwargs
1059 argslices = {}
1060 kwargs.update(kw)
1061 if expected_failure is not None:
1062 nonlocal text_repr
1063 text_repr = repr_call(test, args, kwargs)
1065 if print_example or current_verbosity() >= Verbosity.verbose:
1066 printer = RepresentationPrinter(context=context)
1067 if print_example:
1068 printer.text("Falsifying example:")
1069 else:
1070 printer.text("Trying example:")
1072 if self.print_given_args:
1073 printer.text(" ")
1074 printer.repr_call(
1075 test.__name__,
1076 args,
1077 kwargs,
1078 force_split=True,
1079 arg_slices=argslices,
1080 leading_comment=(
1081 "# " + context.data.slice_comments[(0, 0)]
1082 if (0, 0) in context.data.slice_comments
1083 else None
1084 ),
1085 avoid_realization=data.provider.avoid_realization,
1086 )
1087 report(printer.getvalue())
1089 if observability_enabled():
1090 printer = RepresentationPrinter(context=context)
1091 printer.repr_call(
1092 test.__name__,
1093 args,
1094 kwargs,
1095 force_split=True,
1096 arg_slices=argslices,
1097 leading_comment=(
1098 "# " + context.data.slice_comments[(0, 0)]
1099 if (0, 0) in context.data.slice_comments
1100 else None
1101 ),
1102 avoid_realization=data.provider.avoid_realization,
1103 )
1104 self._string_repr = printer.getvalue()
1106 try:
1107 return test(*args, **kwargs)
1108 except TypeError as e:
1109 # If we sampled from a sequence of strategies, AND failed with a
1110 # TypeError, *AND that exception mentions SearchStrategy*, add a note:
1111 if (
1112 "SearchStrategy" in str(e)
1113 and data._sampled_from_all_strategies_elements_message is not None
1114 ):
1115 msg, format_arg = data._sampled_from_all_strategies_elements_message
1116 add_note(e, msg.format(format_arg))
1117 raise
1118 finally:
1119 if data._stateful_repr_parts is not None:
1120 self._string_repr = "\n".join(data._stateful_repr_parts)
1122 if observability_enabled():
1123 printer = RepresentationPrinter(context=context)
1124 for name, value in data._observability_args.items():
1125 if name.startswith("generate:Draw "):
1126 try:
1127 value = data.provider.realize(value)
1128 except BackendCannotProceed: # pragma: no cover
1129 value = "<backend failed to realize symbolic>"
1130 printer.text(f"\n{name.removeprefix('generate:')}: ")
1131 printer.pretty(value)
1133 self._string_repr += printer.getvalue()
1135 # self.test_runner can include the execute_example method, or setup/teardown
1136 # _example, so it's important to get the PRNG and build context in place first.
1137 with (
1138 local_settings(self.settings),
1139 deterministic_PRNG(),
1140 BuildContext(
1141 data, is_final=is_final, wrapped_test=self.wrapped_test
1142 ) as context,
1143 ):
1144 # providers may throw in per_case_context_fn, and we'd like
1145 # `result` to still be set in these cases.
1146 result = None
1147 with data.provider.per_test_case_context_manager():
1148 # Run the test function once, via the executor hook.
1149 # In most cases this will delegate straight to `run(data)`.
1150 result = self.test_runner(data, run)
1152 # If a failure was expected, it should have been raised already, so
1153 # instead raise an appropriate diagnostic error.
1154 if expected_failure is not None:
1155 exception, traceback = expected_failure
1156 if isinstance(exception, DeadlineExceeded) and (
1157 runtime_secs := math.fsum(
1158 v
1159 for k, v in self._timing_features.items()
1160 if k.startswith("execute:")
1161 )
1162 ):
1163 report(
1164 "Unreliable test timings! On an initial run, this "
1165 f"test took {exception.runtime.total_seconds() * 1000:.2f}ms, "
1166 "which exceeded the deadline of "
1167 f"{self.settings.deadline.total_seconds() * 1000:.2f}ms, but "
1168 f"on a subsequent run it took {runtime_secs * 1000:.2f} ms, "
1169 "which did not. If you expect this sort of "
1170 "variability in your test timings, consider turning "
1171 "deadlines off for this test by setting deadline=None."
1172 )
1173 else:
1174 report("Failed to reproduce exception. Expected: \n" + traceback)
1175 raise FlakyFailure(
1176 f"Hypothesis {text_repr} produces unreliable results: "
1177 "Falsified on the first call but did not on a subsequent one",
1178 [exception],
1179 )
1180 return result
1182 def _flaky_replay_to_failure(
1183 self, err: FlakyReplay, context: BaseException
1184 ) -> FlakyFailure:
1185 assert self._runner is not None
1186 # Note that in the mark_interesting case, _context_ itself
1187 # is part of err._interesting_examples - but it's not in
1188 # _runner.interesting_examples - this is fine, as the context
1189 # (i.e., immediate exception) is appended.
1190 interesting_examples = [
1191 self._runner.interesting_examples[origin]
1192 for origin in err._interesting_origins
1193 if origin in self._runner.interesting_examples
1194 ]
1195 exceptions = [result.expected_exception for result in interesting_examples]
1196 exceptions.append(context) # the immediate exception
1197 return FlakyFailure(err.reason, exceptions)
1199 def _execute_once_for_engine(self, data: ConjectureData) -> None:
1200 """Wrapper around ``execute_once`` that intercepts test failure
1201 exceptions and single-test control exceptions, and turns them into
1202 appropriate method calls to `data` instead.
1204 This allows the engine to assume that any exception other than
1205 ``StopTest`` must be a fatal error, and should stop the entire engine.
1206 """
1207 trace: Trace = frozenset()
1208 try:
1209 with Tracer(should_trace=self._should_trace()) as tracer:
1210 try:
1211 result = self.execute_once(data)
1212 if (
1213 data.status == Status.VALID and tracer.branches
1214 ): # pragma: no cover
1215 # This is in fact covered by our *non-coverage* tests, but due
1216 # to the settrace() contention *not* by our coverage tests.
1217 self.explain_traces[None].add(tracer.branches)
1218 finally:
1219 trace = tracer.branches
1220 if result is not None:
1221 fail_health_check(
1222 self.settings,
1223 "Tests run under @given should return None, but "
1224 f"{self.test.__name__} returned {result!r} instead.",
1225 HealthCheck.return_value,
1226 )
1227 except UnsatisfiedAssumption as e:
1228 # An "assume" check failed, so instead we inform the engine that
1229 # this test run was invalid.
1230 try:
1231 data.mark_invalid(e.reason)
1232 except FlakyReplay as err:
1233 # This was unexpected, meaning that the assume was flaky.
1234 # Report it as such.
1235 raise self._flaky_replay_to_failure(err, e) from None
1236 except (StopTest, BackendCannotProceed):
1237 # The engine knows how to handle this control exception, so it's
1238 # OK to re-raise it.
1239 raise
1240 except (
1241 FailedHealthCheck,
1242 *skip_exceptions_to_reraise(),
1243 ):
1244 # These are fatal errors or control exceptions that should stop the
1245 # engine, so we re-raise them.
1246 raise
1247 except failure_exceptions_to_catch() as e:
1248 # If an unhandled (i.e., non-Hypothesis) error was raised by
1249 # Hypothesis-internal code, re-raise it as a fatal error instead
1250 # of treating it as a test failure.
1251 if isinstance(e, BaseExceptionGroup) and len(e.exceptions) == 1:
1252 # When a naked exception is implicitly wrapped in an ExceptionGroup
1253 # due to a re-raising "except*", the ExceptionGroup is constructed in
1254 # the caller's stack frame (see #4183). This workaround is specifically
1255 # for implicit wrapping of naked exceptions by "except*", since explicit
1256 # raising of ExceptionGroup gets the proper traceback in the first place
1257 # - there's no need to handle hierarchical groups here, at least if no
1258 # such implicit wrapping happens inside hypothesis code (we only care
1259 # about the hypothesis-or-not distinction).
1260 #
1261 # 01-25-2025: this was patched to give the correct
1262 # stacktrace in cpython https://github.com/python/cpython/issues/128799.
1263 # can remove once python3.11 is EOL.
1264 tb = e.exceptions[0].__traceback__ or e.__traceback__
1265 else:
1266 tb = e.__traceback__
1267 filepath = traceback.extract_tb(tb)[-1][0]
1268 if (
1269 is_hypothesis_file(filepath)
1270 and not isinstance(e, HypothesisException)
1271 # We expect backend authors to use the provider_conformance test
1272 # to test their backends. If an error occurs there, it is probably
1273 # from their backend, and we would like to treat it as a standard
1274 # error, not a hypothesis-internal error.
1275 and not filepath.endswith(
1276 f"internal{os.sep}conjecture{os.sep}provider_conformance.py"
1277 )
1278 ):
1279 raise
1281 if data.frozen:
1282 # This can happen if an error occurred in a finally
1283 # block somewhere, suppressing our original StopTest.
1284 # We raise a new one here to resume normal operation.
1285 raise StopTest(data.testcounter) from e
1286 else:
1287 # The test failed by raising an exception, so we inform the
1288 # engine that this test run was interesting. This is the normal
1289 # path for test runs that fail.
1290 tb = get_trimmed_traceback()
1291 data.expected_traceback = format_exception(e, tb)
1292 data.expected_exception = e
1293 assert data.expected_traceback is not None # for mypy
1294 verbose_report(data.expected_traceback)
1296 self.failed_normally = True
1298 interesting_origin = InterestingOrigin.from_exception(e)
1299 if trace: # pragma: no cover
1300 # Trace collection is explicitly disabled under coverage.
1301 self.explain_traces[interesting_origin].add(trace)
1302 if interesting_origin.exc_type == DeadlineExceeded:
1303 self.failed_due_to_deadline = True
1304 self.explain_traces.clear()
1305 try:
1306 data.mark_interesting(interesting_origin)
1307 except FlakyReplay as err:
1308 raise self._flaky_replay_to_failure(err, e) from None
1310 finally:
1311 # Conditional here so we can save some time constructing the payload; in
1312 # other cases (without coverage) it's cheap enough to do that regardless.
1313 #
1314 # Note that we have to unconditionally realize data.events, because
1315 # the statistics reported by the pytest plugin use a different flow
1316 # than observability, but still access symbolic events.
1318 try:
1319 data.events = data.provider.realize(data.events)
1320 except BackendCannotProceed:
1321 data.events = {}
1323 if observability_enabled():
1324 if runner := getattr(self, "_runner", None):
1325 phase = runner._current_phase
1326 else: # pragma: no cover # in case of messing with internals
1327 if self.failed_normally or self.failed_due_to_deadline:
1328 phase = "shrink"
1329 else:
1330 phase = "unknown"
1331 backend_desc = f", using backend={self.settings.backend!r}" * (
1332 self.settings.backend != "hypothesis"
1333 and not getattr(runner, "_switch_to_hypothesis_provider", False)
1334 )
1335 try:
1336 data._observability_args = data.provider.realize(
1337 data._observability_args
1338 )
1339 except BackendCannotProceed:
1340 data._observability_args = {}
1342 try:
1343 self._string_repr = data.provider.realize(self._string_repr)
1344 except BackendCannotProceed:
1345 self._string_repr = "<backend failed to realize symbolic arguments>"
1347 data.freeze()
1348 tc = make_testcase(
1349 run_start=self._start_timestamp,
1350 property=self.test_identifier,
1351 data=data,
1352 how_generated=f"during {phase} phase{backend_desc}",
1353 representation=self._string_repr,
1354 arguments=data._observability_args,
1355 timing=self._timing_features,
1356 coverage=tractable_coverage_report(trace) or None,
1357 phase=phase,
1358 backend_metadata=data.provider.observe_test_case(),
1359 )
1360 deliver_observation(tc)
1362 for msg in data.provider.observe_information_messages(
1363 lifetime="test_case"
1364 ):
1365 self._deliver_information_message(**msg)
1366 self._timing_features = {}
1368 def _deliver_information_message(
1369 self, *, type: InfoObservationType, title: str, content: str | dict
1370 ) -> None:
1371 deliver_observation(
1372 InfoObservation(
1373 type=type,
1374 run_start=self._start_timestamp,
1375 property=self.test_identifier,
1376 title=title,
1377 content=content,
1378 )
1379 )
1381 def run_engine(self):
1382 """Run the test function many times, on database input and generated
1383 input, using the Conjecture engine.
1384 """
1385 # Tell pytest to omit the body of this function from tracebacks
1386 __tracebackhide__ = True
1387 try:
1388 database_key = self.wrapped_test._hypothesis_internal_database_key
1389 except AttributeError:
1390 if global_force_seed is None:
1391 database_key = function_digest(self.test)
1392 else:
1393 database_key = None
1395 runner = ConjectureRunner(
1396 self._execute_once_for_engine,
1397 settings=self.settings,
1398 random=self.random,
1399 database_key=database_key,
1400 thread_overlap=self.thread_overlap,
1401 )
1402 self._runner = runner
1403 # Use the Conjecture engine to run the test function many times
1404 # on different inputs.
1405 runner.run()
1406 note_statistics(runner.statistics)
1407 if observability_enabled():
1408 self._deliver_information_message(
1409 type="info",
1410 title="Hypothesis Statistics",
1411 content=describe_statistics(runner.statistics),
1412 )
1413 for msg in (
1414 p if isinstance(p := runner.provider, PrimitiveProvider) else p(None)
1415 ).observe_information_messages(lifetime="test_function"):
1416 self._deliver_information_message(**msg)
1418 if runner.call_count == 0:
1419 return
1420 if runner.interesting_examples:
1421 self.falsifying_examples = sorted(
1422 runner.interesting_examples.values(),
1423 key=lambda d: sort_key(d.nodes),
1424 reverse=True,
1425 )
1426 else:
1427 if runner.valid_examples == 0:
1428 explanations = []
1429 # use a somewhat arbitrary cutoff to avoid recommending spurious
1430 # fixes.
1431 # e.g., a few invalid examples from internal filters when the
1432 # problem is the user generating large inputs, or a
1433 # few overruns during internal mutation when the problem is
1434 # impossible user filters/assumes.
1435 if runner.invalid_examples > min(20, runner.call_count // 5):
1436 explanations.append(
1437 f"{runner.invalid_examples} of {runner.call_count} "
1438 "examples failed a .filter() or assume() condition. Try "
1439 "making your filters or assumes less strict, or rewrite "
1440 "using strategy parameters: "
1441 "st.integers().filter(lambda x: x > 0) fails less often "
1442 "(that is, never) when rewritten as st.integers(min_value=1)."
1443 )
1444 if runner.overrun_examples > min(20, runner.call_count // 5):
1445 explanations.append(
1446 f"{runner.overrun_examples} of {runner.call_count} "
1447 "examples were too large to finish generating; try "
1448 "reducing the typical size of your inputs?"
1449 )
1450 rep = get_pretty_function_description(self.test)
1451 raise Unsatisfiable(
1452 f"Unable to satisfy assumptions of {rep}. "
1453 f"{' Also, '.join(explanations)}"
1454 )
1456 # If we have not traced executions, warn about that now (but only when
1457 # we'd expect to do so reliably, i.e. on CPython>=3.12)
1458 if (
1459 hasattr(sys, "monitoring")
1460 and not PYPY
1461 and self._should_trace()
1462 and not Tracer.can_trace()
1463 ): # pragma: no cover
1464 # actually covered by our tests, but only on >= 3.12
1465 warnings.warn(
1466 "avoiding tracing test function because tool id "
1467 f"{MONITORING_TOOL_ID} is already taken by tool "
1468 f"{sys.monitoring.get_tool(MONITORING_TOOL_ID)}.",
1469 HypothesisWarning,
1470 stacklevel=3,
1471 )
1473 if not self.falsifying_examples:
1474 return
1475 elif not (self.settings.report_multiple_bugs and pytest_shows_exceptiongroups):
1476 # Pretend that we only found one failure, by discarding the others.
1477 del self.falsifying_examples[:-1]
1479 # The engine found one or more failures, so we need to reproduce and
1480 # report them.
1482 errors_to_report = []
1484 report_lines = describe_targets(runner.best_observed_targets)
1485 if report_lines:
1486 report_lines.append("")
1488 explanations = explanatory_lines(self.explain_traces, self.settings)
1489 for falsifying_example in self.falsifying_examples:
1490 fragments = []
1492 ran_example = runner.new_conjecture_data(
1493 falsifying_example.choices, max_choices=len(falsifying_example.choices)
1494 )
1495 ran_example.slice_comments = falsifying_example.slice_comments
1496 tb = None
1497 origin = None
1498 assert falsifying_example.expected_exception is not None
1499 assert falsifying_example.expected_traceback is not None
1500 try:
1501 with with_reporter(fragments.append):
1502 self.execute_once(
1503 ran_example,
1504 print_example=True,
1505 is_final=True,
1506 expected_failure=(
1507 falsifying_example.expected_exception,
1508 falsifying_example.expected_traceback,
1509 ),
1510 )
1511 except StopTest as e:
1512 # Link the expected exception from the first run. Not sure
1513 # how to access the current exception, if it failed
1514 # differently on this run. In fact, in the only known
1515 # reproducer, the StopTest is caused by OVERRUN before the
1516 # test is even executed. Possibly because all initial examples
1517 # failed until the final non-traced replay, and something was
1518 # exhausted? Possibly a FIXME, but sufficiently weird to
1519 # ignore for now.
1520 err = FlakyFailure(
1521 "Inconsistent results: An example failed on the "
1522 "first run but now succeeds (or fails with another "
1523 "error, or is for some reason not runnable).",
1524 # (note: e is a BaseException)
1525 [falsifying_example.expected_exception or e],
1526 )
1527 errors_to_report.append(ReportableError(fragments, err))
1528 except UnsatisfiedAssumption as e: # pragma: no cover # ironically flaky
1529 err = FlakyFailure(
1530 "Unreliable assumption: An example which satisfied "
1531 "assumptions on the first run now fails it.",
1532 [e],
1533 )
1534 errors_to_report.append(ReportableError(fragments, err))
1535 except BaseException as e:
1536 # If we have anything for explain-mode, this is the time to report.
1537 fragments.extend(explanations[falsifying_example.interesting_origin])
1538 error_with_tb = e.with_traceback(get_trimmed_traceback())
1539 errors_to_report.append(ReportableError(fragments, error_with_tb))
1540 tb = format_exception(e, get_trimmed_traceback(e))
1541 origin = InterestingOrigin.from_exception(e)
1542 else:
1543 # execute_once() will always raise either the expected error, or Flaky.
1544 raise NotImplementedError("This should be unreachable")
1545 finally:
1546 ran_example.freeze()
1547 if observability_enabled():
1548 # log our observability line for the final failing example
1549 tc = make_testcase(
1550 run_start=self._start_timestamp,
1551 property=self.test_identifier,
1552 data=ran_example,
1553 how_generated="minimal failing example",
1554 representation=self._string_repr,
1555 arguments=ran_example._observability_args,
1556 timing=self._timing_features,
1557 coverage=None, # Not recorded when we're replaying the MFE
1558 status="passed" if sys.exc_info()[0] else "failed",
1559 status_reason=str(origin or "unexpected/flaky pass"),
1560 metadata={"traceback": tb},
1561 )
1562 deliver_observation(tc)
1564 # Whether or not replay actually raised the exception again, we want
1565 # to print the reproduce_failure decorator for the failing example.
1566 if self.settings.print_blob:
1567 fragments.append(
1568 "\nYou can reproduce this example by temporarily adding "
1569 f"{reproduction_decorator(falsifying_example.choices)} "
1570 "as a decorator on your test case"
1571 )
1573 _raise_to_user(
1574 errors_to_report,
1575 self.settings,
1576 report_lines,
1577 # A backend might report a failure and then report verified afterwards,
1578 # which is to be interpreted as "there are no more failures *other
1579 # than what we already reported*". Do not report this as unsound.
1580 unsound_backend=(
1581 runner._verified_by_backend
1582 if runner._verified_by_backend and not runner._backend_found_failure
1583 else None
1584 ),
1585 )
1588def _simplify_explicit_errors(errors: list[ReportableError]) -> list[ReportableError]:
1589 """
1590 Group explicit example errors by their InterestingOrigin, keeping only the
1591 simplest one, and adding a note of how many other examples failed with the same
1592 error.
1593 """
1594 by_origin: dict[InterestingOrigin, list[ReportableError]] = defaultdict(list)
1595 for error in errors:
1596 origin = InterestingOrigin.from_exception(error.exception)
1597 by_origin[origin].append(error)
1599 result = []
1600 for group in by_origin.values():
1601 if len(group) == 1:
1602 result.append(group[0])
1603 else:
1604 # Sort by shortlex of representation (first fragment)
1605 def shortlex_key(error):
1606 repr_str = error.fragments[0] if error.fragments else ""
1607 return (len(repr_str), repr_str)
1609 sorted_group = sorted(group, key=shortlex_key)
1610 simplest = sorted_group[0]
1611 other_count = len(group) - 1
1612 add_note(
1613 simplest.exception,
1614 f"(note: {other_count} other explicit example{'s' * (other_count > 1)} "
1615 "also failed with this error; use Verbosity.verbose to view)",
1616 )
1617 result.append(simplest)
1619 return result
1622def _raise_to_user(
1623 errors_to_report, settings, target_lines, trailer="", *, unsound_backend=None
1624):
1625 """Helper function for attaching notes and grouping multiple errors."""
1626 failing_prefix = "Falsifying example: "
1627 ls = []
1628 for error in errors_to_report:
1629 for note in error.fragments:
1630 add_note(error.exception, note)
1631 if note.startswith(failing_prefix):
1632 ls.append(note.removeprefix(failing_prefix))
1633 if current_pytest_item.value:
1634 current_pytest_item.value._hypothesis_failing_examples = ls
1636 if len(errors_to_report) == 1:
1637 the_error_hypothesis_found = errors_to_report[0].exception
1638 else:
1639 assert errors_to_report
1640 the_error_hypothesis_found = BaseExceptionGroup(
1641 f"Hypothesis found {len(errors_to_report)} distinct failures{trailer}.",
1642 [error.exception for error in errors_to_report],
1643 )
1645 if settings.verbosity >= Verbosity.normal:
1646 for line in target_lines:
1647 add_note(the_error_hypothesis_found, line)
1649 if unsound_backend:
1650 add_note(
1651 the_error_hypothesis_found,
1652 f"backend={unsound_backend!r} claimed to verify this test passes - "
1653 "please send them a bug report!",
1654 )
1656 raise the_error_hypothesis_found
1659@contextlib.contextmanager
1660def fake_subTest(self, msg=None, **__):
1661 """Monkeypatch for `unittest.TestCase.subTest` during `@given`.
1663 If we don't patch this out, each failing example is reported as a
1664 separate failing test by the unittest test runner, which is
1665 obviously incorrect. We therefore replace it for the duration with
1666 this version.
1667 """
1668 warnings.warn(
1669 "subTest per-example reporting interacts badly with Hypothesis "
1670 "trying hundreds of examples, so we disable it for the duration of "
1671 "any test that uses `@given`.",
1672 HypothesisWarning,
1673 stacklevel=2,
1674 )
1675 yield
1678@dataclass(slots=False, frozen=False)
1679class HypothesisHandle:
1680 """This object is provided as the .hypothesis attribute on @given tests.
1682 Downstream users can reassign its attributes to insert custom logic into
1683 the execution of each case, for example by converting an async into a
1684 sync function.
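A minimal sketch of that pattern (assuming an asyncio coroutine test; the
names ``test_foo`` and ``as_sync`` are purely illustrative):
.. code-block:: python
    import asyncio
    def as_sync(coro_fn):
        def inner(*args, **kwargs):
            return asyncio.run(coro_fn(*args, **kwargs))
        return inner
    test_foo.hypothesis.inner_test = as_sync(test_foo.hypothesis.inner_test)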
1686 This must be an attribute of an attribute, because reassignment of a
1687 first-level attribute would not be visible to Hypothesis if the function
1688 had been decorated before the assignment.
1690 See https://github.com/HypothesisWorks/hypothesis/issues/1257 for more
1691 information.
1692 """
1694 inner_test: Any
1695 _get_fuzz_target: Any
1696 _given_kwargs: Any
1698 @property
1699 def fuzz_one_input(
1700 self,
1701 ) -> Callable[[bytes | bytearray | memoryview | BinaryIO], bytes | None]:
1702 """
1703 Run the test as a fuzz target, driven with the ``buffer`` of bytes.
1705 Depending on the passed ``buffer``, one of three things will happen:
1707 * If the bytestring was invalid, for example because it was too short or was
1708 filtered out by |assume| or |.filter|, |fuzz_one_input| returns ``None``.
1709 * If the bytestring was valid and the test passed, |fuzz_one_input| returns
1710 a canonicalised and pruned bytestring which will replay that test case.
1711 This is provided as an option to improve the performance of mutating
1712 fuzzers, but can safely be ignored.
1713 * If the test *failed*, i.e. raised an exception, |fuzz_one_input| will
1714 add the pruned buffer to :ref:`the Hypothesis example database <database>`
1715 and then re-raise that exception. All you need to do to reproduce,
1716 minimize, and de-duplicate all the failures found via fuzzing is run
1717 your test suite!
1719 To reduce the performance impact of database writes, |fuzz_one_input| only
1720 records failing inputs which would be valid shrinks for a known failure -
1721 meaning writes are somewhere between constant and log(N) rather than linear
1722 in runtime. However, this tracking only works within a persistent fuzzing
1723 process; for forkserver fuzzers we recommend ``database=None`` for the main
1724 run, and then replaying with a database enabled if you need to analyse
1725 failures.
1727 Note that the interpretation of both input and output bytestrings is
1728 specific to the exact version of Hypothesis you are using and the strategies
1729 given to the test, just like the :ref:`database <database>` and
1730 |@reproduce_failure|.
1732 Interaction with |@settings|
1733 ----------------------------
1735 |fuzz_one_input| uses just enough of Hypothesis' internals to drive your
1736 test function with a bytestring, and most settings therefore have no effect
1737 in this mode. We recommend running your tests the usual way before fuzzing
1738 to get the benefits of health checks, as well as afterwards to replay,
1739 shrink, deduplicate, and report whatever errors were discovered.
1741 * |settings.database| *is* used by |fuzz_one_input| - adding failures to
1742 the database to be replayed when
1743 you next run your tests is our preferred reporting mechanism and response
1744 to `the 'fuzzer taming' problem <https://blog.regehr.org/archives/925>`__.
1745 * |settings.verbosity| and |settings.stateful_step_count| work as usual.
1746 * The |~settings.deadline|, |~settings.derandomize|, |~settings.max_examples|,
1747 |~settings.phases|, |~settings.print_blob|, |~settings.report_multiple_bugs|,
1748 and |~settings.suppress_health_check| settings do not affect |fuzz_one_input|.
1750 Example Usage
1751 -------------
1753 .. code-block:: python
1755 @given(st.text())
1756 def test_foo(s): ...
1758 # This is a traditional fuzz target - call it with a bytestring,
1759 # or a binary IO object, and it runs the test once.
1760 fuzz_target = test_foo.hypothesis.fuzz_one_input
1762 # For example:
1763 fuzz_target(b"\\x00\\x00\\x00\\x00\\x00\\x00\\x00\\x00")
1764 fuzz_target(io.BytesIO(b"\\x01"))
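As a rough sketch, wiring this up to a coverage-guided fuzzer such as
:pypi:`atheris` might look like the following (assuming ``atheris`` is
installed; ``test_foo`` is the decorated test from above):
.. code-block:: python
    import sys
    import atheris
    fuzz_target = test_foo.hypothesis.fuzz_one_input
    atheris.Setup(sys.argv, fuzz_target)
    atheris.Fuzz()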
1766 .. tip::
1768 If you expect to discover many failures while using |fuzz_one_input|,
1769 consider wrapping your database with |BackgroundWriteDatabase|, for
1770 low-overhead writes of failures.
1772 .. tip::
1774 | Want an integrated workflow for your team's local tests, CI, and continuous fuzzing?
1775 | Use `HypoFuzz <https://hypofuzz.com/>`__ to fuzz your whole test suite, and find more bugs with the same tests!
1777 .. seealso::
1779 See also the :doc:`/how-to/external-fuzzers` how-to.
1780 """
1781 # Note: most users, if they care about fuzzer performance, will access the
1782 # property and assign it to a local variable to move the attribute lookup
1783 # outside their fuzzing loop / before the fork point. We cache it anyway,
1784 # so that naive or unusual use-cases get the best possible performance too.
1785 try:
1786 return self.__cached_target # type: ignore
1787 except AttributeError:
1788 self.__cached_target = self._get_fuzz_target()
1789 return self.__cached_target
1792@overload
1793def given(
1794 _: EllipsisType, /
1795) -> Callable[
1796 [Callable[..., Coroutine[Any, Any, None] | None]], Callable[[], None]
1797]: # pragma: no cover
1798 ...
1801@overload
1802def given(
1803 *_given_arguments: SearchStrategy[Any],
1804) -> Callable[
1805 [Callable[..., Coroutine[Any, Any, None] | None]], Callable[..., None]
1806]: # pragma: no cover
1807 ...
1810@overload
1811def given(
1812 **_given_kwargs: SearchStrategy[Any] | EllipsisType,
1813) -> Callable[
1814 [Callable[..., Coroutine[Any, Any, None] | None]], Callable[..., None]
1815]: # pragma: no cover
1816 ...
1819def given(
1820 *_given_arguments: SearchStrategy[Any] | EllipsisType,
1821 **_given_kwargs: SearchStrategy[Any] | EllipsisType,
1822) -> Callable[[Callable[..., Coroutine[Any, Any, None] | None]], Callable[..., None]]:
1823 """
1824 The |@given| decorator turns a function into a Hypothesis test. This is the
1825 main entry point to Hypothesis.
1827 .. seealso::
1829 See also the :doc:`/tutorial/introduction` tutorial, which introduces
1830 defining Hypothesis tests with |@given|.
1832 .. _given-arguments:
1834 Arguments to ``@given``
1835 -----------------------
1837 Arguments to |@given| may be either positional or keyword arguments:
1839 .. code-block:: python
1841 @given(st.integers(), st.floats())
1842 def test_one(x, y):
1843 pass
1845 @given(x=st.integers(), y=st.floats())
1846 def test_two(x, y):
1847 pass
1849 If using keyword arguments, the arguments may appear in any order, as with
1850 standard Python functions:
1852 .. code-block:: python
1854 # different order, but still equivalent to before
1855 @given(y=st.floats(), x=st.integers())
1856 def test(x, y):
1857 assert isinstance(x, int)
1858 assert isinstance(y, float)
1860 If |@given| is provided fewer positional arguments than the decorated test,
1861 the test arguments are filled in on the right side, leaving the leftmost
1862 positional arguments unfilled:
1864 .. code-block:: python
1866 @given(st.integers(), st.floats())
1867 def test(manual_string, y, z):
1868 assert manual_string == "x"
1869 assert isinstance(y, int)
1870 assert isinstance(z, float)
1872 # `test` is now a callable which takes one argument `manual_string`
1874 test("x")
1875 # or equivalently:
1876 test(manual_string="x")
1878 The reason for this "from the right" behavior is to support using |@given|
1879 with instance methods, by automatically passing through ``self``:
1881 .. code-block:: python
1883 class MyTest(TestCase):
1884 @given(st.integers())
1885 def test(self, x):
1886 assert isinstance(self, MyTest)
1887 assert isinstance(x, int)
1889 If (and only if) using keyword arguments, |@given| may be combined with
1890 ``**kwargs`` or ``*args``:
1892 .. code-block:: python
1894 @given(x=integers(), y=integers())
1895 def test(x, **kwargs):
1896 assert "y" in kwargs
1898 @given(x=integers(), y=integers())
1899 def test(x, *args, **kwargs):
1900 assert args == ()
1901 assert "x" not in kwargs
1902 assert "y" in kwargs
1904 It is an error to:
1906 * Mix positional and keyword arguments to |@given|.
1907 * Use |@given| with a function that has a default value for an argument.
1908 * Use |@given| with positional arguments with a function that uses ``*args``,
1909 ``**kwargs``, or keyword-only arguments.
1911 The function returned by |@given| has all the same arguments as the original
1912 test, minus those that are filled in by |@given|. See the :ref:`notes on
1913 framework compatibility <framework-compatibility>` for how this interacts
1914 with features of other testing libraries, such as :pypi:`pytest` fixtures.
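Instead of a strategy, you may also pass ``...`` (Ellipsis), in which case the
strategy is inferred from the argument's type annotation via ``st.from_type()``;
a single positional ``...`` infers strategies for every argument:
.. code-block:: python
    @given(...)
    def test(x: int, y: float):
        assert isinstance(x, int)
        assert isinstance(y, float)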
1915 """
1917 if currently_in_test_context():
1918 fail_health_check(
1919 Settings(),
1920 "Nesting @given tests results in quadratic generation and shrinking "
1921 "behavior, and can usually be more cleanly expressed by replacing the "
1922 "inner function with an st.data() parameter on the outer @given."
1923 "\n\n"
1924 "If it is difficult or impossible to refactor this test to remove the "
1925 "nested @given, you can disable this health check with "
1926 "@settings(suppress_health_check=[HealthCheck.nested_given]) on the "
1927 "outer @given. See "
1928 "https://hypothesis.readthedocs.io/en/latest/reference/api.html#hypothesis.HealthCheck "
1929 "for details.",
1930 HealthCheck.nested_given,
1931 )
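# Illustrative refactor for the advice above (names are placeholders): instead of
#     @given(st.integers())
#     def outer(x):
#         @given(st.integers())
#         def inner(y): ...
#         inner()
# draw the inner value within a single test:
#     @given(st.integers(), st.data())
#     def outer(x, data):
#         y = data.draw(st.integers())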
1933 def run_test_as_given(test):
1934 if inspect.isclass(test):
1935 # Provide a meaningful error to users, instead of exceptions from
1936 # internals that assume we're dealing with a function.
1937 raise InvalidArgument("@given cannot be applied to a class")
1939 if (
1940 "_pytest" in sys.modules
1941 and "_pytest.fixtures" in sys.modules
1942 and (
1943 tuple(map(int, sys.modules["_pytest"].__version__.split(".")[:2]))
1944 >= (8, 4)
1945 )
1946 and isinstance(
1947 test, sys.modules["_pytest.fixtures"].FixtureFunctionDefinition
1948 )
1949 ): # pragma: no cover # covered by pytest/test_fixtures, but not by cover/
1950 raise InvalidArgument("@given cannot be applied to a pytest fixture")
1952 given_arguments = tuple(_given_arguments)
1953 given_kwargs = dict(_given_kwargs)
1955 original_sig = get_signature(test)
1956 if given_arguments == (Ellipsis,) and not given_kwargs:
1957 # user indicated that they want to infer all arguments
1958 given_kwargs = {
1959 p.name: Ellipsis
1960 for p in original_sig.parameters.values()
1961 if p.kind in (p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY)
1962 }
1963 given_arguments = ()
1965 check_invalid = is_invalid_test(
1966 test, original_sig, given_arguments, given_kwargs
1967 )
1969 # If the argument check found problems, return a dummy test function
1970 # that will raise an error if it is actually called.
1971 if check_invalid is not None:
1972 return check_invalid
1974 # Because the argument check succeeded, we can convert @given's
1975 # positional arguments into keyword arguments for simplicity.
1976 if given_arguments:
1977 assert not given_kwargs
1978 posargs = [
1979 p.name
1980 for p in original_sig.parameters.values()
1981 if p.kind is p.POSITIONAL_OR_KEYWORD
1982 ]
1983 given_kwargs = dict(
1984 list(zip(posargs[::-1], given_arguments[::-1], strict=False))[::-1]
1985 )
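# For illustration: with posargs == ["self", "x", "y"] and two positional
# strategies, the right-aligned pairing above produces
# {"x": <first strategy>, "y": <second strategy>}, leaving "self" unfilled.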
1986 # These have been converted, so delete them to prevent accidental use.
1987 del given_arguments
1989 new_signature = new_given_signature(original_sig, given_kwargs)
1991 # Use type information to convert "infer" arguments into appropriate strategies.
1992 if ... in given_kwargs.values():
1993 hints = get_type_hints(test)
1994 for name in [name for name, value in given_kwargs.items() if value is ...]:
1995 if name not in hints:
1996 return _invalid(
1997 f"passed {name}=... for {test.__name__}, but {name} has "
1998 "no type annotation",
1999 test=test,
2000 given_kwargs=given_kwargs,
2001 )
2002 given_kwargs[name] = st.from_type(hints[name])
2004 # only raise if the same thread uses two different executors, not if two
2005 # different threads use different executors.
2006 thread_local = ThreadLocal(prev_self=lambda: not_set)
2007 # maps thread_id to whether that thread overlaps in execution with any
2008 # other thread in this @given. We use this to detect whether an @given is
2009 # being run from multiple different threads at once, which informs
2010 # decisions like whether to raise DeadlineExceeded or HealthCheck.too_slow.
2011 thread_overlap: dict[int, bool] = {}
2012 thread_overlap_lock = Lock()
2014 @impersonate(test)
2015 @define_function_signature(test.__name__, test.__doc__, new_signature)
2016 def wrapped_test(*arguments, **kwargs):
2017 # Tell pytest to omit the body of this function from tracebacks
2018 __tracebackhide__ = True
2019 with thread_overlap_lock:
2020 for overlap_thread_id in thread_overlap:
2021 thread_overlap[overlap_thread_id] = True
2023 threadid = threading.get_ident()
2024 # if there are existing threads when this thread starts, then
2025 # this thread starts in an overlapped state.
2026 has_existing_threads = len(thread_overlap) > 0
2027 thread_overlap[threadid] = has_existing_threads
2029 try:
2030 test = wrapped_test.hypothesis.inner_test
2031 if getattr(test, "is_hypothesis_test", False):
2032 raise InvalidArgument(
2033 f"You have applied @given to the test {test.__name__} more than "
2034 "once, which wraps the test several times and is extremely slow. "
2035 "A similar effect can be gained by combining the arguments "
2036 "of the two calls to given. For example, instead of "
2037 "@given(booleans()) @given(integers()), you could write "
2038 "@given(booleans(), integers())"
2039 )
2041 settings = wrapped_test._hypothesis_internal_use_settings
2042 random = get_random_for_wrapped_test(test, wrapped_test)
2043 arguments, kwargs, stuff = process_arguments_to_given(
2044 wrapped_test,
2045 arguments,
2046 kwargs,
2047 given_kwargs,
2048 new_signature.parameters,
2049 )
2051 if (
2052 inspect.iscoroutinefunction(test)
2053 and get_executor(stuff.selfy) is default_executor
2054 ):
2055 # See https://github.com/HypothesisWorks/hypothesis/issues/3054
2056 # If our custom executor doesn't handle coroutines, or we return an
2057 # awaitable from a non-async-def function, we just rely on the
2058 # return_value health check, which catches most such user errors.
2059 raise InvalidArgument(
2060 "Hypothesis doesn't know how to run async test functions like "
2061 f"{test.__name__}. You'll need to write a custom executor, "
2062 "or use a library like pytest-asyncio or pytest-trio which can "
2063 "handle the translation for you.\n See https://hypothesis."
2064 "readthedocs.io/en/latest/details.html#custom-function-execution"
2065 )
2067 runner = stuff.selfy
2068 if isinstance(stuff.selfy, TestCase) and test.__name__ in dir(TestCase):
2069 fail_health_check(
2070 settings,
2071 f"You have applied @given to the method {test.__name__}, which is "
2072 "used by the unittest runner but is not itself a test. "
2073 "This is not useful in any way.",
2074 HealthCheck.not_a_test_method,
2075 )
2076 if bad_django_TestCase(runner): # pragma: no cover
2077 # Covered by the Django tests, but not the pytest coverage task
2078 raise InvalidArgument(
2079 "You have applied @given to a method on "
2080 f"{type(runner).__qualname__}, but this "
2081 "class does not inherit from the supported versions in "
2082 "`hypothesis.extra.django`. Use the Hypothesis variants "
2083 "to ensure that each example is run in a separate "
2084 "database transaction."
2085 )
2087 nonlocal thread_local
2088 # Check selfy really is self (not e.g. a mock) before we health-check
2089 cur_self = (
2090 stuff.selfy
2091 if getattr(type(stuff.selfy), test.__name__, None) is wrapped_test
2092 else None
2093 )
2094 if thread_local.prev_self is not_set:
2095 thread_local.prev_self = cur_self
2096 elif cur_self is not thread_local.prev_self:
2097 fail_health_check(
2098 settings,
2099 f"The method {test.__qualname__} was called from multiple "
2100 "different executors. This may lead to flaky tests and "
2101 "nonreproducible errors when replaying from database."
2102 "\n\n"
2103 "Unlike most health checks, HealthCheck.differing_executors "
2104 "warns about a correctness issue with your test. We "
2105 "therefore recommend fixing the underlying issue, rather "
2106 "than suppressing this health check. However, if you are "
2107 "confident this health check can be safely disabled, you can "
2108 "do so with "
2109 "@settings(suppress_health_check=[HealthCheck.differing_executors]). "
2110 "See "
2111 "https://hypothesis.readthedocs.io/en/latest/reference/api.html#hypothesis.HealthCheck "
2112 "for details.",
2113 HealthCheck.differing_executors,
2114 )
2116 state = StateForActualGivenExecution(
2117 stuff,
2118 test,
2119 settings,
2120 random,
2121 wrapped_test,
2122 thread_overlap=thread_overlap,
2123 )
2125 # If there was a @reproduce_failure decorator, use it to reproduce
2126 # the error (or complain that we couldn't). Either way, this will
2127 # always raise some kind of error.
2128 if (
2129 reproduce_failure := wrapped_test._hypothesis_internal_use_reproduce_failure
2130 ) is not None:
2131 expected_version, failure = reproduce_failure
2132 if expected_version != __version__:
2133 raise InvalidArgument(
2134 "Attempting to reproduce a failure from a different "
2135 f"version of Hypothesis. This failure is from {expected_version}, but "
2136 f"you are currently running {__version__!r}. Please change your "
2137 "Hypothesis version to a matching one."
2138 )
2139 try:
2140 state.execute_once(
2141 ConjectureData.for_choices(decode_failure(failure)),
2142 print_example=True,
2143 is_final=True,
2144 )
2145 raise DidNotReproduce(
2146 "Expected the test to raise an error, but it "
2147 "completed successfully."
2148 )
2149 except StopTest:
2150 raise DidNotReproduce(
2151 "The shape of the test data has changed in some way "
2152 "from where this blob was defined. Are you sure "
2153 "you're running the same test?"
2154 ) from None
2155 except UnsatisfiedAssumption:
2156 raise DidNotReproduce(
2157 "The test data failed to satisfy an assumption in the "
2158 "test. Have you added it since this blob was generated?"
2159 ) from None
2161 # There was no @reproduce_failure, so start by running any explicit
2162 # examples from @example decorators.
2163 if errors := list(
2164 execute_explicit_examples(
2165 state, wrapped_test, arguments, kwargs, original_sig
2166 )
2167 ):
2168 # If we're not going to report multiple bugs, we would have
2169 # stopped running explicit examples at the first failure.
2170 assert len(errors) == 1 or state.settings.report_multiple_bugs
2172 # If an explicit example raised a 'skip' exception, ensure it's never
2173 # wrapped up in an exception group. Because we break out of the loop
2174 # immediately on finding a skip, if present it's always the last error.
2175 if isinstance(errors[-1].exception, skip_exceptions_to_reraise()):
2176 # Covered by `test_issue_3453_regression`, just in a subprocess.
2177 del errors[:-1] # pragma: no cover
2179 if state.settings.verbosity < Verbosity.verbose:
2180 # keep only one error per interesting origin, unless
2181 # verbosity is high
2182 errors = _simplify_explicit_errors(errors)
2184 _raise_to_user(errors, state.settings, [], " in explicit examples")
2186 # If there were any explicit examples, they all ran successfully.
2187 # The next step is to use the Conjecture engine to run the test on
2188 # many different inputs.
2189 ran_explicit_examples = (
2190 Phase.explicit in state.settings.phases
2191 and getattr(wrapped_test, "hypothesis_explicit_examples", ())
2192 )
2193 SKIP_BECAUSE_NO_EXAMPLES = unittest.SkipTest(
2194 "Hypothesis has been told to run no examples for this test."
2195 )
2196 if not (
2197 Phase.reuse in settings.phases or Phase.generate in settings.phases
2198 ):
2199 if not ran_explicit_examples:
2200 raise SKIP_BECAUSE_NO_EXAMPLES
2201 return
2203 try:
2204 if isinstance(runner, TestCase) and hasattr(runner, "subTest"):
2205 subTest = runner.subTest
2206 try:
2207 runner.subTest = types.MethodType(fake_subTest, runner)
2208 state.run_engine()
2209 finally:
2210 runner.subTest = subTest
2211 else:
2212 state.run_engine()
2213 except BaseException as e:
2214 # The exception caught here should either be an actual test
2215 # failure (or BaseExceptionGroup), or some kind of fatal error
2216 # that caused the engine to stop.
2217 generated_seed = (
2218 wrapped_test._hypothesis_internal_use_generated_seed
2219 )
2220 with local_settings(settings):
2221 if not (state.failed_normally or generated_seed is None):
2222 if running_under_pytest:
2223 report(
2224 f"You can add @seed({generated_seed}) to this test or "
2225 f"run pytest with --hypothesis-seed={generated_seed} "
2226 "to reproduce this failure."
2227 )
2228 else:
2229 report(
2230 f"You can add @seed({generated_seed}) to this test to "
2231 "reproduce this failure."
2232 )
2233 # The dance here is to avoid showing users long tracebacks
2234 # full of Hypothesis internals they don't care about.
2235 # We have to do this inline, to avoid adding another
2236 # internal stack frame just when we've removed the rest.
2237 #
2238 # Using a variable for our trimmed error ensures that the line
2239 # which will actually appear in tracebacks is as clear as
2240 # possible - "raise the_error_hypothesis_found".
2241 the_error_hypothesis_found = e.with_traceback(
2242 None
2243 if isinstance(e, BaseExceptionGroup)
2244 else get_trimmed_traceback()
2245 )
2246 raise the_error_hypothesis_found
2248 if not (ran_explicit_examples or state.ever_executed):
2249 raise SKIP_BECAUSE_NO_EXAMPLES
2250 finally:
2251 with thread_overlap_lock:
2252 del thread_overlap[threadid]
2254 def _get_fuzz_target() -> (
2255 Callable[[bytes | bytearray | memoryview | BinaryIO], bytes | None]
2256 ):
2257 # Because fuzzing interfaces are very performance-sensitive, we use a
2258 # somewhat more complicated structure here. `_get_fuzz_target()` is
2259 # called by the `HypothesisHandle.fuzz_one_input` property, allowing
2260 # us to defer our collection of the settings, random instance, and
2261 # reassignable `inner_test` (etc) until `fuzz_one_input` is accessed.
2262 #
2263 # We then share the performance cost of setting up `state` between
2264 # many invocations of the target. We explicitly force `deadline=None`
2265 # for performance reasons, saving ~40% of the runtime of an empty test.
2266 test = wrapped_test.hypothesis.inner_test
2267 settings = Settings(
2268 parent=wrapped_test._hypothesis_internal_use_settings, deadline=None
2269 )
2270 random = get_random_for_wrapped_test(test, wrapped_test)
2271 _args, _kwargs, stuff = process_arguments_to_given(
2272 wrapped_test, (), {}, given_kwargs, new_signature.parameters
2273 )
2274 assert not _args
2275 assert not _kwargs
2276 state = StateForActualGivenExecution(
2277 stuff,
2278 test,
2279 settings,
2280 random,
2281 wrapped_test,
2282 thread_overlap=thread_overlap,
2283 )
2284 database_key = function_digest(test) + b".secondary"
2285 # We track the minimal-so-far example for each distinct origin, so
2286 # that we store log(n) rather than n examples over long runs. In particular,
2287 # this means we saturate for common errors in long runs instead of
2288 # storing huge volumes of low-value data.
2289 minimal_failures: dict = {}
2291 def fuzz_one_input(
2292 buffer: bytes | bytearray | memoryview | BinaryIO,
2293 ) -> bytes | None:
2294 # This inner part is all that the fuzzer will actually run,
2295 # so we keep it as small and as fast as possible.
2296 if isinstance(buffer, io.IOBase):
2297 buffer = buffer.read(BUFFER_SIZE)
2298 assert isinstance(buffer, (bytes, bytearray, memoryview))
2299 data = ConjectureData(
2300 random=None,
2301 provider=BytestringProvider,
2302 provider_kw={"bytestring": buffer},
2303 )
2304 try:
2305 state.execute_once(data)
2306 status = Status.VALID
2307 except StopTest:
2308 status = data.status
2309 return None
2310 except UnsatisfiedAssumption:
2311 status = Status.INVALID
2312 return None
2313 except BaseException:
2314 known = minimal_failures.get(data.interesting_origin)
2315 if settings.database is not None and (
2316 known is None or sort_key(data.nodes) <= sort_key(known)
2317 ):
2318 settings.database.save(
2319 database_key, choices_to_bytes(data.choices)
2320 )
2321 minimal_failures[data.interesting_origin] = data.nodes
2322 status = Status.INTERESTING
2323 raise
2324 finally:
2325 if observability_enabled():
2326 data.freeze()
2327 tc = make_testcase(
2328 run_start=state._start_timestamp,
2329 property=state.test_identifier,
2330 data=data,
2331 how_generated="fuzz_one_input",
2332 representation=state._string_repr,
2333 arguments=data._observability_args,
2334 timing=state._timing_features,
2335 coverage=None,
2336 status=status,
2337 backend_metadata=data.provider.observe_test_case(),
2338 )
2339 deliver_observation(tc)
2340 state._timing_features = {}
2342 assert isinstance(data.provider, BytestringProvider)
2343 return bytes(data.provider.drawn)
2345 fuzz_one_input.__doc__ = HypothesisHandle.fuzz_one_input.__doc__
2346 return fuzz_one_input
2348 # After having created the decorated test function, we need to copy
2349 # over some attributes to make the switch as seamless as possible.
2351 for attrib in dir(test):
2352 if not (attrib.startswith("_") or hasattr(wrapped_test, attrib)):
2353 setattr(wrapped_test, attrib, getattr(test, attrib))
2354 wrapped_test.is_hypothesis_test = True
2355 if hasattr(test, "_hypothesis_internal_settings_applied"):
2356 # Used to check if @settings is applied twice.
2357 wrapped_test._hypothesis_internal_settings_applied = True
2358 wrapped_test._hypothesis_internal_use_seed = getattr(
2359 test, "_hypothesis_internal_use_seed", None
2360 )
2361 wrapped_test._hypothesis_internal_use_settings = (
2362 getattr(test, "_hypothesis_internal_use_settings", None) or Settings.default
2363 )
2364 wrapped_test._hypothesis_internal_use_reproduce_failure = getattr(
2365 test, "_hypothesis_internal_use_reproduce_failure", None
2366 )
2367 wrapped_test.hypothesis = HypothesisHandle(test, _get_fuzz_target, given_kwargs)
2368 return wrapped_test
2370 return run_test_as_given
2373def find(
2374 specifier: SearchStrategy[Ex],
2375 condition: Callable[[Any], bool],
2376 *,
2377 settings: Settings | None = None,
2378 random: Random | None = None,
2379 database_key: bytes | None = None,
2380) -> Ex:
2381 """Returns the minimal example from the given strategy ``specifier`` that
2382 matches the predicate function ``condition``."""
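# For illustration (a hypothetical call, not executed here):
#   find(st.integers(min_value=0), lambda x: x > 100)
# will typically return 101, since integers shrink towards zero and 101 is the
# smallest value satisfying the predicate.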
2383 if settings is None:
2384 settings = Settings(max_examples=2000)
2385 settings = Settings(
2386 settings, suppress_health_check=list(HealthCheck), report_multiple_bugs=False
2387 )
2389 if database_key is None and settings.database is not None:
2390 # Note: the database key is not guaranteed to be unique. If it collides,
2391 # replaying examples from the database may fail to reproduce, because they
2392 # are replayed against the wrong condition.
2393 database_key = function_digest(condition)
2395 if not isinstance(specifier, SearchStrategy):
2396 raise InvalidArgument(
2397 f"Expected SearchStrategy but got {specifier!r} of "
2398 f"type {type(specifier).__name__}"
2399 )
2400 specifier.validate()
2402 last: list[Ex] = []
2404 @settings
2405 @given(specifier)
2406 def test(v):
2407 if condition(v):
2408 last[:] = [v]
2409 raise Found
2411 if random is not None:
2412 test = seed(random.getrandbits(64))(test)
2414 test._hypothesis_internal_database_key = database_key # type: ignore
2416 try:
2417 test()
2418 except Found:
2419 return last[0]
2421 raise NoSuchExample(get_pretty_function_description(condition))