# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.

"""This module provides the core primitives of Hypothesis, such as given."""
import base64
import contextlib
import dataclasses
import datetime
import inspect
import io
import math
import os
import sys
import threading
import time
import traceback
import types
import unittest
import warnings
import zlib
from collections import defaultdict
from collections.abc import Callable, Coroutine, Generator, Hashable, Iterable, Sequence
from dataclasses import dataclass, field
from functools import partial
from inspect import Parameter
from random import Random
from threading import Lock
from types import EllipsisType
from typing import (
    Any,
    BinaryIO,
    TypeVar,
    overload,
)
from unittest import TestCase

from hypothesis import strategies as st
from hypothesis._settings import (
    HealthCheck,
    Phase,
    Verbosity,
    all_settings,
    local_settings,
    settings as Settings,
)
from hypothesis.control import BuildContext, currently_in_test_context
from hypothesis.database import choices_from_bytes, choices_to_bytes
from hypothesis.errors import (
    BackendCannotProceed,
    DeadlineExceeded,
    DidNotReproduce,
    FailedHealthCheck,
    FlakyFailure,
    FlakyReplay,
    Found,
    Frozen,
    HypothesisException,
    HypothesisWarning,
    InvalidArgument,
    NoSuchExample,
    StopTest,
    Unsatisfiable,
    UnsatisfiedAssumption,
)
from hypothesis.internal import observability
from hypothesis.internal.compat import (
    PYPY,
    BaseExceptionGroup,
    add_note,
    bad_django_TestCase,
    get_type_hints,
    int_from_bytes,
)
from hypothesis.internal.conjecture.choice import ChoiceT
from hypothesis.internal.conjecture.data import ConjectureData, Status
from hypothesis.internal.conjecture.engine import BUFFER_SIZE, ConjectureRunner
from hypothesis.internal.conjecture.junkdrawer import (
    ensure_free_stackframes,
    gc_cumulative_time,
)
from hypothesis.internal.conjecture.providers import (
    BytestringProvider,
    PrimitiveProvider,
)
from hypothesis.internal.conjecture.shrinker import sort_key
from hypothesis.internal.entropy import deterministic_PRNG
from hypothesis.internal.escalation import (
    InterestingOrigin,
    current_pytest_item,
    format_exception,
    get_trimmed_traceback,
    is_hypothesis_file,
)
from hypothesis.internal.healthcheck import fail_health_check
from hypothesis.internal.observability import (
    InfoObservation,
    InfoObservationType,
    deliver_observation,
    make_testcase,
    observability_enabled,
)
from hypothesis.internal.reflection import (
    convert_positional_arguments,
    define_function_signature,
    function_digest,
    get_pretty_function_description,
    get_signature,
    impersonate,
    is_mock,
    nicerepr,
    proxies,
    repr_call,
)
from hypothesis.internal.scrutineer import (
    MONITORING_TOOL_ID,
    Trace,
    Tracer,
    explanatory_lines,
    tractable_coverage_report,
)
from hypothesis.internal.validation import check_type
from hypothesis.reporting import (
    current_verbosity,
    report,
    verbose_report,
    with_reporter,
)
from hypothesis.statistics import describe_statistics, describe_targets, note_statistics
from hypothesis.strategies._internal.misc import NOTHING
from hypothesis.strategies._internal.strategies import (
    Ex,
    SearchStrategy,
    check_strategy,
)
from hypothesis.utils.conventions import not_set
from hypothesis.utils.threading import ThreadLocal
from hypothesis.vendor.pretty import RepresentationPrinter
from hypothesis.version import __version__

TestFunc = TypeVar("TestFunc", bound=Callable)


running_under_pytest = False
pytest_shows_exceptiongroups = True
global_force_seed = None
# `threadlocal` stores "engine-global" constants, which are global relative to a
# ConjectureRunner instance (roughly speaking). Since only one conjecture runner
# instance can be active per thread, making engine constants thread-local prevents
# the ConjectureRunner instances of concurrent threads from treading on each other.
threadlocal = ThreadLocal(_hypothesis_global_random=lambda: None)


@dataclass(slots=True, frozen=False)
class Example:
    args: Any
    kwargs: Any
    # Plus two optional arguments for .xfail()
    raises: Any = field(default=None)
    reason: Any = field(default=None)


# TODO_DOCS link to not-yet-existent patch-dumping docs


class example:
    """
    Add an explicit input to a Hypothesis test, which Hypothesis will always
    try before generating random inputs. This combines the randomized nature of
    Hypothesis generation with a traditional parametrized test.

    For example:

    .. code-block:: python

        @example("Hello world")
        @example("some string with special significance")
        @given(st.text())
        def test_strings(s):
            pass

    will call ``test_strings("Hello world")`` and
    ``test_strings("some string with special significance")`` before generating
    any random inputs. |@example| may be placed in any order relative to |@given|
    and |@settings|.

    Explicit inputs from |@example| are run in the |Phase.explicit| phase.
    Explicit inputs do not count towards |settings.max_examples|. Note that
    explicit inputs added by |@example| do not shrink. If an explicit input
    fails, Hypothesis will stop and report the failure without generating any
    random inputs.

    |@example| can also be used to easily reproduce a failure. For instance, if
    Hypothesis reports that ``f(n=[0, math.nan])`` fails, you can add
    ``@example(n=[0, math.nan])`` to your test to quickly reproduce that failure.

    Arguments to ``@example``
    -------------------------

    Arguments to |@example| have the same behavior and restrictions as arguments
    to |@given|. This means they may be either positional or keyword arguments
    (but not both in the same |@example|):

    .. code-block:: python

        @example(1, 2)
        @example(x=1, y=2)
        @given(st.integers(), st.integers())
        def test(x, y):
            pass

    Note that while arguments to |@given| are strategies (like |st.integers|),
    arguments to |@example| are values instead (like ``1``).

    See the :ref:`given-arguments` section for full details.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        if args and kwargs:
            raise InvalidArgument(
                "Cannot mix positional and keyword arguments for examples"
            )
        if not (args or kwargs):
            raise InvalidArgument("An example must provide at least one argument")

        self.hypothesis_explicit_examples: list[Example] = []
        self._this_example = Example(tuple(args), kwargs)

    def __call__(self, test: TestFunc) -> TestFunc:
        if not hasattr(test, "hypothesis_explicit_examples"):
            test.hypothesis_explicit_examples = self.hypothesis_explicit_examples  # type: ignore
        test.hypothesis_explicit_examples.append(self._this_example)  # type: ignore
        return test

    def xfail(
        self,
        condition: bool = True,  # noqa: FBT002
        *,
        reason: str = "",
        raises: type[BaseException] | tuple[type[BaseException], ...] = BaseException,
    ) -> "example":
        """Mark this example as an expected failure, similarly to
        :obj:`pytest.mark.xfail(strict=True) <pytest.mark.xfail>`.

        Expected-failing examples allow you to check that your test does fail on
        some examples, and therefore build confidence that *passing* tests are
        because your code is working, not because the test is missing something.

        .. code-block:: python

            @example(...).xfail()
            @example(...).xfail(reason="Prices must be non-negative")
            @example(...).xfail(raises=(KeyError, ValueError))
            @example(...).xfail(sys.version_info[:2] >= (3, 12), reason="needs py 3.12")
            @example(...).xfail(condition=sys.platform != "linux", raises=OSError)
            def test(x):
                pass

        .. note::

            Expected-failing examples are handled separately from those generated
            by strategies, so you should usually ensure that there is no overlap.

            .. code-block:: python

                @example(x=1, y=0).xfail(raises=ZeroDivisionError)
                @given(x=st.just(1), y=st.integers())  # Missing `.filter(bool)`!
                def test_fraction(x, y):
                    # This test will try the explicit example and see it fail as
                    # expected, then go on to generate more examples from the
                    # strategy. If we happen to generate y=0, the test will fail
                    # because only the explicit example is treated as xfailing.
                    x / y
        """
        check_type(bool, condition, "condition")
        check_type(str, reason, "reason")
        if not (
            isinstance(raises, type) and issubclass(raises, BaseException)
        ) and not (
            isinstance(raises, tuple)
            and raises  # () -> expected to fail with no error, which is impossible
            and all(
                isinstance(r, type) and issubclass(r, BaseException) for r in raises
            )
        ):
            raise InvalidArgument(
                f"{raises=} must be an exception type or tuple of exception types"
            )
        if condition:
            self._this_example = dataclasses.replace(
                self._this_example, raises=raises, reason=reason
            )
        return self

    def via(self, whence: str, /) -> "example":
        """Attach a machine-readable label noting what the origin of this example
        was. |example.via| is completely optional and does not change runtime
        behavior.

        |example.via| is intended to support self-documenting behavior, as well as
        tooling which might add (or remove) |@example| decorators automatically.
        For example:

        .. code-block:: python

            # Annotating examples is optional and does not change runtime behavior
            @example(...)
            @example(...).via("regression test for issue #42")
            @example(...).via("discovered failure")
            def test(x):
                pass

        .. note::

            `HypoFuzz <https://hypofuzz.com/>`_ uses |example.via| to tag examples
            in the patch of its high-coverage set of explicit inputs, on
            `the patches page <https://hypofuzz.com/example-dashboard/#/patches>`_.
        """
        if not isinstance(whence, str):
            raise InvalidArgument(".via() must be passed a string")
        # This is deliberately a no-op at runtime; the tools operate on source code.
        return self


def seed(seed: Hashable) -> Callable[[TestFunc], TestFunc]:
    """
    Seed the randomness for this test.

    ``seed`` may be any hashable object. No exact meaning for ``seed`` is provided
    other than that for a fixed seed value Hypothesis will produce the same
    examples (assuming that there are no other sources of nondeterminism, such
    as timing, hash randomization, or external state).

    For example, the following test function and |RuleBasedStateMachine| will
    each generate the same series of examples each time they are executed:

    .. code-block:: python

        @seed(1234)
        @given(st.integers())
        def test(n): ...

        @seed(6789)
        class MyMachine(RuleBasedStateMachine): ...

    If using pytest, you can alternatively pass ``--hypothesis-seed`` on the
    command line.

    Setting a seed overrides |settings.derandomize|, which is designed to enable
    deterministic CI tests rather than reproducing observed failures.

    Hypothesis will only print the seed which would reproduce a failure if a test
    fails in an unexpected way, for instance inside Hypothesis internals.
    """

    def accept(test):
        test._hypothesis_internal_use_seed = seed
        current_settings = getattr(test, "_hypothesis_internal_use_settings", None)
        test._hypothesis_internal_use_settings = Settings(
            current_settings, database=None
        )
        return test

    return accept


# TODO_DOCS: link to /explanation/choice-sequence


def reproduce_failure(version: str, blob: bytes) -> Callable[[TestFunc], TestFunc]:
    """
    Run the example corresponding to the binary ``blob`` in order to reproduce a
    failure. ``blob`` is a serialized version of the internal input representation
    of Hypothesis.

    A test decorated with |@reproduce_failure| always runs exactly one example,
    which is expected to cause a failure. If the provided ``blob`` does not
    cause a failure, Hypothesis will raise |DidNotReproduce|.

    Hypothesis will print an |@reproduce_failure| decorator if
    |settings.print_blob| is ``True`` (which is the default in CI).

    |@reproduce_failure| is intended to be temporarily added to your test suite in
    order to reproduce a failure. It is not intended to be a permanent addition to
    your test suite. Because of this, no compatibility guarantees are made across
    Hypothesis versions, and |@reproduce_failure| will error if used on a different
    Hypothesis version than it was created for.

    .. seealso::

        See also the :doc:`/tutorial/replaying-failures` tutorial.
    """

    def accept(test):
        test._hypothesis_internal_use_reproduce_failure = (version, blob)
        return test

    return accept


def reproduction_decorator(choices: Iterable[ChoiceT]) -> str:
    return f"@reproduce_failure({__version__!r}, {encode_failure(choices)!r})"


def encode_failure(choices: Iterable[ChoiceT]) -> bytes:
    blob = choices_to_bytes(choices)
    compressed = zlib.compress(blob)
    if len(compressed) < len(blob):
        blob = b"\1" + compressed
    else:
        blob = b"\0" + blob
    return base64.b64encode(blob)


def decode_failure(blob: bytes) -> Sequence[ChoiceT]:
    try:
        decoded = base64.b64decode(blob)
    except Exception:
        raise InvalidArgument(f"Invalid base64 encoded string: {blob!r}") from None

    prefix = decoded[:1]
    if prefix == b"\0":
        decoded = decoded[1:]
    elif prefix == b"\1":
        try:
            decoded = zlib.decompress(decoded[1:])
        except zlib.error as err:
            raise InvalidArgument(
                f"Invalid zlib compression for blob {blob!r}"
            ) from err
    else:
        raise InvalidArgument(
            f"Could not decode blob {blob!r}: Invalid start byte {prefix!r}"
        )

    choices = choices_from_bytes(decoded)
    if choices is None:
        raise InvalidArgument(f"Invalid serialized choice sequence for blob {blob!r}")

    return choices
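

# Editor's note: an illustrative sketch, not part of the original file. It
# shows the round-trip between encode_failure and decode_failure above,
# assuming a valid choice sequence of primitive values (the `_demo_*` name is
# hypothetical and unused by the library).
def _demo_encode_decode_roundtrip():
    choices = [0, 2.5, "abc", b"\x00\x01", True]
    blob = encode_failure(choices)  # base64 bytes, possibly zlib-compressed
    assert list(decode_failure(blob)) == choices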


def _invalid(message, *, exc=InvalidArgument, test, given_kwargs):
    @impersonate(test)
    def wrapped_test(*arguments, **kwargs):  # pragma: no cover  # coverage limitation
        raise exc(message)

    wrapped_test.is_hypothesis_test = True
    wrapped_test.hypothesis = HypothesisHandle(
        inner_test=test,
        _get_fuzz_target=wrapped_test,
        _given_kwargs=given_kwargs,
    )
    return wrapped_test


def is_invalid_test(test, original_sig, given_arguments, given_kwargs):
    """Check the arguments to ``@given`` for basic usage constraints.

    Most errors are not raised immediately; instead we return a dummy test
    function that will raise the appropriate error if it is actually called.
    When the user runs a subset of tests (e.g. via ``pytest -k``), errors will
    only be reported for tests that actually ran.
    """
    invalid = partial(_invalid, test=test, given_kwargs=given_kwargs)

    if not (given_arguments or given_kwargs):
        return invalid("given must be called with at least one argument")

    params = list(original_sig.parameters.values())
    pos_params = [p for p in params if p.kind is p.POSITIONAL_OR_KEYWORD]
    kwonly_params = [p for p in params if p.kind is p.KEYWORD_ONLY]
    if given_arguments and params != pos_params:
        return invalid(
            "positional arguments to @given are not supported with varargs, "
            "varkeywords, positional-only, or keyword-only arguments"
        )

    if len(given_arguments) > len(pos_params):
        return invalid(
            f"Too many positional arguments for {test.__name__}() were passed to "
            f"@given - expected at most {len(pos_params)} "
            f"arguments, but got {len(given_arguments)} {given_arguments!r}"
        )

    if ... in given_arguments:
        return invalid(
            "... was passed as a positional argument to @given, but may only be "
            "passed as a keyword argument or as the sole argument of @given"
        )

    if given_arguments and given_kwargs:
        return invalid("cannot mix positional and keyword arguments to @given")
    extra_kwargs = [
        k for k in given_kwargs if k not in {p.name for p in pos_params + kwonly_params}
    ]
    if extra_kwargs and (params == [] or params[-1].kind is not params[-1].VAR_KEYWORD):
        arg = extra_kwargs[0]
        extra = ""
        if arg in all_settings:
            extra = f". Did you mean @settings({arg}={given_kwargs[arg]!r})?"
        return invalid(
            f"{test.__name__}() got an unexpected keyword argument {arg!r}, "
            f"from `{arg}={given_kwargs[arg]!r}` in @given{extra}"
        )
    if any(p.default is not p.empty for p in params):
        return invalid("Cannot apply @given to a function with defaults.")

    # This case would raise Unsatisfiable *anyway*, but by detecting it here we can
    # provide a much more helpful error message for people e.g. using the Ghostwriter.
    empty = [
        f"{s!r} (arg {idx})" for idx, s in enumerate(given_arguments) if s is NOTHING
    ] + [f"{name}={s!r}" for name, s in given_kwargs.items() if s is NOTHING]
    if empty:
        strats = "strategies" if len(empty) > 1 else "strategy"
        return invalid(
            f"Cannot generate examples from empty {strats}: " + ", ".join(empty),
            exc=Unsatisfiable,
        )
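

# Editor's note: a minimal sketch, not part of the original file, showing the
# deferred-error behavior documented above: invalid @given usage raises only
# when the wrapped test is actually called.
def _demo_invalid_given_is_deferred():
    @given()  # invalid: no strategies passed
    def test():
        pass

    try:
        test()
    except InvalidArgument:
        pass  # raised at call time, not at decoration time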


def execute_explicit_examples(state, wrapped_test, arguments, kwargs, original_sig):
    assert isinstance(state, StateForActualGivenExecution)
    posargs = [
        p.name
        for p in original_sig.parameters.values()
        if p.kind is p.POSITIONAL_OR_KEYWORD
    ]

    for example in reversed(getattr(wrapped_test, "hypothesis_explicit_examples", ())):
        assert isinstance(example, Example)
        # All of this validation is to check that @example() got "the same" arguments
        # as @given, i.e. corresponding to the same parameters, even though they might
        # be any mixture of positional and keyword arguments.
        if example.args:
            assert not example.kwargs
            if any(
                p.kind is p.POSITIONAL_ONLY for p in original_sig.parameters.values()
            ):
                raise InvalidArgument(
                    "Cannot pass positional arguments to @example() when decorating "
                    "a test function which has positional-only parameters."
                )
            if len(example.args) > len(posargs):
                raise InvalidArgument(
                    "example has too many arguments for test. Expected at most "
                    f"{len(posargs)} but got {len(example.args)}"
                )
            example_kwargs = dict(
                zip(posargs[-len(example.args) :], example.args, strict=True)
            )
        else:
            example_kwargs = dict(example.kwargs)
        given_kws = ", ".join(
            repr(k) for k in sorted(wrapped_test.hypothesis._given_kwargs)
        )
        example_kws = ", ".join(repr(k) for k in sorted(example_kwargs))
        if given_kws != example_kws:
            raise InvalidArgument(
                f"Inconsistent args: @given() got strategies for {given_kws}, "
                f"but @example() got arguments for {example_kws}"
            ) from None

        # This is certainly true because the example_kwargs exactly match the params
        # reserved by @given(), which are then removed from the function signature.
        assert set(example_kwargs).isdisjoint(kwargs)
        example_kwargs.update(kwargs)

        if Phase.explicit not in state.settings.phases:
            continue

        with local_settings(state.settings):
            fragments_reported = []
            empty_data = ConjectureData.for_choices([])
            try:
                execute_example = partial(
                    state.execute_once,
                    empty_data,
                    is_final=True,
                    print_example=True,
                    example_kwargs=example_kwargs,
                )
                with with_reporter(fragments_reported.append):
                    if example.raises is None:
                        execute_example()
                    else:
                        # @example(...).xfail(...)
                        bits = ", ".join(nicerepr(x) for x in arguments) + ", ".join(
                            f"{k}={nicerepr(v)}" for k, v in example_kwargs.items()
                        )
                        try:
                            execute_example()
                        except failure_exceptions_to_catch() as err:
                            if not isinstance(err, example.raises):
                                raise
                            # Save a string form of this example; we'll warn if it's
                            # ever generated by the strategy (which can't be xfailed)
                            state.xfail_example_reprs.add(
                                repr_call(state.test, arguments, example_kwargs)
                            )
                        except example.raises as err:
                            # We'd usually check this as early as possible, but it's
                            # possible for failure_exceptions_to_catch() to grow when
                            # e.g. pytest is imported between import- and test-time.
                            raise InvalidArgument(
                                f"@example({bits}) raised an expected {err!r}, "
                                "but Hypothesis does not treat this as a test failure"
                            ) from err
                        else:
                            # Unexpectedly passing; always raise an error in this case.
                            reason = f" because {example.reason}" * bool(example.reason)
                            if example.raises is BaseException:
                                name = "exception"  # special-case no raises= arg
                            elif not isinstance(example.raises, tuple):
                                name = example.raises.__name__
                            elif len(example.raises) == 1:
                                name = example.raises[0].__name__
                            else:
                                name = (
                                    ", ".join(ex.__name__ for ex in example.raises[:-1])
                                    + f", or {example.raises[-1].__name__}"
                                )
                            vowel = name.upper()[0] in "AEIOU"
                            raise AssertionError(
                                f"Expected a{'n' * vowel} {name} from @example({bits})"
                                f"{reason}, but no exception was raised."
                            )
            except UnsatisfiedAssumption:
                # Odd though it seems, we deliberately support explicit examples that
                # are then rejected by a call to `assume()`. As well as iterative
                # development, this is rather useful to replay Hypothesis' part of
                # a saved failure when other arguments are supplied by e.g. pytest.
                # See https://github.com/HypothesisWorks/hypothesis/issues/2125
                with contextlib.suppress(StopTest):
                    empty_data.conclude_test(Status.INVALID)
            except BaseException as err:
                # In order to support reporting of multiple failing examples, we yield
                # each of the (report text, error) pairs we find back to the top-level
                # runner. This also ensures that user-facing stack traces have as few
                # frames of Hypothesis internals as possible.
                err = err.with_traceback(get_trimmed_traceback())

                # One user error - whether misunderstanding or typo - we've seen a few
                # times is to pass strategies to @example() where values are expected.
                # Checking is easy, and false-positives not much of a problem, so:
                if isinstance(err, failure_exceptions_to_catch()) and any(
                    isinstance(arg, SearchStrategy)
                    for arg in example.args + tuple(example.kwargs.values())
                ):
                    new = HypothesisWarning(
                        "The @example() decorator expects to be passed values, but "
                        "you passed strategies instead. See https://hypothesis."
                        "readthedocs.io/en/latest/reference/api.html#hypothesis"
                        ".example for details."
                    )
                    new.__cause__ = err
                    err = new

                with contextlib.suppress(StopTest):
                    empty_data.conclude_test(Status.INVALID)
                yield (fragments_reported, err)
                if (
                    state.settings.report_multiple_bugs
                    and pytest_shows_exceptiongroups
                    and isinstance(err, failure_exceptions_to_catch())
                    and not isinstance(err, skip_exceptions_to_reraise())
                ):
                    continue
                break
            finally:
                if fragments_reported:
                    assert fragments_reported[0].startswith("Falsifying example")
                    fragments_reported[0] = fragments_reported[0].replace(
                        "Falsifying example", "Falsifying explicit example", 1
                    )

                empty_data.freeze()
                if observability_enabled():
                    tc = make_testcase(
                        run_start=state._start_timestamp,
                        property=state.test_identifier,
                        data=empty_data,
                        how_generated="explicit example",
                        representation=state._string_repr,
                        timing=state._timing_features,
                    )
                    deliver_observation(tc)

            if fragments_reported:
                verbose_report(fragments_reported[0].replace("Falsifying", "Trying", 1))
                for f in fragments_reported[1:]:
                    verbose_report(f)


def get_random_for_wrapped_test(test, wrapped_test):
    settings = wrapped_test._hypothesis_internal_use_settings
    wrapped_test._hypothesis_internal_use_generated_seed = None

    if wrapped_test._hypothesis_internal_use_seed is not None:
        return Random(wrapped_test._hypothesis_internal_use_seed)

    if settings.derandomize:
        return Random(int_from_bytes(function_digest(test)))

    if global_force_seed is not None:
        return Random(global_force_seed)

    if threadlocal._hypothesis_global_random is None:  # pragma: no cover
        threadlocal._hypothesis_global_random = Random()
    seed = threadlocal._hypothesis_global_random.getrandbits(128)
    wrapped_test._hypothesis_internal_use_generated_seed = seed
    return Random(seed)


@dataclass(slots=True, frozen=False)
class Stuff:
    selfy: Any
    args: tuple
    kwargs: dict
    given_kwargs: dict


def process_arguments_to_given(
    wrapped_test: Any,
    arguments: Sequence[object],
    kwargs: dict[str, object],
    given_kwargs: dict[str, SearchStrategy],
    params: dict[str, Parameter],
) -> tuple[Sequence[object], dict[str, object], Stuff]:
    selfy = None
    arguments, kwargs = convert_positional_arguments(wrapped_test, arguments, kwargs)

    # If the test function is a method of some kind, the bound object
    # will be the first named argument if there are any, otherwise the
    # first vararg (if any).
    posargs = [p.name for p in params.values() if p.kind is p.POSITIONAL_OR_KEYWORD]
    if posargs:
        selfy = kwargs.get(posargs[0])
    elif arguments:
        selfy = arguments[0]

    # Ensure that we don't mistake mocks for self here.
    # This can cause the mock to be used as the test runner.
    if is_mock(selfy):
        selfy = None

    arguments = tuple(arguments)

    with ensure_free_stackframes():
        for k, s in given_kwargs.items():
            check_strategy(s, name=k)
            s.validate()

    stuff = Stuff(selfy=selfy, args=arguments, kwargs=kwargs, given_kwargs=given_kwargs)

    return arguments, kwargs, stuff


def skip_exceptions_to_reraise():
    """Return a tuple of exceptions meaning 'skip this test', to re-raise.

    This is intended to cover most common test runners; if you would
    like another to be added please open an issue or pull request adding
    it to this function and to tests/cover/test_lazy_import.py
    """
    # This is a set in case any library simply re-exports another's Skip exception
    exceptions = set()
    # We use this sys.modules trick to avoid importing libraries -
    # you can't be an instance of a type from an unimported module!
    # This is fast enough that we don't need to cache the result,
    # and more importantly it avoids possible side-effects :-)
    if "unittest" in sys.modules:
        exceptions.add(sys.modules["unittest"].SkipTest)
    if "_pytest.outcomes" in sys.modules:
        exceptions.add(sys.modules["_pytest.outcomes"].Skipped)
    return tuple(sorted(exceptions, key=str))


def failure_exceptions_to_catch() -> tuple[type[BaseException], ...]:
    """Return a tuple of exceptions meaning 'this test has failed', to catch.

    This is intended to cover most common test runners; if you would
    like another to be added please open an issue or pull request.
    """
    # While SystemExit and GeneratorExit are instances of BaseException, we also
    # expect them to be deterministic - unlike KeyboardInterrupt - and so we treat
    # them as standard exceptions, check for flakiness, etc.
    # See https://github.com/HypothesisWorks/hypothesis/issues/2223 for details.
    exceptions = [Exception, SystemExit, GeneratorExit]
    if "_pytest.outcomes" in sys.modules:
        exceptions.append(sys.modules["_pytest.outcomes"].Failed)
    return tuple(exceptions)
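

# Editor's note: a quick sketch, not part of the original file, of the contract
# described in the comment above: SystemExit and GeneratorExit are treated as
# ordinary failures, while KeyboardInterrupt still aborts the whole run.
def _demo_failure_exceptions():
    caught = failure_exceptions_to_catch()
    assert Exception in caught and SystemExit in caught and GeneratorExit in caught
    assert KeyboardInterrupt not in caught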


def new_given_signature(original_sig, given_kwargs):
    """Make an updated signature for the wrapped test."""
    return original_sig.replace(
        parameters=[
            p
            for p in original_sig.parameters.values()
            if not (
                p.name in given_kwargs
                and p.kind in (p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY)
            )
        ],
        return_annotation=None,
    )
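

# Editor's note: a minimal sketch, not part of the original file, showing how
# parameters consumed by @given disappear from the wrapped test's signature.
def _demo_new_given_signature():
    def test(x, y):
        pass

    new_sig = new_given_signature(get_signature(test), {"y": st.integers()})
    assert list(new_sig.parameters) == ["x"]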


def default_executor(data, function):
    return function(data)


def get_executor(runner):
    try:
        execute_example = runner.execute_example
    except AttributeError:
        pass
    else:
        return lambda data, function: execute_example(partial(function, data))

    if hasattr(runner, "setup_example") or hasattr(runner, "teardown_example"):
        setup = getattr(runner, "setup_example", None) or (lambda: None)
        teardown = getattr(runner, "teardown_example", None) or (lambda ex: None)

        def execute(data, function):
            token = None
            try:
                token = setup()
                return function(data)
            finally:
                teardown(token)

        return execute

    return default_executor
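

# Editor's note: a sketch, not part of the original file, of the custom
# executor protocol handled by get_executor above; _DemoRunner is hypothetical.
class _DemoRunner:
    def setup_example(self):
        return "token"  # whatever setup returns is passed to teardown

    def teardown_example(self, token):
        assert token == "token"


def _demo_get_executor():
    execute = get_executor(_DemoRunner())
    # Each test-case call is wrapped in setup_example/teardown_example.
    assert execute(None, lambda data: data) is None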


# This function is a crude solution, a better way of resolving it would probably
# be to rewrite a bunch of exception handlers to use except*.
T = TypeVar("T", bound=BaseException)


def _flatten_group(excgroup: BaseExceptionGroup[T]) -> list[T]:
    found_exceptions: list[T] = []
    for exc in excgroup.exceptions:
        if isinstance(exc, BaseExceptionGroup):
            found_exceptions.extend(_flatten_group(exc))
        else:
            found_exceptions.append(exc)
    return found_exceptions
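

# Editor's note: a minimal sketch, not part of the original file: nested
# exception groups flatten to their leaf exceptions, in order.
def _demo_flatten_group():
    inner = BaseExceptionGroup("inner", [KeyError("a")])
    outer = BaseExceptionGroup("outer", [inner, ValueError("b")])
    assert [type(e) for e in _flatten_group(outer)] == [KeyError, ValueError]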


@contextlib.contextmanager
def unwrap_markers_from_group() -> Generator[None, None, None]:
    try:
        yield
    except BaseExceptionGroup as excgroup:
        _frozen_exceptions, non_frozen_exceptions = excgroup.split(Frozen)

        # group only contains Frozen, reraise the group
        # it doesn't matter what we raise, since any exceptions get disregarded
        # and reraised as StopTest if data got frozen.
        if non_frozen_exceptions is None:
            raise
        # in all other cases they are discarded

        # Can RewindRecursive end up in this group?
        _, user_exceptions = non_frozen_exceptions.split(
            lambda e: isinstance(e, (StopTest, HypothesisException))
        )

        # this might contain marker exceptions, or internal errors, but not frozen.
        if user_exceptions is not None:
            raise

        # single marker exception - reraise it
        flattened_non_frozen_exceptions: list[BaseException] = _flatten_group(
            non_frozen_exceptions
        )
        if len(flattened_non_frozen_exceptions) == 1:
            e = flattened_non_frozen_exceptions[0]
            # preserve the cause of the original exception to not hinder debugging
            # note that __context__ is still lost though
            raise e from e.__cause__

        # multiple marker exceptions. Re-raising the whole group would break a
        # bunch of logic elsewhere, so we have to pick something.
        stoptests, non_stoptests = non_frozen_exceptions.split(StopTest)

        # TODO: stoptest+hypothesisexception ...? Is it possible? If so, what do?

        if non_stoptests:
            # TODO: multiple marker exceptions is easy to produce, but the logic in the
            # engine does not handle it... so we just reraise the first one for now.
            e = _flatten_group(non_stoptests)[0]
            raise e from e.__cause__
        assert stoptests is not None

        # multiple stoptests: raising the one with the lowest testcounter
        raise min(_flatten_group(stoptests), key=lambda s_e: s_e.testcounter)
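

# Editor's note: a sketch, not part of the original file, of the single-marker
# case above: one StopTest wrapped in a group is re-raised bare, so handlers
# written for the unwrapped exception keep working.
def _demo_unwrap_single_marker():
    try:
        with unwrap_markers_from_group():
            raise BaseExceptionGroup("wrapped", [StopTest(0)])
    except StopTest:
        pass  # unwrapped from the group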


class StateForActualGivenExecution:
    def __init__(
        self, stuff, test, settings, random, wrapped_test, *, thread_overlap=None
    ):
        self.stuff = stuff
        self.test = test
        self.settings = settings
        self.random = random
        self.wrapped_test = wrapped_test
        self.thread_overlap = {} if thread_overlap is None else thread_overlap

        self.test_runner = get_executor(stuff.selfy)
        self.print_given_args = getattr(
            wrapped_test, "_hypothesis_internal_print_given_args", True
        )

        self.last_exception = None
        self.falsifying_examples = ()
        self.ever_executed = False
        self.xfail_example_reprs = set()
        self.files_to_propagate = set()
        self.failed_normally = False
        self.failed_due_to_deadline = False

        self.explain_traces = defaultdict(set)
        self._start_timestamp = time.time()
        self._string_repr = ""
        self._timing_features = {}

    @property
    def test_identifier(self) -> str:
        return getattr(
            current_pytest_item.value, "nodeid", None
        ) or get_pretty_function_description(self.wrapped_test)

    def _should_trace(self):
        # NOTE: we explicitly support monkeypatching this. Keep the namespace
        # access intact.
        _trace_obs = (
            observability_enabled() and observability.OBSERVABILITY_COLLECT_COVERAGE
        )
        _trace_failure = (
            self.failed_normally
            and not self.failed_due_to_deadline
            and {Phase.shrink, Phase.explain}.issubset(self.settings.phases)
        )
        return _trace_obs or _trace_failure

    def execute_once(
        self,
        data,
        *,
        print_example=False,
        is_final=False,
        expected_failure=None,
        example_kwargs=None,
    ):
        """Run the test function once, using ``data`` as input.

        If the test raises an exception, it will propagate through to the
        caller of this method. Depending on its type, this could represent
        an ordinary test failure, or a fatal error, or a control exception.

        If this method returns normally, the test might have passed, or
        it might have placed ``data`` in an unsuccessful state and then
        swallowed the corresponding control exception.
        """

        self.ever_executed = True

        self._string_repr = ""
        text_repr = None
        if self.settings.deadline is None and not observability_enabled():

            @proxies(self.test)
            def test(*args, **kwargs):
                with unwrap_markers_from_group(), ensure_free_stackframes():
                    return self.test(*args, **kwargs)

        else:

            @proxies(self.test)
            def test(*args, **kwargs):
                arg_drawtime = math.fsum(data.draw_times.values())
                arg_stateful = math.fsum(data._stateful_run_times.values())
                arg_gctime = gc_cumulative_time()
                with unwrap_markers_from_group(), ensure_free_stackframes():
                    start = time.perf_counter()
                    try:
                        result = self.test(*args, **kwargs)
                    finally:
                        finish = time.perf_counter()
                        in_drawtime = math.fsum(data.draw_times.values()) - arg_drawtime
                        in_stateful = (
                            math.fsum(data._stateful_run_times.values()) - arg_stateful
                        )
                        in_gctime = gc_cumulative_time() - arg_gctime
                        runtime = finish - start - in_drawtime - in_stateful - in_gctime
                        self._timing_features = {
                            "execute:test": runtime,
                            "overall:gc": in_gctime,
                            **data.draw_times,
                            **data._stateful_run_times,
                        }

                if (
                    (current_deadline := self.settings.deadline) is not None
                    # we disable the deadline check under concurrent threads, since
                    # cpython may switch away from a thread for arbitrarily long.
                    and not self.thread_overlap.get(threading.get_ident(), False)
                ):
                    if not is_final:
                        current_deadline = (current_deadline // 4) * 5
                    if runtime >= current_deadline.total_seconds():
                        raise DeadlineExceeded(
                            datetime.timedelta(seconds=runtime), self.settings.deadline
                        )
                return result

        def run(data: ConjectureData) -> None:
            # Set up dynamic context needed by a single test run.
            if self.stuff.selfy is not None:
                data.hypothesis_runner = self.stuff.selfy
            # Generate all arguments to the test function.
            args = self.stuff.args
            kwargs = dict(self.stuff.kwargs)
            if example_kwargs is None:
                kw, argslices = context.prep_args_kwargs_from_strategies(
                    self.stuff.given_kwargs
                )
            else:
                kw = example_kwargs
                argslices = {}
            kwargs.update(kw)
            if expected_failure is not None:
                nonlocal text_repr
                text_repr = repr_call(test, args, kwargs)

            if print_example or current_verbosity() >= Verbosity.verbose:
                printer = RepresentationPrinter(context=context)
                if print_example:
                    printer.text("Falsifying example:")
                else:
                    printer.text("Trying example:")

                if self.print_given_args:
                    printer.text(" ")
                    printer.repr_call(
                        test.__name__,
                        args,
                        kwargs,
                        force_split=True,
                        arg_slices=argslices,
                        leading_comment=(
                            "# " + context.data.slice_comments[(0, 0)]
                            if (0, 0) in context.data.slice_comments
                            else None
                        ),
                        avoid_realization=data.provider.avoid_realization,
                    )
                report(printer.getvalue())

            if observability_enabled():
                printer = RepresentationPrinter(context=context)
                printer.repr_call(
                    test.__name__,
                    args,
                    kwargs,
                    force_split=True,
                    arg_slices=argslices,
                    leading_comment=(
                        "# " + context.data.slice_comments[(0, 0)]
                        if (0, 0) in context.data.slice_comments
                        else None
                    ),
                    avoid_realization=data.provider.avoid_realization,
                )
                self._string_repr = printer.getvalue()

            try:
                return test(*args, **kwargs)
            except TypeError as e:
                # If we sampled from a sequence of strategies, AND failed with a
                # TypeError, *AND that exception mentions SearchStrategy*, add a note:
                if (
                    "SearchStrategy" in str(e)
                    and data._sampled_from_all_strategies_elements_message is not None
                ):
                    msg, format_arg = data._sampled_from_all_strategies_elements_message
                    add_note(e, msg.format(format_arg))
                raise
            finally:
                if data._stateful_repr_parts is not None:
                    self._string_repr = "\n".join(data._stateful_repr_parts)

                if observability_enabled():
                    printer = RepresentationPrinter(context=context)
                    for name, value in data._observability_args.items():
                        if name.startswith("generate:Draw "):
                            try:
                                value = data.provider.realize(value)
                            except BackendCannotProceed:  # pragma: no cover
                                value = "<backend failed to realize symbolic>"
                            printer.text(f"\n{name.removeprefix('generate:')}: ")
                            printer.pretty(value)

                    self._string_repr += printer.getvalue()

        # self.test_runner can include the execute_example method, or setup/teardown
        # _example, so it's important to get the PRNG and build context in place first.
        with (
            local_settings(self.settings),
            deterministic_PRNG(),
            BuildContext(
                data, is_final=is_final, wrapped_test=self.wrapped_test
            ) as context,
        ):
            # providers may throw in per_case_context_fn, and we'd like
            # `result` to still be set in these cases.
            result = None
            with data.provider.per_test_case_context_manager():
                # Run the test function once, via the executor hook.
                # In most cases this will delegate straight to `run(data)`.
                result = self.test_runner(data, run)

        # If a failure was expected, it should have been raised already, so
        # instead raise an appropriate diagnostic error.
        if expected_failure is not None:
            exception, traceback = expected_failure
            if isinstance(exception, DeadlineExceeded) and (
                runtime_secs := math.fsum(
                    v
                    for k, v in self._timing_features.items()
                    if k.startswith("execute:")
                )
            ):
                report(
                    "Unreliable test timings! On an initial run, this "
                    f"test took {exception.runtime.total_seconds() * 1000:.2f}ms, "
                    "which exceeded the deadline of "
                    f"{self.settings.deadline.total_seconds() * 1000:.2f}ms, but "
                    f"on a subsequent run it took {runtime_secs * 1000:.2f} ms, "
                    "which did not. If you expect this sort of "
                    "variability in your test timings, consider turning "
                    "deadlines off for this test by setting deadline=None."
                )
            else:
                report("Failed to reproduce exception. Expected: \n" + traceback)
            raise FlakyFailure(
                f"Hypothesis {text_repr} produces unreliable results: "
                "Falsified on the first call but did not on a subsequent one",
                [exception],
            )
        return result

    def _flaky_replay_to_failure(
        self, err: FlakyReplay, context: BaseException
    ) -> FlakyFailure:
        # Note that in the mark_interesting case, _context_ itself
        # is part of err._interesting_examples - but it's not in
        # _runner.interesting_examples - this is fine, as the context
        # (i.e., immediate exception) is appended.
        interesting_examples = [
            self._runner.interesting_examples[origin]
            for origin in err._interesting_origins
            if origin in self._runner.interesting_examples
        ]
        exceptions = [result.expected_exception for result in interesting_examples]
        exceptions.append(context)  # the immediate exception
        return FlakyFailure(err.reason, exceptions)

    def _execute_once_for_engine(self, data: ConjectureData) -> None:
        """Wrapper around ``execute_once`` that intercepts test failure
        exceptions and single-test control exceptions, and turns them into
        appropriate method calls to `data` instead.

        This allows the engine to assume that any exception other than
        ``StopTest`` must be a fatal error, and should stop the entire engine.
        """
        trace: Trace = set()
        try:
            with Tracer(should_trace=self._should_trace()) as tracer:
                try:
                    result = self.execute_once(data)
                    if (
                        data.status == Status.VALID and tracer.branches
                    ):  # pragma: no cover
                        # This is in fact covered by our *non-coverage* tests, but due
                        # to the settrace() contention *not* by our coverage tests.
                        self.explain_traces[None].add(frozenset(tracer.branches))
                finally:
                    trace = tracer.branches
            if result is not None:
                fail_health_check(
                    self.settings,
                    "Tests run under @given should return None, but "
                    f"{self.test.__name__} returned {result!r} instead.",
                    HealthCheck.return_value,
                )
        except UnsatisfiedAssumption as e:
            # An "assume" check failed, so instead we inform the engine that
            # this test run was invalid.
            try:
                data.mark_invalid(e.reason)
            except FlakyReplay as err:
                # This was unexpected, meaning that the assume was flaky.
                # Report it as such.
                raise self._flaky_replay_to_failure(err, e) from None
        except (StopTest, BackendCannotProceed):
            # The engine knows how to handle this control exception, so it's
            # OK to re-raise it.
            raise
        except (
            FailedHealthCheck,
            *skip_exceptions_to_reraise(),
        ):
            # These are fatal errors or control exceptions that should stop the
            # engine, so we re-raise them.
            raise
        except failure_exceptions_to_catch() as e:
            # If an unhandled (i.e., non-Hypothesis) error was raised by
            # Hypothesis-internal code, re-raise it as a fatal error instead
            # of treating it as a test failure.
            if isinstance(e, BaseExceptionGroup) and len(e.exceptions) == 1:
                # When a naked exception is implicitly wrapped in an ExceptionGroup
                # due to a re-raising "except*", the ExceptionGroup is constructed in
                # the caller's stack frame (see #4183). This workaround is specifically
                # for implicit wrapping of naked exceptions by "except*", since explicit
                # raising of ExceptionGroup gets the proper traceback in the first place
                # - there's no need to handle hierarchical groups here, at least if no
                # such implicit wrapping happens inside hypothesis code (we only care
                # about the hypothesis-or-not distinction).
                #
                # 01-25-2025: this was patched to give the correct
                # stacktrace in cpython https://github.com/python/cpython/issues/128799.
                # can remove once python3.11 is EOL.
                tb = e.exceptions[0].__traceback__ or e.__traceback__
            else:
                tb = e.__traceback__
            filepath = traceback.extract_tb(tb)[-1][0]
            if (
                is_hypothesis_file(filepath)
                and not isinstance(e, HypothesisException)
                # We expect backend authors to use the provider_conformance test
                # to test their backends. If an error occurs there, it is probably
                # from their backend, and we would like to treat it as a standard
                # error, not a hypothesis-internal error.
                and not filepath.endswith(
                    f"internal{os.sep}conjecture{os.sep}provider_conformance.py"
                )
            ):
                raise

            if data.frozen:
                # This can happen if an error occurred in a finally
                # block somewhere, suppressing our original StopTest.
                # We raise a new one here to resume normal operation.
                raise StopTest(data.testcounter) from e
            else:
                # The test failed by raising an exception, so we inform the
                # engine that this test run was interesting. This is the normal
                # path for test runs that fail.
                tb = get_trimmed_traceback()
                data.expected_traceback = format_exception(e, tb)
                data.expected_exception = e
                assert data.expected_traceback is not None  # for mypy
                verbose_report(data.expected_traceback)

                self.failed_normally = True

                interesting_origin = InterestingOrigin.from_exception(e)
                if trace:  # pragma: no cover
                    # Trace collection is explicitly disabled under coverage.
                    self.explain_traces[interesting_origin].add(frozenset(trace))
                if interesting_origin.exc_type == DeadlineExceeded:
                    self.failed_due_to_deadline = True
                    self.explain_traces.clear()
                try:
                    data.mark_interesting(interesting_origin)
                except FlakyReplay as err:
                    raise self._flaky_replay_to_failure(err, e) from None

        finally:
            # Conditional here so we can save some time constructing the payload; in
            # other cases (without coverage) it's cheap enough to do that regardless.
            if observability_enabled():
                if runner := getattr(self, "_runner", None):
                    phase = runner._current_phase
                else:  # pragma: no cover  # in case of messing with internals
                    if self.failed_normally or self.failed_due_to_deadline:
                        phase = "shrink"
                    else:
                        phase = "unknown"
                backend_desc = f", using backend={self.settings.backend!r}" * (
                    self.settings.backend != "hypothesis"
                    and not getattr(runner, "_switch_to_hypothesis_provider", False)
                )
                try:
                    data._observability_args = data.provider.realize(
                        data._observability_args
                    )
                except BackendCannotProceed:
                    data._observability_args = {}

                try:
                    self._string_repr = data.provider.realize(self._string_repr)
                except BackendCannotProceed:
                    self._string_repr = "<backend failed to realize symbolic arguments>"

                try:
                    data.events = data.provider.realize(data.events)
                except BackendCannotProceed:
                    data.events = {}

                data.freeze()
                tc = make_testcase(
                    run_start=self._start_timestamp,
                    property=self.test_identifier,
                    data=data,
                    how_generated=f"during {phase} phase{backend_desc}",
                    representation=self._string_repr,
                    arguments=data._observability_args,
                    timing=self._timing_features,
                    coverage=tractable_coverage_report(trace) or None,
                    phase=phase,
                    backend_metadata=data.provider.observe_test_case(),
                )
                deliver_observation(tc)

                for msg in data.provider.observe_information_messages(
                    lifetime="test_case"
                ):
                    self._deliver_information_message(**msg)
            self._timing_features = {}

    def _deliver_information_message(
        self, *, type: InfoObservationType, title: str, content: str | dict
    ) -> None:
        deliver_observation(
            InfoObservation(
                type=type,
                run_start=self._start_timestamp,
                property=self.test_identifier,
                title=title,
                content=content,
            )
        )

    def run_engine(self):
        """Run the test function many times, on database input and generated
        input, using the Conjecture engine.
        """
        # Tell pytest to omit the body of this function from tracebacks
        __tracebackhide__ = True
        try:
            database_key = self.wrapped_test._hypothesis_internal_database_key
        except AttributeError:
            if global_force_seed is None:
                database_key = function_digest(self.test)
            else:
                database_key = None

        runner = self._runner = ConjectureRunner(
            self._execute_once_for_engine,
            settings=self.settings,
            random=self.random,
            database_key=database_key,
            thread_overlap=self.thread_overlap,
        )
        # Use the Conjecture engine to run the test function many times
        # on different inputs.
        runner.run()
        note_statistics(runner.statistics)
        if observability_enabled():
            self._deliver_information_message(
                type="info",
                title="Hypothesis Statistics",
                content=describe_statistics(runner.statistics),
            )
            for msg in (
                p if isinstance(p := runner.provider, PrimitiveProvider) else p(None)
            ).observe_information_messages(lifetime="test_function"):
                self._deliver_information_message(**msg)

        if runner.call_count == 0:
            return
        if runner.interesting_examples:
            self.falsifying_examples = sorted(
                runner.interesting_examples.values(),
                key=lambda d: sort_key(d.nodes),
                reverse=True,
            )
        else:
            if runner.valid_examples == 0:
                explanations = []
                # use a somewhat arbitrary cutoff to avoid recommending spurious
                # fixes.
                # e.g., a few invalid examples from internal filters when the
                # problem is the user generating large inputs, or a
                # few overruns during internal mutation when the problem is
                # impossible user filters/assumes.
                if runner.invalid_examples > min(20, runner.call_count // 5):
                    explanations.append(
                        f"{runner.invalid_examples} of {runner.call_count} "
                        "examples failed a .filter() or assume() condition. Try "
                        "making your filters or assumes less strict, or rewrite "
                        "using strategy parameters: "
                        "st.integers().filter(lambda x: x > 0) fails less often "
                        "(that is, never) when rewritten as st.integers(min_value=1)."
                    )
                if runner.overrun_examples > min(20, runner.call_count // 5):
                    explanations.append(
                        f"{runner.overrun_examples} of {runner.call_count} "
                        "examples were too large to finish generating; try "
                        "reducing the typical size of your inputs?"
                    )
                rep = get_pretty_function_description(self.test)
                raise Unsatisfiable(
                    f"Unable to satisfy assumptions of {rep}. "
                    f"{' Also, '.join(explanations)}"
                )

        # If we have not traced executions, warn about that now (but only when
        # we'd expect to do so reliably, i.e. on CPython>=3.12)
        if (
            hasattr(sys, "monitoring")
            and not PYPY
            and self._should_trace()
            and not Tracer.can_trace()
        ):  # pragma: no cover
            # actually covered by our tests, but only on >= 3.12
            warnings.warn(
                "avoiding tracing test function because tool id "
                f"{MONITORING_TOOL_ID} is already taken by tool "
                f"{sys.monitoring.get_tool(MONITORING_TOOL_ID)}.",
                HypothesisWarning,
                stacklevel=3,
            )

        if not self.falsifying_examples:
            return
        elif not (self.settings.report_multiple_bugs and pytest_shows_exceptiongroups):
            # Pretend that we only found one failure, by discarding the others.
            del self.falsifying_examples[:-1]

        # The engine found one or more failures, so we need to reproduce and
        # report them.

        errors_to_report = []

        report_lines = describe_targets(runner.best_observed_targets)
        if report_lines:
            report_lines.append("")

        explanations = explanatory_lines(self.explain_traces, self.settings)
        for falsifying_example in self.falsifying_examples:
            fragments = []

            ran_example = runner.new_conjecture_data(
                falsifying_example.choices, max_choices=len(falsifying_example.choices)
            )
            ran_example.slice_comments = falsifying_example.slice_comments
            tb = None
            origin = None
            assert falsifying_example.expected_exception is not None
            assert falsifying_example.expected_traceback is not None
            try:
                with with_reporter(fragments.append):
                    self.execute_once(
                        ran_example,
                        print_example=True,
                        is_final=True,
                        expected_failure=(
                            falsifying_example.expected_exception,
                            falsifying_example.expected_traceback,
                        ),
                    )
            except StopTest as e:
                # Link the expected exception from the first run. Not sure
                # how to access the current exception, if it failed
                # differently on this run. In fact, in the only known
                # reproducer, the StopTest is caused by OVERRUN before the
                # test is even executed. Possibly because all initial examples
                # failed until the final non-traced replay, and something was
                # exhausted? Possibly a FIXME, but sufficiently weird to
                # ignore for now.
                err = FlakyFailure(
                    "Inconsistent results: An example failed on the "
                    "first run but now succeeds (or fails with another "
                    "error, or is for some reason not runnable).",
                    # (note: e is a BaseException)
                    [falsifying_example.expected_exception or e],
                )
                errors_to_report.append((fragments, err))
            except UnsatisfiedAssumption as e:  # pragma: no cover  # ironically flaky
                err = FlakyFailure(
                    "Unreliable assumption: An example which satisfied "
                    "assumptions on the first run now fails it.",
                    [e],
                )
                errors_to_report.append((fragments, err))
            except BaseException as e:
                # If we have anything for explain-mode, this is the time to report.
                fragments.extend(explanations[falsifying_example.interesting_origin])
                errors_to_report.append(
                    (fragments, e.with_traceback(get_trimmed_traceback()))
                )
                tb = format_exception(e, get_trimmed_traceback(e))
                origin = InterestingOrigin.from_exception(e)
            else:
                # execute_once() will always raise either the expected error, or Flaky.
                raise NotImplementedError("This should be unreachable")
            finally:
                ran_example.freeze()
                if observability_enabled():
                    # log our observability line for the final failing example
                    tc = make_testcase(
                        run_start=self._start_timestamp,
                        property=self.test_identifier,
                        data=ran_example,
                        how_generated="minimal failing example",
                        representation=self._string_repr,
                        arguments=ran_example._observability_args,
                        timing=self._timing_features,
                        coverage=None,  # Not recorded when we're replaying the MFE
                        status="passed" if sys.exc_info()[0] else "failed",
                        status_reason=str(origin or "unexpected/flaky pass"),
                        metadata={"traceback": tb},
                    )
                    deliver_observation(tc)

            # Whether or not replay actually raised the exception again, we want
            # to print the reproduce_failure decorator for the failing example.
            if self.settings.print_blob:
                fragments.append(
                    "\nYou can reproduce this example by temporarily adding "
                    f"{reproduction_decorator(falsifying_example.choices)} "
                    "as a decorator on your test case"
                )

        _raise_to_user(
            errors_to_report,
            self.settings,
            report_lines,
            # A backend might report a failure and then report verified afterwards,
            # which is to be interpreted as "there are no more failures *other
            # than what we already reported*". Do not report this as unsound.
            unsound_backend=(
                runner._verified_by
                if runner._verified_by and not runner._backend_found_failure
                else None
            ),
        )


def _raise_to_user(
    errors_to_report, settings, target_lines, trailer="", *, unsound_backend=None
):
    """Helper function for attaching notes and grouping multiple errors."""
    failing_prefix = "Falsifying example: "
    ls = []
    for fragments, err in errors_to_report:
        for note in fragments:
            add_note(err, note)
            if note.startswith(failing_prefix):
                ls.append(note.removeprefix(failing_prefix))
    if current_pytest_item.value:
        current_pytest_item.value._hypothesis_failing_examples = ls

    if len(errors_to_report) == 1:
        _, the_error_hypothesis_found = errors_to_report[0]
    else:
        assert errors_to_report
        the_error_hypothesis_found = BaseExceptionGroup(
            f"Hypothesis found {len(errors_to_report)} distinct failures{trailer}.",
            [e for _, e in errors_to_report],
        )

    if settings.verbosity >= Verbosity.normal:
        for line in target_lines:
            add_note(the_error_hypothesis_found, line)

    if unsound_backend:
        add_note(
            err,
            f"backend={unsound_backend!r} claimed to verify this test passes - "
            "please send them a bug report!",
        )

    raise the_error_hypothesis_found
1602@contextlib.contextmanager
1603def fake_subTest(self, msg=None, **__):
1604 """Monkeypatch for `unittest.TestCase.subTest` during `@given`.
1606 If we don't patch this out, each failing example is reported as a
1607 separate failing test by the unittest test runner, which is
1608 obviously incorrect. We therefore replace it for the duration with
1609 this version.
1610 """
1611 warnings.warn(
1612 "subTest per-example reporting interacts badly with Hypothesis "
1613 "trying hundreds of examples, so we disable it for the duration of "
1614 "any test that uses `@given`.",
1615 HypothesisWarning,
1616 stacklevel=2,
1617 )
1618 yield
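# Editor's sketch (illustrative, not part of this module): how wrapped_test
# below swaps fake_subTest in for the duration of an engine run; `runner`
# stands for any unittest.TestCase instance.
def _demo_patch_subtest(runner: TestCase) -> None:
    original_subTest = runner.subTest
    try:
        runner.subTest = types.MethodType(fake_subTest, runner)
        # ... run the engine here; subTest blocks now warn once and do nothing
    finally:
        runner.subTest = original_subTest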
1621@dataclass(slots=False, frozen=False)
1622class HypothesisHandle:
1623 """This object is provided as the .hypothesis attribute on @given tests.
1625 Downstream users can reassign its attributes to insert custom logic into
1626 the execution of each case, for example by converting an async into a
1627 sync function.
1629 This must be an attribute of an attribute, because reassignment of a
1630 first-level attribute would not be visible to Hypothesis if the function
1631 had been decorated before the assignment.
1633 See https://github.com/HypothesisWorks/hypothesis/issues/1257 for more
1634 information.
1635 """
1637 inner_test: Any
1638 _get_fuzz_target: Any
1639 _given_kwargs: Any
1641 @property
1642 def fuzz_one_input(
1643 self,
1644 ) -> Callable[[bytes | bytearray | memoryview | BinaryIO], bytes | None]:
1645 """Run the test as a fuzz target, driven with the `buffer` of bytes.
1647 Returns None if buffer invalid for the strategy, canonical pruned
1648 bytes if the buffer was valid, and leaves raised exceptions alone.
1649 """
1650 # Note: most users, if they care about fuzzer performance, will access the
1651 # property and assign it to a local variable to move the attribute lookup
1652 # outside their fuzzing loop / before the fork point. We cache it anyway,
1653 # so that naive or unusual use-cases get the best possible performance too.
1654 try:
1655 return self.__cached_target # type: ignore
1656 except AttributeError:
1657 self.__cached_target = self._get_fuzz_target()
1658 return self.__cached_target
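# Editor's sketch (illustrative, not part of this module): typical downstream
# use of the handle above, driving fuzz_one_input from a fuzzing harness. The
# test body and byte counts are hypothetical.
def _demo_fuzz_loop() -> None:
    @given(st.binary())
    def test_bytes(b):
        assert isinstance(b, bytes)

    # hoist the property lookup out of the hot loop, per the comment above
    fuzz = test_bytes.hypothesis.fuzz_one_input
    for _ in range(100):
        fuzz(os.urandom(64))  # None if invalid; canonical pruned bytes if valid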
1661@overload
1662def given(
1663 _: EllipsisType, /
1664) -> Callable[
1665 [Callable[..., Coroutine[Any, Any, None] | None]], Callable[[], None]
1666]: # pragma: no cover
1667 ...
1670@overload
1671def given(
1672 *_given_arguments: SearchStrategy[Any],
1673) -> Callable[
1674 [Callable[..., Coroutine[Any, Any, None] | None]], Callable[..., None]
1675]: # pragma: no cover
1676 ...
1679@overload
1680def given(
1681 **_given_kwargs: SearchStrategy[Any] | EllipsisType,
1682) -> Callable[
1683 [Callable[..., Coroutine[Any, Any, None] | None]], Callable[..., None]
1684]: # pragma: no cover
1685 ...
1688def given(
1689 *_given_arguments: SearchStrategy[Any] | EllipsisType,
1690 **_given_kwargs: SearchStrategy[Any] | EllipsisType,
1691) -> Callable[[Callable[..., Coroutine[Any, Any, None] | None]], Callable[..., None]]:
1692 """
1693 The |@given| decorator turns a function into a Hypothesis test. This is the
1694 main entry point to Hypothesis.
1696 .. seealso::
1698 See also the :doc:`/tutorial/introduction` tutorial, which introduces
1699 defining Hypothesis tests with |@given|.
1701 .. _given-arguments:
1703 Arguments to ``@given``
1704 -----------------------
1706 Arguments to |@given| may be either positional or keyword arguments:
1708 .. code-block:: python
1710 @given(st.integers(), st.floats())
1711 def test_one(x, y):
1712 pass
1714 @given(x=st.integers(), y=st.floats())
1715 def test_two(x, y):
1716 pass
1718 If using keyword arguments, the arguments may appear in any order, as with
1719 standard Python functions:
1721 .. code-block:: python
1723 # different order, but still equivalent to before
1724 @given(y=st.floats(), x=st.integers())
1725 def test(x, y):
1726 assert isinstance(x, int)
1727 assert isinstance(y, float)
1729 If |@given| is provided fewer positional arguments than the decorated test,
1730 the test arguments are filled in on the right side, leaving the leftmost
1731 positional arguments unfilled:
1733 .. code-block:: python
1735 @given(st.integers(), st.floats())
1736 def test(manual_string, y, z):
1737 assert manual_string == "x"
1738 assert isinstance(y, int)
1739 assert isinstance(z, float)
1741 # `test` is now a callable which takes one argument `manual_string`
1743 test("x")
1744 # or equivalently:
1745 test(manual_string="x")
1747 The reason for this "from the right" behavior is to support using |@given|
1748 with instance methods, by automatically passing through ``self``:
1750 .. code-block:: python
1752 class MyTest(TestCase):
1753 @given(st.integers())
1754 def test(self, x):
1755 assert isinstance(self, MyTest)
1756 assert isinstance(x, int)
1758 If (and only if) using keyword arguments, |@given| may be combined with
1759 ``**kwargs`` or ``*args``:
1761 .. code-block:: python
1763 @given(x=st.integers(), y=st.integers())
1764 def test(x, **kwargs):
1765 assert "y" in kwargs
1767 @given(x=st.integers(), y=st.integers())
1768 def test(x, *args, **kwargs):
1769 assert args == ()
1770 assert "x" not in kwargs
1771 assert "y" in kwargs
1773 It is an error to:
1775 * Mix positional and keyword arguments to |@given| (see the example below).
1776 * Use |@given| with a function that has a default value for an argument.
1777 * Use |@given| with positional arguments on a function that uses ``*args``,
1778 ``**kwargs``, or keyword-only arguments.
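For example, this test mixes positional and keyword arguments, so calling it
raises an ``InvalidArgument`` error (an illustrative sketch; invalid argument
combinations are only reported when the decorated test is actually called):

.. code-block:: python

    @given(st.integers(), y=st.floats())  # invalid: mixed argument styles
    def test(x, y):
        pass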
1780 The function returned by given has all the same arguments as the original
1781 test, minus those that are filled in by |@given|. See the :ref:`notes on
1782 framework compatibility <framework-compatibility>` for how this interacts
1783 with features of other testing libraries, such as :pypi:`pytest` fixtures.
1784 """
1786 if currently_in_test_context():
1787 fail_health_check(
1788 Settings(),
1789 "Nesting @given tests results in quadratic generation and shrinking "
1790 "behavior, and can usually be more cleanly expressed by replacing the "
1791 "inner function with an st.data() parameter on the outer @given."
1792 "\n\n"
1793 "If it is difficult or impossible to refactor this test to remove the "
1794 "nested @given, you can disable this health check with "
1795 "@settings(suppress_health_check=[HealthCheck.nested_given]) on the "
1796 "outer @given. See "
1797 "https://hypothesis.readthedocs.io/en/latest/reference/api.html#hypothesis.HealthCheck "
1798 "for details.",
1799 HealthCheck.nested_given,
1800 )
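# Editor's note: a sketch of the st.data() refactor suggested by the health
# check message above. Instead of a nested @given:
#
#     @given(st.integers())
#     def test_outer(x):
#         @given(st.integers())
#         def test_inner(y):
#             ...  # property over x and y
#         test_inner()
#
# draw the inner values through a single st.data() parameter:
#
#     @given(st.integers(), st.data())
#     def test_outer(x, data):
#         y = data.draw(st.integers())
#         ...  # property over x and y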
1802 def run_test_as_given(test):
1803 if inspect.isclass(test):
1804 # Provide a meaningful error to users, instead of exceptions from
1805 # internals that assume we're dealing with a function.
1806 raise InvalidArgument("@given cannot be applied to a class")
1808 if (
1809 "_pytest" in sys.modules
1810 and "_pytest.fixtures" in sys.modules
1811 and (
1812 tuple(map(int, sys.modules["_pytest"].__version__.split(".")[:2]))
1813 >= (8, 4)
1814 )
1815 and isinstance(
1816 test, sys.modules["_pytest.fixtures"].FixtureFunctionDefinition
1817 )
1818 ): # pragma: no cover # covered by pytest/test_fixtures, but not by cover/
1819 raise InvalidArgument("@given cannot be applied to a pytest fixture")
1821 given_arguments = tuple(_given_arguments)
1822 given_kwargs = dict(_given_kwargs)
1824 original_sig = get_signature(test)
1825 if given_arguments == (Ellipsis,) and not given_kwargs:
1826 # user indicated that they want to infer all arguments
1827 given_kwargs = {
1828 p.name: Ellipsis
1829 for p in original_sig.parameters.values()
1830 if p.kind in (p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY)
1831 }
1832 given_arguments = ()
1834 check_invalid = is_invalid_test(
1835 test, original_sig, given_arguments, given_kwargs
1836 )
1838 # If the argument check found problems, return a dummy test function
1839 # that will raise an error if it is actually called.
1840 if check_invalid is not None:
1841 return check_invalid
1843 # Because the argument check succeeded, we can convert @given's
1844 # positional arguments into keyword arguments for simplicity.
1845 if given_arguments:
1846 assert not given_kwargs
1847 posargs = [
1848 p.name
1849 for p in original_sig.parameters.values()
1850 if p.kind is p.POSITIONAL_OR_KEYWORD
1851 ]
1852 given_kwargs = dict(
1853 list(zip(posargs[::-1], given_arguments[::-1], strict=False))[::-1] # pairs params with strategies from the right
1854 )
1855 # These have been converted, so delete them to prevent accidental use.
1856 del given_arguments
1858 new_signature = new_given_signature(original_sig, given_kwargs)
1860 # Use type information to convert "infer" arguments into appropriate strategies.
1861 if ... in given_kwargs.values():
1862 hints = get_type_hints(test)
1863 for name in [name for name, value in given_kwargs.items() if value is ...]:
1864 if name not in hints:
1865 return _invalid(
1866 f"passed {name}=... for {test.__name__}, but {name} has "
1867 "no type annotation",
1868 test=test,
1869 given_kwargs=given_kwargs,
1870 )
1871 given_kwargs[name] = st.from_type(hints[name])
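# Editor's note: a sketch of the inference performed above - given annotations,
# @given(...) behaves like passing st.from_type() for each argument:
#
#     @given(...)
#     def test(x: int, y: str): ...
#     # is roughly equivalent to:
#     @given(x=st.from_type(int), y=st.from_type(str))
#     def test(x: int, y: str): ...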
1873 # only raise if the same thread uses two different executors, not if two
1874 # different threads use different executors.
1875 thread_local = ThreadLocal(prev_self=lambda: not_set)
1876 # maps thread_id to whether that thread overlaps in execution with any
1877 # other thread in this @given. We use this to detect whether an @given is
1878 # being run from multiple different threads at once, which informs
1879 # decisions like whether to raise DeadlineExceeded or HealthCheck.too_slow.
1880 thread_overlap: dict[int, bool] = {}
1881 thread_overlap_lock = Lock()
1883 @impersonate(test)
1884 @define_function_signature(test.__name__, test.__doc__, new_signature)
1885 def wrapped_test(*arguments, **kwargs):
1886 # Tell pytest to omit the body of this function from tracebacks
1887 __tracebackhide__ = True
1888 with thread_overlap_lock:
1889 for overlap_thread_id in thread_overlap:
1890 thread_overlap[overlap_thread_id] = True
1892 threadid = threading.get_ident()
1893 # if there are existing threads when this thread starts, then
1894 # this thread starts at an overlapped state.
1895 has_existing_threads = len(thread_overlap) > 0
1896 thread_overlap[threadid] = has_existing_threads
1898 try:
1899 test = wrapped_test.hypothesis.inner_test
1900 if getattr(test, "is_hypothesis_test", False):
1901 raise InvalidArgument(
1902 f"You have applied @given to the test {test.__name__} more than "
1903 "once, which wraps the test several times and is extremely slow. "
1904 "A similar effect can be gained by combining the arguments "
1905 "of the two calls to given. For example, instead of "
1906 "@given(booleans()) @given(integers()), you could write "
1907 "@given(booleans(), integers())"
1908 )
1910 settings = wrapped_test._hypothesis_internal_use_settings
1911 random = get_random_for_wrapped_test(test, wrapped_test)
1912 arguments, kwargs, stuff = process_arguments_to_given(
1913 wrapped_test,
1914 arguments,
1915 kwargs,
1916 given_kwargs,
1917 new_signature.parameters,
1918 )
1920 if (
1921 inspect.iscoroutinefunction(test)
1922 and get_executor(stuff.selfy) is default_executor
1923 ):
1924 # See https://github.com/HypothesisWorks/hypothesis/issues/3054
1925 # If our custom executor doesn't handle coroutines, or we return an
1926 # awaitable from a non-async-def function, we just rely on the
1927 # return_value health check. This catches most user errors though.
1928 raise InvalidArgument(
1929 "Hypothesis doesn't know how to run async test functions like "
1930 f"{test.__name__}. You'll need to write a custom executor, "
1931 "or use a library like pytest-asyncio or pytest-trio which can "
1932 "handle the translation for you.\n See https://hypothesis."
1933 "readthedocs.io/en/latest/details.html#custom-function-execution"
1934 )
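# Editor's sketch of the custom-executor escape hatch mentioned above
# (illustrative; see the linked docs for the supported protocol). Defining
# execute_example on the test class lets it run the coroutine each async
# example returns:
#
#     class AsyncioTestCase(TestCase):
#         def execute_example(self, f):
#             asyncio.run(f())
#
#         @given(st.integers())
#         async def test_async(self, x): ...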
1936 runner = stuff.selfy
1937 if isinstance(stuff.selfy, TestCase) and test.__name__ in dir(TestCase):
1938 fail_health_check(
1939 settings,
1940 f"You have applied @given to the method {test.__name__}, which is "
1941 "used by the unittest runner but is not itself a test. "
1942 "This is not useful in any way.",
1943 HealthCheck.not_a_test_method,
1944 )
1945 if bad_django_TestCase(runner): # pragma: no cover
1946 # Covered by the Django tests, but not the pytest coverage task
1947 raise InvalidArgument(
1948 "You have applied @given to a method on "
1949 f"{type(runner).__qualname__}, but this "
1950 "class does not inherit from the supported versions in "
1951 "`hypothesis.extra.django`. Use the Hypothesis variants "
1952 "to ensure that each example is run in a separate "
1953 "database transaction."
1954 )
1956 nonlocal thread_local
1957 # Check selfy really is self (not e.g. a mock) before we health-check
1958 cur_self = (
1959 stuff.selfy
1960 if getattr(type(stuff.selfy), test.__name__, None) is wrapped_test
1961 else None
1962 )
1963 if thread_local.prev_self is not_set:
1964 thread_local.prev_self = cur_self
1965 elif cur_self is not thread_local.prev_self:
1966 fail_health_check(
1967 settings,
1968 f"The method {test.__qualname__} was called from multiple "
1969 "different executors. This may lead to flaky tests and "
1970 "nonreproducible errors when replaying from database."
1971 "\n\n"
1972 "Unlike most health checks, HealthCheck.differing_executors "
1973 "warns about a correctness issue with your test. We "
1974 "therefore recommend fixing the underlying issue, rather "
1975 "than suppressing this health check. However, if you are "
1976 "confident this health check can be safely disabled, you can "
1977 "do so with "
1978 "@settings(suppress_health_check=[HealthCheck.differing_executors]). "
1979 "See "
1980 "https://hypothesis.readthedocs.io/en/latest/reference/api.html#hypothesis.HealthCheck "
1981 "for details.",
1982 HealthCheck.differing_executors,
1983 )
1985 state = StateForActualGivenExecution(
1986 stuff,
1987 test,
1988 settings,
1989 random,
1990 wrapped_test,
1991 thread_overlap=thread_overlap,
1992 )
1994 # If there was a @reproduce_failure decorator, use it to reproduce
1995 # the error (or complain that we couldn't). Either way, this will
1996 # always raise some kind of error.
1997 if (
1998 reproduce_failure := wrapped_test._hypothesis_internal_use_reproduce_failure
1999 ) is not None:
2000 expected_version, failure = reproduce_failure
2001 if expected_version != __version__:
2002 raise InvalidArgument(
2003 "Attempting to reproduce a failure from a different "
2004 f"version of Hypothesis. This failure is from {expected_version}, but "
2005 f"you are currently running {__version__!r}. Please change your "
2006 "Hypothesis version to a matching one."
2007 )
2008 try:
2009 state.execute_once(
2010 ConjectureData.for_choices(decode_failure(failure)),
2011 print_example=True,
2012 is_final=True,
2013 )
2014 raise DidNotReproduce(
2015 "Expected the test to raise an error, but it "
2016 "completed successfully."
2017 )
2018 except StopTest:
2019 raise DidNotReproduce(
2020 "The shape of the test data has changed in some way "
2021 "from where this blob was defined. Are you sure "
2022 "you're running the same test?"
2023 ) from None
2024 except UnsatisfiedAssumption:
2025 raise DidNotReproduce(
2026 "The test data failed to satisfy an assumption in the "
2027 "test. Have you added it since this blob was generated?"
2028 ) from None
2030 # There was no @reproduce_failure, so start by running any explicit
2031 # examples from @example decorators.
2032 if errors := list(
2033 execute_explicit_examples(
2034 state, wrapped_test, arguments, kwargs, original_sig
2035 )
2036 ):
2037 # If we're not going to report multiple bugs, we would have
2038 # stopped running explicit examples at the first failure.
2039 assert len(errors) == 1 or state.settings.report_multiple_bugs
2041 # If an explicit example raised a 'skip' exception, ensure it's never
2042 # wrapped up in an exception group. Because we break out of the loop
2043 # immediately on finding a skip, if present it's always the last error.
2044 if isinstance(errors[-1][1], skip_exceptions_to_reraise()):
2045 # Covered by `test_issue_3453_regression`, just in a subprocess.
2046 del errors[:-1] # pragma: no cover
2048 _raise_to_user(errors, state.settings, [], " in explicit examples")
2050 # If there were any explicit examples, they all ran successfully.
2051 # The next step is to use the Conjecture engine to run the test on
2052 # many different inputs.
2053 ran_explicit_examples = (
2054 Phase.explicit in state.settings.phases
2055 and getattr(wrapped_test, "hypothesis_explicit_examples", ())
2056 )
2057 SKIP_BECAUSE_NO_EXAMPLES = unittest.SkipTest(
2058 "Hypothesis has been told to run no examples for this test."
2059 )
2060 if not (
2061 Phase.reuse in settings.phases or Phase.generate in settings.phases
2062 ):
2063 if not ran_explicit_examples:
2064 raise SKIP_BECAUSE_NO_EXAMPLES
2065 return
2067 try:
2068 if isinstance(runner, TestCase) and hasattr(runner, "subTest"):
2069 subTest = runner.subTest
2070 try:
2071 runner.subTest = types.MethodType(fake_subTest, runner)
2072 state.run_engine()
2073 finally:
2074 runner.subTest = subTest
2075 else:
2076 state.run_engine()
2077 except BaseException as e:
2078 # The exception caught here should either be an actual test
2079 # failure (or BaseExceptionGroup), or some kind of fatal error
2080 # that caused the engine to stop.
2081 generated_seed = (
2082 wrapped_test._hypothesis_internal_use_generated_seed
2083 )
2084 with local_settings(settings):
2085 if not (state.failed_normally or generated_seed is None):
2086 if running_under_pytest:
2087 report(
2088 f"You can add @seed({generated_seed}) to this test or "
2089 f"run pytest with --hypothesis-seed={generated_seed} "
2090 "to reproduce this failure."
2091 )
2092 else:
2093 report(
2094 f"You can add @seed({generated_seed}) to this test to "
2095 "reproduce this failure."
2096 )
2097 # The dance here is to avoid showing users long tracebacks
2098 # full of Hypothesis internals they don't care about.
2099 # We have to do this inline, to avoid adding another
2100 # internal stack frame just when we've removed the rest.
2101 #
2102 # Using a variable for our trimmed error ensures that the line
2103 # which will actually appear in tracebacks is as clear as
2104 # possible - "raise the_error_hypothesis_found".
2105 the_error_hypothesis_found = e.with_traceback(
2106 None
2107 if isinstance(e, BaseExceptionGroup)
2108 else get_trimmed_traceback()
2109 )
2110 raise the_error_hypothesis_found
2112 if not (ran_explicit_examples or state.ever_executed):
2113 raise SKIP_BECAUSE_NO_EXAMPLES
2114 finally:
2115 with thread_overlap_lock:
2116 del thread_overlap[threadid]
2118 def _get_fuzz_target() -> (
2119 Callable[[bytes | bytearray | memoryview | BinaryIO], bytes | None]
2120 ):
2121 # Because fuzzing interfaces are very performance-sensitive, we use a
2122 # somewhat more complicated structure here. `_get_fuzz_target()` is
2123 # called by the `HypothesisHandle.fuzz_one_input` property, allowing
2124 # us to defer our collection of the settings, random instance, and
2125 # reassignable `inner_test` (etc) until `fuzz_one_input` is accessed.
2126 #
2127 # We then share the performance cost of setting up `state` between
2128 # many invocations of the target. We explicitly force `deadline=None`
2129 # for performance reasons, saving ~40% of the runtime of an empty test.
2130 test = wrapped_test.hypothesis.inner_test
2131 settings = Settings(
2132 parent=wrapped_test._hypothesis_internal_use_settings, deadline=None
2133 )
2134 random = get_random_for_wrapped_test(test, wrapped_test)
2135 _args, _kwargs, stuff = process_arguments_to_given(
2136 wrapped_test, (), {}, given_kwargs, new_signature.parameters
2137 )
2138 assert not _args
2139 assert not _kwargs
2140 state = StateForActualGivenExecution(
2141 stuff,
2142 test,
2143 settings,
2144 random,
2145 wrapped_test,
2146 thread_overlap=thread_overlap,
2147 )
2148 database_key = function_digest(test) + b".secondary"
2149 # We track the minimal-so-far example for each distinct origin, so
2150 # that we save O(log n) rather than O(n) examples over a long run.
2151 # In particular, this saturates for common errors in long runs instead
2152 # of storing huge volumes of low-value data.
2153 minimal_failures: dict = {}
2155 def fuzz_one_input(
2156 buffer: bytes | bytearray | memoryview | BinaryIO,
2157 ) -> bytes | None:
2158 # This inner part is all that the fuzzer will actually run,
2159 # so we keep it as small and as fast as possible.
2160 if isinstance(buffer, io.IOBase):
2161 buffer = buffer.read(BUFFER_SIZE)
2162 assert isinstance(buffer, (bytes, bytearray, memoryview))
2163 data = ConjectureData(
2164 random=None,
2165 provider=BytestringProvider,
2166 provider_kw={"bytestring": buffer},
2167 )
2168 try:
2169 state.execute_once(data)
2170 status = Status.VALID
2171 except StopTest:
2172 status = data.status
2173 return None
2174 except UnsatisfiedAssumption:
2175 status = Status.INVALID
2176 return None
2177 except BaseException:
2178 known = minimal_failures.get(data.interesting_origin)
2179 if settings.database is not None and (
2180 known is None or sort_key(data.nodes) <= sort_key(known)
2181 ):
2182 settings.database.save(
2183 database_key, choices_to_bytes(data.choices)
2184 )
2185 minimal_failures[data.interesting_origin] = data.nodes
2186 status = Status.INTERESTING
2187 raise
2188 finally:
2189 if observability_enabled():
2190 data.freeze()
2191 tc = make_testcase(
2192 run_start=state._start_timestamp,
2193 property=state.test_identifier,
2194 data=data,
2195 how_generated="fuzz_one_input",
2196 representation=state._string_repr,
2197 arguments=data._observability_args,
2198 timing=state._timing_features,
2199 coverage=None,
2200 status=status,
2201 backend_metadata=data.provider.observe_test_case(),
2202 )
2203 deliver_observation(tc)
2204 state._timing_features = {}
2206 assert isinstance(data.provider, BytestringProvider)
2207 return bytes(data.provider.drawn)
2209 fuzz_one_input.__doc__ = HypothesisHandle.fuzz_one_input.__doc__
2210 return fuzz_one_input
2212 # After having created the decorated test function, we need to copy
2213 # over some attributes to make the switch as seamless as possible.
2215 for attrib in dir(test):
2216 if not (attrib.startswith("_") or hasattr(wrapped_test, attrib)):
2217 setattr(wrapped_test, attrib, getattr(test, attrib))
2218 wrapped_test.is_hypothesis_test = True
2219 if hasattr(test, "_hypothesis_internal_settings_applied"):
2220 # Used to check if @settings is applied twice.
2221 wrapped_test._hypothesis_internal_settings_applied = True
2222 wrapped_test._hypothesis_internal_use_seed = getattr(
2223 test, "_hypothesis_internal_use_seed", None
2224 )
2225 wrapped_test._hypothesis_internal_use_settings = (
2226 getattr(test, "_hypothesis_internal_use_settings", None) or Settings.default
2227 )
2228 wrapped_test._hypothesis_internal_use_reproduce_failure = getattr(
2229 test, "_hypothesis_internal_use_reproduce_failure", None
2230 )
2231 wrapped_test.hypothesis = HypothesisHandle(test, _get_fuzz_target, given_kwargs)
2232 return wrapped_test
2234 return run_test_as_given
2237def find(
2238 specifier: SearchStrategy[Ex],
2239 condition: Callable[[Any], bool],
2240 *,
2241 settings: Settings | None = None,
2242 random: Random | None = None,
2243 database_key: bytes | None = None,
2244) -> Ex:
2245 """Returns the minimal example from the given strategy ``specifier`` that
2246 matches the predicate function ``condition``."""
2247 if settings is None:
2248 settings = Settings(max_examples=2000)
2249 settings = Settings(
2250 settings, suppress_health_check=list(HealthCheck), report_multiple_bugs=False
2251 )
2253 if database_key is None and settings.database is not None:
2254 # Note: the database key is not guaranteed to be unique. If it collides,
2255 # replaying database examples may fail to reproduce because they are being
2256 # replayed against the wrong condition.
2257 database_key = function_digest(condition)
2259 if not isinstance(specifier, SearchStrategy):
2260 raise InvalidArgument(
2261 f"Expected SearchStrategy but got {specifier!r} of "
2262 f"type {type(specifier).__name__}"
2263 )
2264 specifier.validate()
2266 last: list[Ex] = []
2268 @settings
2269 @given(specifier)
2270 def test(v):
2271 if condition(v):
2272 last[:] = [v]
2273 raise Found
2275 if random is not None:
2276 test = seed(random.getrandbits(64))(test)
2278 test._hypothesis_internal_database_key = database_key # type: ignore
2280 try:
2281 test()
2282 except Found:
2283 return last[0]
2285 raise NoSuchExample(get_pretty_function_description(condition))
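# Editor's sketch (illustrative, not part of this module): minimal use of
# find(). Because results are shrunk, find returns the *minimal* satisfying
# example - here, plausibly [11].
def _demo_find() -> None:
    minimal = find(st.lists(st.integers()), lambda xs: sum(xs) > 10)
    print(minimal)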