
# This file is part of Hypothesis, which may be found at
# https://github.com/HypothesisWorks/hypothesis/
#
# Copyright the Hypothesis Authors.
# Individual contributors are listed in AUTHORS.rst and the git log.
#
# This Source Code Form is subject to the terms of the Mozilla Public License,
# v. 2.0. If a copy of the MPL was not distributed with this file, You can
# obtain one at https://mozilla.org/MPL/2.0/.

"""This module provides the core primitives of Hypothesis, such as given."""

import base64
import contextlib
import dataclasses
import datetime
import inspect
import io
import math
import os
import sys
import threading
import time
import traceback
import types
import unittest
import warnings
import zlib
from collections import defaultdict
from collections.abc import Coroutine, Generator, Hashable, Iterable, Sequence
from dataclasses import dataclass, field
from functools import partial
from inspect import Parameter
from random import Random
from threading import Lock
from typing import (
    Any,
    BinaryIO,
    Callable,
    Optional,
    TypeVar,
    Union,
    overload,
)
from unittest import TestCase

from hypothesis import strategies as st
from hypothesis._settings import (
    HealthCheck,
    Phase,
    Verbosity,
    all_settings,
    local_settings,
    settings as Settings,
)
from hypothesis.control import BuildContext, currently_in_test_context
from hypothesis.database import choices_from_bytes, choices_to_bytes
from hypothesis.errors import (
    BackendCannotProceed,
    DeadlineExceeded,
    DidNotReproduce,
    FailedHealthCheck,
    FlakyFailure,
    FlakyReplay,
    Found,
    Frozen,
    HypothesisException,
    HypothesisWarning,
    InvalidArgument,
    NoSuchExample,
    StopTest,
    Unsatisfiable,
    UnsatisfiedAssumption,
)
from hypothesis.internal import observability
from hypothesis.internal.compat import (
    PYPY,
    BaseExceptionGroup,
    EllipsisType,
    add_note,
    bad_django_TestCase,
    get_type_hints,
    int_from_bytes,
)
from hypothesis.internal.conjecture.choice import ChoiceT
from hypothesis.internal.conjecture.data import ConjectureData, Status
from hypothesis.internal.conjecture.engine import BUFFER_SIZE, ConjectureRunner
from hypothesis.internal.conjecture.junkdrawer import (
    ensure_free_stackframes,
    gc_cumulative_time,
)
from hypothesis.internal.conjecture.providers import (
    BytestringProvider,
    PrimitiveProvider,
)
from hypothesis.internal.conjecture.shrinker import sort_key
from hypothesis.internal.entropy import deterministic_PRNG
from hypothesis.internal.escalation import (
    InterestingOrigin,
    current_pytest_item,
    format_exception,
    get_trimmed_traceback,
    is_hypothesis_file,
)
from hypothesis.internal.healthcheck import fail_health_check
from hypothesis.internal.observability import (
    InfoObservation,
    InfoObservationType,
    deliver_observation,
    make_testcase,
    observability_enabled,
)
from hypothesis.internal.reflection import (
    convert_positional_arguments,
    define_function_signature,
    function_digest,
    get_pretty_function_description,
    get_signature,
    impersonate,
    is_mock,
    nicerepr,
    proxies,
    repr_call,
)
from hypothesis.internal.scrutineer import (
    MONITORING_TOOL_ID,
    Trace,
    Tracer,
    explanatory_lines,
    tractable_coverage_report,
)
from hypothesis.internal.validation import check_type
from hypothesis.reporting import (
    current_verbosity,
    report,
    verbose_report,
    with_reporter,
)
from hypothesis.statistics import describe_statistics, describe_targets, note_statistics
from hypothesis.strategies._internal.misc import NOTHING
from hypothesis.strategies._internal.strategies import (
    Ex,
    SearchStrategy,
    check_strategy,
)
from hypothesis.utils.conventions import not_set
from hypothesis.utils.threading import ThreadLocal
from hypothesis.vendor.pretty import RepresentationPrinter
from hypothesis.version import __version__

TestFunc = TypeVar("TestFunc", bound=Callable)


running_under_pytest = False
pytest_shows_exceptiongroups = True
global_force_seed = None
# `threadlocal` stores "engine-global" constants, which are global relative to a
# ConjectureRunner instance (roughly speaking). Since only one conjecture runner
# instance can be active per thread, making engine constants thread-local prevents
# the ConjectureRunner instances of concurrent threads from treading on each other.
threadlocal = ThreadLocal(_hypothesis_global_random=lambda: None)


@dataclass
class Example:
    args: Any
    kwargs: Any
    # Plus two optional arguments for .xfail()
    raises: Any = field(default=None)
    reason: Any = field(default=None)


# TODO_DOCS link to not-yet-existent patch-dumping docs


class example:
    """
    Add an explicit input to a Hypothesis test, which Hypothesis will always
    try before generating random inputs. This combines the randomized nature of
    Hypothesis generation with a traditional parametrized test.

    For example:

    .. code-block:: python

        @example("Hello world")
        @example("some string with special significance")
        @given(st.text())
        def test_strings(s):
            pass


    will call ``test_strings("Hello world")`` and

    ``test_strings("some string with special significance")`` before generating
    any random inputs. |@example| may be placed in any order relative to |@given|
    and |@settings|.

    Explicit inputs from |@example| are run in the |Phase.explicit| phase.
    Explicit inputs do not count towards |settings.max_examples|. Note that
    explicit inputs added by |@example| do not shrink. If an explicit input
    fails, Hypothesis will stop and report the failure without generating any
    random inputs.

    |@example| can also be used to easily reproduce a failure. For instance, if
    Hypothesis reports that ``f(n=[0, math.nan])`` fails, you can add
    ``@example(n=[0, math.nan])`` to your test to quickly reproduce that failure.

    Arguments to ``@example``
    -------------------------

    Arguments to |@example| have the same behavior and restrictions as arguments
    to |@given|. This means they may be either positional or keyword arguments
    (but not both in the same |@example|):

    .. code-block:: python

        @example(1, 2)
        @example(x=1, y=2)
        @given(st.integers(), st.integers())
        def test(x, y):
            pass


    Note that while arguments to |@given| are strategies (like |st.integers|),
    arguments to |@example| are values instead (like ``1``).


    See the :ref:`given-arguments` section for full details.
    """

    def __init__(self, *args: Any, **kwargs: Any) -> None:
        if args and kwargs:
            raise InvalidArgument(
                "Cannot mix positional and keyword arguments for examples"
            )
        if not (args or kwargs):
            raise InvalidArgument("An example must provide at least one argument")

        self.hypothesis_explicit_examples: list[Example] = []
        self._this_example = Example(tuple(args), kwargs)

    def __call__(self, test: TestFunc) -> TestFunc:
        if not hasattr(test, "hypothesis_explicit_examples"):
            test.hypothesis_explicit_examples = self.hypothesis_explicit_examples  # type: ignore
        test.hypothesis_explicit_examples.append(self._this_example)  # type: ignore
        return test

    def xfail(
        self,
        condition: bool = True,  # noqa: FBT002
        *,
        reason: str = "",
        raises: Union[
            type[BaseException], tuple[type[BaseException], ...]
        ] = BaseException,
    ) -> "example":
        """Mark this example as an expected failure, similarly to
        :obj:`pytest.mark.xfail(strict=True) <pytest.mark.xfail>`.

        Expected-failing examples allow you to check that your test does fail on
        some examples, and therefore build confidence that *passing* tests are
        because your code is working, not because the test is missing something.

        .. code-block:: python

            @example(...).xfail()
            @example(...).xfail(reason="Prices must be non-negative")
            @example(...).xfail(raises=(KeyError, ValueError))
            @example(...).xfail(sys.version_info[:2] >= (3, 12), reason="needs py 3.12")
            @example(...).xfail(condition=sys.platform != "linux", raises=OSError)
            def test(x):
                pass

        .. note::

            Expected-failing examples are handled separately from those generated
            by strategies, so you should usually ensure that there is no overlap.

            .. code-block:: python

                @example(x=1, y=0).xfail(raises=ZeroDivisionError)
                @given(x=st.just(1), y=st.integers())  # Missing `.filter(bool)`!
                def test_fraction(x, y):
                    # This test will try the explicit example and see it fail as
                    # expected, then go on to generate more examples from the
                    # strategy. If we happen to generate y=0, the test will fail
                    # because only the explicit example is treated as xfailing.
                    x / y
        """
        check_type(bool, condition, "condition")
        check_type(str, reason, "reason")
        if not (
            isinstance(raises, type) and issubclass(raises, BaseException)
        ) and not (
            isinstance(raises, tuple)
            and raises  # () -> expected to fail with no error, which is impossible
            and all(
                isinstance(r, type) and issubclass(r, BaseException) for r in raises
            )
        ):
            raise InvalidArgument(
                f"{raises=} must be an exception type or tuple of exception types"
            )
        if condition:
            self._this_example = dataclasses.replace(
                self._this_example, raises=raises, reason=reason
            )
        return self

    def via(self, whence: str, /) -> "example":
        """Attach a machine-readable label noting what the origin of this example
        was. |example.via| is completely optional and does not change runtime
        behavior.

        |example.via| is intended to support self-documenting behavior, as well as
        tooling which might add (or remove) |@example| decorators automatically.
        For example:

        .. code-block:: python

            # Annotating examples is optional and does not change runtime behavior
            @example(...)
            @example(...).via("regression test for issue #42")
            @example(...).via("discovered failure")
            def test(x):
                pass

        .. note::

            `HypoFuzz <https://hypofuzz.com/>`_ uses |example.via| to tag examples
            in the patch of its high-coverage set of explicit inputs, on
            `the patches page <https://hypofuzz.com/example-dashboard/#/patches>`_.
        """
        if not isinstance(whence, str):
            raise InvalidArgument(".via() must be passed a string")
        # This is deliberately a no-op at runtime; the tools operate on source code.
        return self


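# Putting the pieces together -- a hedged sketch of hypothetical test code, not
# part of this module: @example instances chain with .xfail() and .via(), in
# any order relative to @given and @settings.
#
#     @example(0).via("boundary value")
#     @example(-1).xfail(raises=ValueError, reason="sizes must be non-negative")
#     @given(st.integers())
#     def test_size(n): ...
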

def seed(seed: Hashable) -> Callable[[TestFunc], TestFunc]:
    """
    Seed the randomness for this test.

    ``seed`` may be any hashable object. No exact meaning for ``seed`` is provided
    other than that for a fixed seed value Hypothesis will produce the same

    examples (assuming that there are no other sources of nondeterminism, such

    as timing, hash randomization, or external state).

    For example, the following test function and |RuleBasedStateMachine| will
    each generate the same series of examples each time they are executed:

    .. code-block:: python

        @seed(1234)
        @given(st.integers())
        def test(n): ...

        @seed(6789)
        class MyMachine(RuleBasedStateMachine): ...

    If using pytest, you can alternatively pass ``--hypothesis-seed`` on the
    command line.

    Setting a seed overrides |settings.derandomize|, which is designed to enable
    deterministic CI tests rather than reproducing observed failures.

    Hypothesis will only print the seed which would reproduce a failure if a test
    fails in an unexpected way, for instance inside Hypothesis internals.
    """

    def accept(test):
        test._hypothesis_internal_use_seed = seed
        current_settings = getattr(test, "_hypothesis_internal_use_settings", None)
        test._hypothesis_internal_use_settings = Settings(
            current_settings, database=None
        )
        return test

    return accept


# TODO_DOCS: link to /explanation/choice-sequence


def reproduce_failure(version: str, blob: bytes) -> Callable[[TestFunc], TestFunc]:
    """
    Run the example corresponding to the binary ``blob`` in order to reproduce a
    failure. ``blob`` is a serialized version of the internal input representation
    of Hypothesis.

    A test decorated with |@reproduce_failure| always runs exactly one example,
    which is expected to cause a failure. If the provided ``blob`` does not
    cause a failure, Hypothesis will raise |DidNotReproduce|.

    Hypothesis will print an |@reproduce_failure| decorator if
    |settings.print_blob| is ``True`` (which is the default in CI).

    |@reproduce_failure| is intended to be temporarily added to your test suite in
    order to reproduce a failure. It is not intended to be a permanent addition to
    your test suite. Because of this, no compatibility guarantees are made across
    Hypothesis versions, and |@reproduce_failure| will error if used on a different
    Hypothesis version than it was created for.

    .. seealso::

        See also the :doc:`/tutorial/replaying-failures` tutorial.
    """

    def accept(test):
        test._hypothesis_internal_use_reproduce_failure = (version, blob)
        return test

    return accept


def reproduction_decorator(choices: Iterable[ChoiceT]) -> str:
    return f"@reproduce_failure({__version__!r}, {encode_failure(choices)!r})"


def encode_failure(choices: Iterable[ChoiceT]) -> bytes:
    blob = choices_to_bytes(choices)
    compressed = zlib.compress(blob)
    if len(compressed) < len(blob):
        blob = b"\1" + compressed
    else:
        blob = b"\0" + blob
    return base64.b64encode(blob)


def decode_failure(blob: bytes) -> Sequence[ChoiceT]:
    try:
        decoded = base64.b64decode(blob)
    except Exception:
        raise InvalidArgument(f"Invalid base64 encoded string: {blob!r}") from None

    prefix = decoded[:1]
    if prefix == b"\0":
        decoded = decoded[1:]
    elif prefix == b"\1":
        try:
            decoded = zlib.decompress(decoded[1:])
        except zlib.error as err:
            raise InvalidArgument(
                f"Invalid zlib compression for blob {blob!r}"
            ) from err
    else:
        raise InvalidArgument(
            f"Could not decode blob {blob!r}: Invalid start byte {prefix!r}"
        )

    choices = choices_from_bytes(decoded)
    if choices is None:
        raise InvalidArgument(f"Invalid serialized choice sequence for blob {blob!r}")

    return choices


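# A minimal illustrative sketch, not part of the module API: the failure blob
# built by encode_failure() above is a one-byte header -- b"\0" for raw,
# b"\1" for zlib-compressed, whichever is shorter -- followed by the serialized
# choice sequence, with the whole thing base64-encoded. Round-tripping through
# the two helpers recovers the original choices:
#
#     blob = encode_failure([1, b"\x00", 0.5])
#     assert list(decode_failure(blob)) == [1, b"\x00", 0.5]
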

def _invalid(message, *, exc=InvalidArgument, test, given_kwargs):
    @impersonate(test)
    def wrapped_test(*arguments, **kwargs):  # pragma: no cover  # coverage limitation
        raise exc(message)

    wrapped_test.is_hypothesis_test = True
    wrapped_test.hypothesis = HypothesisHandle(
        inner_test=test,
        _get_fuzz_target=wrapped_test,
        _given_kwargs=given_kwargs,
    )
    return wrapped_test


def is_invalid_test(test, original_sig, given_arguments, given_kwargs):
    """Check the arguments to ``@given`` for basic usage constraints.

    Most errors are not raised immediately; instead we return a dummy test
    function that will raise the appropriate error if it is actually called.

    When the user runs a subset of tests (e.g. via ``pytest -k``), errors will

    only be reported for tests that actually ran.
    """
    invalid = partial(_invalid, test=test, given_kwargs=given_kwargs)

    if not (given_arguments or given_kwargs):
        return invalid("given must be called with at least one argument")

    params = list(original_sig.parameters.values())
    pos_params = [p for p in params if p.kind is p.POSITIONAL_OR_KEYWORD]
    kwonly_params = [p for p in params if p.kind is p.KEYWORD_ONLY]
    if given_arguments and params != pos_params:
        return invalid(
            "positional arguments to @given are not supported with varargs, "
            "varkeywords, positional-only, or keyword-only arguments"
        )

    if len(given_arguments) > len(pos_params):
        return invalid(
            f"Too many positional arguments for {test.__name__}() were passed to "
            f"@given - expected at most {len(pos_params)} "
            f"arguments, but got {len(given_arguments)} {given_arguments!r}"
        )

    if ... in given_arguments:
        return invalid(
            "... was passed as a positional argument to @given, but may only be "
            "passed as a keyword argument or as the sole argument of @given"
        )

    if given_arguments and given_kwargs:
        return invalid("cannot mix positional and keyword arguments to @given")
    extra_kwargs = [
        k for k in given_kwargs if k not in {p.name for p in pos_params + kwonly_params}
    ]
    if extra_kwargs and (params == [] or params[-1].kind is not params[-1].VAR_KEYWORD):
        arg = extra_kwargs[0]
        extra = ""
        if arg in all_settings:
            extra = f". Did you mean @settings({arg}={given_kwargs[arg]!r})?"
        return invalid(
            f"{test.__name__}() got an unexpected keyword argument {arg!r}, "
            f"from `{arg}={given_kwargs[arg]!r}` in @given{extra}"
        )
    if any(p.default is not p.empty for p in params):
        return invalid("Cannot apply @given to a function with defaults.")

    # This case would raise Unsatisfiable *anyway*, but by detecting it here we can
    # provide a much more helpful error message for people e.g. using the Ghostwriter.
    empty = [
        f"{s!r} (arg {idx})" for idx, s in enumerate(given_arguments) if s is NOTHING
    ] + [f"{name}={s!r}" for name, s in given_kwargs.items() if s is NOTHING]
    if empty:
        strats = "strategies" if len(empty) > 1 else "strategy"
        return invalid(
            f"Cannot generate examples from empty {strats}: " + ", ".join(empty),
            exc=Unsatisfiable,
        )


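# Illustrative note, with hypothetical user code: because _invalid() returns a
# dummy test, misuse of @given is reported lazily when the wrapped test is
# actually called, rather than at decoration time. For instance:
#
#     @given()  # no strategies!
#     def test_nothing():
#         pass
#
#     test_nothing()  # raises InvalidArgument("given must be called with ...")
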

def execute_explicit_examples(state, wrapped_test, arguments, kwargs, original_sig):
    assert isinstance(state, StateForActualGivenExecution)
    posargs = [
        p.name
        for p in original_sig.parameters.values()
        if p.kind is p.POSITIONAL_OR_KEYWORD
    ]

    for example in reversed(getattr(wrapped_test, "hypothesis_explicit_examples", ())):
        assert isinstance(example, Example)
        # All of this validation is to check that @example() got "the same" arguments
        # as @given, i.e. corresponding to the same parameters, even though they might
        # be any mixture of positional and keyword arguments.
        if example.args:
            assert not example.kwargs
            if any(
                p.kind is p.POSITIONAL_ONLY for p in original_sig.parameters.values()
            ):
                raise InvalidArgument(
                    "Cannot pass positional arguments to @example() when decorating "
                    "a test function which has positional-only parameters."
                )
            if len(example.args) > len(posargs):
                raise InvalidArgument(
                    "example has too many arguments for test. Expected at most "
                    f"{len(posargs)} but got {len(example.args)}"
                )
            example_kwargs = dict(zip(posargs[-len(example.args) :], example.args))
        else:
            example_kwargs = dict(example.kwargs)
        given_kws = ", ".join(
            repr(k) for k in sorted(wrapped_test.hypothesis._given_kwargs)
        )
        example_kws = ", ".join(repr(k) for k in sorted(example_kwargs))
        if given_kws != example_kws:
            raise InvalidArgument(
                f"Inconsistent args: @given() got strategies for {given_kws}, "
                f"but @example() got arguments for {example_kws}"
            ) from None


        # This is certainly true because the example_kwargs exactly match the params
        # reserved by @given(), which are then removed from the function signature.

        assert set(example_kwargs).isdisjoint(kwargs)
        example_kwargs.update(kwargs)

        if Phase.explicit not in state.settings.phases:
            continue

        with local_settings(state.settings):
            fragments_reported = []
            empty_data = ConjectureData.for_choices([])
            try:
                execute_example = partial(
                    state.execute_once,
                    empty_data,
                    is_final=True,
                    print_example=True,
                    example_kwargs=example_kwargs,
                )
                with with_reporter(fragments_reported.append):
                    if example.raises is None:
                        execute_example()
                    else:
                        # @example(...).xfail(...)
                        bits = ", ".join(nicerepr(x) for x in arguments) + ", ".join(
                            f"{k}={nicerepr(v)}" for k, v in example_kwargs.items()
                        )
                        try:
                            execute_example()
                        except failure_exceptions_to_catch() as err:
                            if not isinstance(err, example.raises):
                                raise
                            # Save a string form of this example; we'll warn if it's
                            # ever generated by the strategy (which can't be xfailed)
                            state.xfail_example_reprs.add(
                                repr_call(state.test, arguments, example_kwargs)
                            )
                        except example.raises as err:
                            # We'd usually check this as early as possible, but it's
                            # possible for failure_exceptions_to_catch() to grow when
                            # e.g. pytest is imported between import- and test-time.
                            raise InvalidArgument(
                                f"@example({bits}) raised an expected {err!r}, "
                                "but Hypothesis does not treat this as a test failure"
                            ) from err
                        else:
                            # Unexpectedly passing; always raise an error in this case.
                            reason = f" because {example.reason}" * bool(example.reason)
                            if example.raises is BaseException:
                                name = "exception"  # special-case no raises= arg
                            elif not isinstance(example.raises, tuple):
                                name = example.raises.__name__
                            elif len(example.raises) == 1:
                                name = example.raises[0].__name__
                            else:
                                name = (
                                    ", ".join(ex.__name__ for ex in example.raises[:-1])
                                    + f", or {example.raises[-1].__name__}"
                                )
                            vowel = name.upper()[0] in "AEIOU"
                            raise AssertionError(
                                f"Expected a{'n' * vowel} {name} from @example({bits})"
                                f"{reason}, but no exception was raised."
                            )
            except UnsatisfiedAssumption:
                # Odd though it seems, we deliberately support explicit examples that
                # are then rejected by a call to `assume()`. As well as iterative
                # development, this is rather useful to replay Hypothesis' part of
                # a saved failure when other arguments are supplied by e.g. pytest.
                # See https://github.com/HypothesisWorks/hypothesis/issues/2125
                with contextlib.suppress(StopTest):
                    empty_data.conclude_test(Status.INVALID)
            except BaseException as err:
                # In order to support reporting of multiple failing examples, we yield
                # each of the (report text, error) pairs we find back to the top-level
                # runner. This also ensures that user-facing stack traces have as few
                # frames of Hypothesis internals as possible.
                err = err.with_traceback(get_trimmed_traceback())

                # One user error - whether misunderstanding or typo - we've seen a few
                # times is to pass strategies to @example() where values are expected.
                # Checking is easy, and false-positives not much of a problem, so:
                if isinstance(err, failure_exceptions_to_catch()) and any(
                    isinstance(arg, SearchStrategy)
                    for arg in example.args + tuple(example.kwargs.values())
                ):
                    new = HypothesisWarning(
                        "The @example() decorator expects to be passed values, but "
                        "you passed strategies instead. See https://hypothesis."
                        "readthedocs.io/en/latest/reference/api.html#hypothesis"
                        ".example for details."
                    )
                    new.__cause__ = err
                    err = new

                with contextlib.suppress(StopTest):
                    empty_data.conclude_test(Status.INVALID)
                yield (fragments_reported, err)
                if (
                    state.settings.report_multiple_bugs
                    and pytest_shows_exceptiongroups
                    and isinstance(err, failure_exceptions_to_catch())
                    and not isinstance(err, skip_exceptions_to_reraise())
                ):
                    continue
                break
            finally:
                if fragments_reported:
                    assert fragments_reported[0].startswith("Falsifying example")
                    fragments_reported[0] = fragments_reported[0].replace(
                        "Falsifying example", "Falsifying explicit example", 1
                    )

                empty_data.freeze()
                if observability_enabled():
                    tc = make_testcase(
                        run_start=state._start_timestamp,
                        property=state.test_identifier,
                        data=empty_data,
                        how_generated="explicit example",
                        representation=state._string_repr,
                        timing=state._timing_features,
                    )
                    deliver_observation(tc)

            if fragments_reported:
                verbose_report(fragments_reported[0].replace("Falsifying", "Trying", 1))
                for f in fragments_reported[1:]:
                    verbose_report(f)


def get_random_for_wrapped_test(test, wrapped_test):
    settings = wrapped_test._hypothesis_internal_use_settings
    wrapped_test._hypothesis_internal_use_generated_seed = None

    if wrapped_test._hypothesis_internal_use_seed is not None:
        return Random(wrapped_test._hypothesis_internal_use_seed)
    elif settings.derandomize:
        return Random(int_from_bytes(function_digest(test)))
    elif global_force_seed is not None:
        return Random(global_force_seed)
    else:
        if threadlocal._hypothesis_global_random is None:  # pragma: no cover
            threadlocal._hypothesis_global_random = Random()
        seed = threadlocal._hypothesis_global_random.getrandbits(128)
        wrapped_test._hypothesis_internal_use_generated_seed = seed
        return Random(seed)


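# Illustrative precedence sketch (hypothetical test code): an explicit @seed
# wins over settings(derandomize=True), which wins over a forced global seed;
# failing all of those, a fresh 128-bit seed is drawn from the thread-local
# global Random instance.
#
#     @seed(1234)                    # takes precedence...
#     @settings(derandomize=True)    # ...over derandomization
#     @given(st.integers())
#     def test(n): ...
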

@dataclass
class Stuff:
    selfy: Any
    args: tuple
    kwargs: dict
    given_kwargs: dict


def process_arguments_to_given(
    wrapped_test: Any,
    arguments: Sequence[object],
    kwargs: dict[str, object],
    given_kwargs: dict[str, SearchStrategy],
    params: dict[str, Parameter],
) -> tuple[Sequence[object], dict[str, object], Stuff]:
    selfy = None
    arguments, kwargs = convert_positional_arguments(wrapped_test, arguments, kwargs)

    # If the test function is a method of some kind, the bound object
    # will be the first named argument if there are any, otherwise the
    # first vararg (if any).
    posargs = [p.name for p in params.values() if p.kind is p.POSITIONAL_OR_KEYWORD]
    if posargs:
        selfy = kwargs.get(posargs[0])
    elif arguments:
        selfy = arguments[0]

    # Ensure that we don't mistake mocks for self here.
    # This can cause the mock to be used as the test runner.
    if is_mock(selfy):
        selfy = None

    arguments = tuple(arguments)

    with ensure_free_stackframes():
        for k, s in given_kwargs.items():
            check_strategy(s, name=k)
            s.validate()

    stuff = Stuff(selfy=selfy, args=arguments, kwargs=kwargs, given_kwargs=given_kwargs)

    return arguments, kwargs, stuff


def skip_exceptions_to_reraise():
    """Return a tuple of exceptions meaning 'skip this test', to re-raise.

    This is intended to cover most common test runners; if you would
    like another to be added please open an issue or pull request adding
    it to this function and to tests/cover/test_lazy_import.py
    """
    # This is a set because nose may simply re-export unittest.SkipTest
    exceptions = set()
    # We use this sys.modules trick to avoid importing libraries -
    # you can't be an instance of a type from an unimported module!
    # This is fast enough that we don't need to cache the result,
    # and more importantly it avoids possible side-effects :-)
    if "unittest" in sys.modules:
        exceptions.add(sys.modules["unittest"].SkipTest)
    if "unittest2" in sys.modules:
        exceptions.add(sys.modules["unittest2"].SkipTest)
    if "nose" in sys.modules:
        exceptions.add(sys.modules["nose"].SkipTest)
    if "_pytest.outcomes" in sys.modules:
        exceptions.add(sys.modules["_pytest.outcomes"].Skipped)
    return tuple(sorted(exceptions, key=str))


def failure_exceptions_to_catch() -> tuple[type[BaseException], ...]:
    """Return a tuple of exceptions meaning 'this test has failed', to catch.

    This is intended to cover most common test runners; if you would
    like another to be added please open an issue or pull request.
    """
    # While SystemExit and GeneratorExit are instances of BaseException, we also
    # expect them to be deterministic - unlike KeyboardInterrupt - and so we treat
    # them as standard exceptions, check for flakiness, etc.
    # See https://github.com/HypothesisWorks/hypothesis/issues/2223 for details.
    exceptions = [Exception, SystemExit, GeneratorExit]
    if "_pytest.outcomes" in sys.modules:
        exceptions.append(sys.modules["_pytest.outcomes"].Failed)
    return tuple(exceptions)


def new_given_signature(original_sig, given_kwargs):
    """Make an updated signature for the wrapped test."""
    return original_sig.replace(
        parameters=[
            p
            for p in original_sig.parameters.values()
            if not (
                p.name in given_kwargs
                and p.kind in (p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY)
            )
        ],
        return_annotation=None,
    )


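# Illustrative sketch (hypothetical example, not module code): parameters that
# @given supplies are dropped from the wrapped test's signature, so
#
#     @given(y=st.integers())
#     def test(x, y): ...
#
# is exposed to the outside world with the signature test(x).
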

def default_executor(data, function):
    return function(data)


def get_executor(runner):
    try:
        execute_example = runner.execute_example
    except AttributeError:
        pass
    else:
        return lambda data, function: execute_example(partial(function, data))

    if hasattr(runner, "setup_example") or hasattr(runner, "teardown_example"):
        setup = getattr(runner, "setup_example", None) or (lambda: None)
        teardown = getattr(runner, "teardown_example", None) or (lambda ex: None)

        def execute(data, function):
            token = None
            try:
                token = setup()
                return function(data)
            finally:
                teardown(token)

        return execute

    return default_executor


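# Illustrative sketch of the executor hooks recognized above (hypothetical user
# code, not part of this module): a runner class may define execute_example to
# wrap each test-case call, or setup_example/teardown_example to bracket it.
#
#     class MyTestCase(TestCase):
#         def setup_example(self):
#             return make_fixture()  # hypothetical; token passed to teardown
#
#         def teardown_example(self, token):
#             token.close()
#
#         @given(st.integers())
#         def test_something(self, n):
#             pass
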

# This function is a crude solution, a better way of resolving it would probably
# be to rewrite a bunch of exception handlers to use except*.
T = TypeVar("T", bound=BaseException)


def _flatten_group(excgroup: BaseExceptionGroup[T]) -> list[T]:
    found_exceptions: list[T] = []
    for exc in excgroup.exceptions:
        if isinstance(exc, BaseExceptionGroup):
            found_exceptions.extend(_flatten_group(exc))
        else:
            found_exceptions.append(exc)
    return found_exceptions


@contextlib.contextmanager
def unwrap_markers_from_group() -> Generator[None, None, None]:
    try:
        yield
    except BaseExceptionGroup as excgroup:
        frozen_exceptions, non_frozen_exceptions = excgroup.split(Frozen)

        # group only contains Frozen, reraise the group
        # it doesn't matter what we raise, since any exceptions get disregarded
        # and reraised as StopTest if data got frozen.
        if non_frozen_exceptions is None:
            raise
        # in all other cases they are discarded

        # Can RewindRecursive end up in this group?
        _, user_exceptions = non_frozen_exceptions.split(
            lambda e: isinstance(e, (StopTest, HypothesisException))
        )

        # this might contain marker exceptions, or internal errors, but not frozen.
        if user_exceptions is not None:
            raise

        # single marker exception - reraise it
        flattened_non_frozen_exceptions: list[BaseException] = _flatten_group(
            non_frozen_exceptions
        )
        if len(flattened_non_frozen_exceptions) == 1:
            e = flattened_non_frozen_exceptions[0]
            # preserve the cause of the original exception to not hinder debugging
            # note that __context__ is still lost though
            raise e from e.__cause__

        # multiple marker exceptions. If we re-raise the whole group we break
        # a bunch of logic so ....?
        stoptests, non_stoptests = non_frozen_exceptions.split(StopTest)

        # TODO: stoptest+hypothesisexception ...? Is it possible? If so, what do?

        if non_stoptests:
            # TODO: multiple marker exceptions is easy to produce, but the logic in the
            # engine does not handle it... so we just reraise the first one for now.
            e = _flatten_group(non_stoptests)[0]
            raise e from e.__cause__
        assert stoptests is not None

        # multiple stoptests: raising the one with the lowest testcounter
        raise min(_flatten_group(stoptests), key=lambda s_e: s_e.testcounter)


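# Note on the .split() calls above: BaseExceptionGroup.split(condition) returns
# a (matching, non_matching) pair, where either element may be None if empty.
# A minimal illustration (hypothetical values, not module code):
#
#     group = BaseExceptionGroup("markers", [Frozen(), StopTest(0)])
#     frozen, rest = group.split(Frozen)
#     # frozen wraps the Frozen(); rest wraps the StopTest(0)
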

915class StateForActualGivenExecution: 

916 def __init__( 

917 self, stuff, test, settings, random, wrapped_test, *, thread_overlap=None 

918 ): 

919 self.stuff = stuff 

920 self.test = test 

921 self.settings = settings 

922 self.random = random 

923 self.wrapped_test = wrapped_test 

924 self.thread_overlap = {} if thread_overlap is None else thread_overlap 

925 

926 self.test_runner = get_executor(stuff.selfy) 

927 self.print_given_args = getattr( 

928 wrapped_test, "_hypothesis_internal_print_given_args", True 

929 ) 

930 

931 self.last_exception = None 

932 self.falsifying_examples = () 

933 self.ever_executed = False 

934 self.xfail_example_reprs = set() 

935 self.files_to_propagate = set() 

936 self.failed_normally = False 

937 self.failed_due_to_deadline = False 

938 

939 self.explain_traces = defaultdict(set) 

940 self._start_timestamp = time.time() 

941 self._string_repr = "" 

942 self._timing_features = {} 

943 

944 @property 

945 def test_identifier(self): 

946 return getattr( 

947 current_pytest_item.value, "nodeid", None 

948 ) or get_pretty_function_description(self.wrapped_test) 

949 

950 def _should_trace(self): 

951 # NOTE: we explicitly support monkeypatching this. Keep the namespace 

952 # access intact. 

953 _trace_obs = ( 

954 observability_enabled() and observability.OBSERVABILITY_COLLECT_COVERAGE 

955 ) 

956 _trace_failure = ( 

957 self.failed_normally 

958 and not self.failed_due_to_deadline 

959 and {Phase.shrink, Phase.explain}.issubset(self.settings.phases) 

960 ) 

961 return _trace_obs or _trace_failure 

962 

963 def execute_once( 

964 self, 

965 data, 

966 *, 

967 print_example=False, 

968 is_final=False, 

969 expected_failure=None, 

970 example_kwargs=None, 

971 ): 

972 """Run the test function once, using ``data`` as input. 

973 

974 If the test raises an exception, it will propagate through to the 

975 caller of this method. Depending on its type, this could represent 

976 an ordinary test failure, or a fatal error, or a control exception. 

977 

978 If this method returns normally, the test might have passed, or 

979 it might have placed ``data`` in an unsuccessful state and then 

980 swallowed the corresponding control exception. 

981 """ 

982 

983 self.ever_executed = True 

984 

985 self._string_repr = "" 

986 text_repr = None 

987 if self.settings.deadline is None and not observability_enabled(): 

988 

989 @proxies(self.test) 

990 def test(*args, **kwargs): 

991 with unwrap_markers_from_group(): 

992 # NOTE: For compatibility with Python 3.9's LL(1) 

993 # parser, this is written as a nested with-statement, 

994 # instead of a compound one. 

995 with ensure_free_stackframes(): 

996 return self.test(*args, **kwargs) 

997 

998 else: 

999 

1000 @proxies(self.test) 

1001 def test(*args, **kwargs): 

1002 arg_drawtime = math.fsum(data.draw_times.values()) 

1003 arg_stateful = math.fsum(data._stateful_run_times.values()) 

1004 arg_gctime = gc_cumulative_time() 

1005 start = time.perf_counter() 

1006 try: 

1007 with unwrap_markers_from_group(): 

1008 # NOTE: For compatibility with Python 3.9's LL(1) 

1009 # parser, this is written as a nested with-statement, 

1010 # instead of a compound one. 

1011 with ensure_free_stackframes(): 

1012 result = self.test(*args, **kwargs) 

1013 finally: 

1014 finish = time.perf_counter() 

1015 in_drawtime = math.fsum(data.draw_times.values()) - arg_drawtime 

1016 in_stateful = ( 

1017 math.fsum(data._stateful_run_times.values()) - arg_stateful 

1018 ) 

1019 in_gctime = gc_cumulative_time() - arg_gctime 

1020 runtime = finish - start - in_drawtime - in_stateful - in_gctime 

1021 self._timing_features = { 

1022 "execute:test": runtime, 

1023 "overall:gc": in_gctime, 

1024 **data.draw_times, 

1025 **data._stateful_run_times, 

1026 } 

1027 

1028 if ( 

1029 (current_deadline := self.settings.deadline) is not None 

1030 # we disable the deadline check under concurrent threads, since 

1031 # cpython may switch away from a thread for arbitrarily long. 

1032 and not self.thread_overlap.get(threading.get_ident(), False) 

1033 ): 

1034 if not is_final: 

1035 current_deadline = (current_deadline // 4) * 5 

1036 if runtime >= current_deadline.total_seconds(): 

1037 raise DeadlineExceeded( 

1038 datetime.timedelta(seconds=runtime), self.settings.deadline 

1039 ) 

1040 return result 

1041 

1042 def run(data: ConjectureData) -> None: 

1043 # Set up dynamic context needed by a single test run. 

1044 if self.stuff.selfy is not None: 

1045 data.hypothesis_runner = self.stuff.selfy 

1046 # Generate all arguments to the test function. 

1047 args = self.stuff.args 

1048 kwargs = dict(self.stuff.kwargs) 

1049 if example_kwargs is None: 

1050 kw, argslices = context.prep_args_kwargs_from_strategies( 

1051 self.stuff.given_kwargs 

1052 ) 

1053 else: 

1054 kw = example_kwargs 

1055 argslices = {} 

1056 kwargs.update(kw) 

1057 if expected_failure is not None: 

1058 nonlocal text_repr 

1059 text_repr = repr_call(test, args, kwargs) 

1060 

1061 if print_example or current_verbosity() >= Verbosity.verbose: 

1062 printer = RepresentationPrinter(context=context) 

1063 if print_example: 

1064 printer.text("Falsifying example:") 

1065 else: 

1066 printer.text("Trying example:") 

1067 

1068 if self.print_given_args: 

1069 printer.text(" ") 

1070 printer.repr_call( 

1071 test.__name__, 

1072 args, 

1073 kwargs, 

1074 force_split=True, 

1075 arg_slices=argslices, 

1076 leading_comment=( 

1077 "# " + context.data.slice_comments[(0, 0)] 

1078 if (0, 0) in context.data.slice_comments 

1079 else None 

1080 ), 

1081 avoid_realization=data.provider.avoid_realization, 

1082 ) 

1083 report(printer.getvalue()) 

1084 

1085 if observability_enabled(): 

1086 printer = RepresentationPrinter(context=context) 

1087 printer.repr_call( 

1088 test.__name__, 

1089 args, 

1090 kwargs, 

1091 force_split=True, 

1092 arg_slices=argslices, 

1093 leading_comment=( 

1094 "# " + context.data.slice_comments[(0, 0)] 

1095 if (0, 0) in context.data.slice_comments 

1096 else None 

1097 ), 

1098 avoid_realization=data.provider.avoid_realization, 

1099 ) 

1100 self._string_repr = printer.getvalue() 

1101 

1102 try: 

1103 return test(*args, **kwargs) 

1104 except TypeError as e: 

1105 # If we sampled from a sequence of strategies, AND failed with a 

1106 # TypeError, *AND that exception mentions SearchStrategy*, add a note: 

1107 if ( 

1108 "SearchStrategy" in str(e) 

1109 and data._sampled_from_all_strategies_elements_message is not None 

1110 ): 

1111 msg, format_arg = data._sampled_from_all_strategies_elements_message 

1112 add_note(e, msg.format(format_arg)) 

1113 raise 

1114 finally: 

1115 if data._stateful_repr_parts is not None: 

1116 self._string_repr = "\n".join(data._stateful_repr_parts) 

1117 

1118 if observability_enabled(): 

1119 printer = RepresentationPrinter(context=context) 

1120 for name, value in data._observability_args.items(): 

1121 if name.startswith("generate:Draw "): 

1122 try: 

1123 value = data.provider.realize(value) 

1124 except BackendCannotProceed: # pragma: no cover 

1125 value = "<backend failed to realize symbolic>" 

1126 printer.text(f"\n{name.removeprefix('generate:')}: ") 

1127 printer.pretty(value) 

1128 

1129 self._string_repr += printer.getvalue() 

1130 

1131 # self.test_runner can include the execute_example method, or setup/teardown 

1132 # _example, so it's important to get the PRNG and build context in place first. 

1133 # 

1134 # NOTE: For compatibility with Python 3.9's LL(1) parser, this is written as 

1135 # three nested with-statements, instead of one compound statement. 

1136 with local_settings(self.settings): 

1137 with deterministic_PRNG(): 

1138 with BuildContext( 

1139 data, is_final=is_final, wrapped_test=self.wrapped_test 

1140 ) as context: 

1141 # providers may throw in per_case_context_fn, and we'd like 

1142 # `result` to still be set in these cases. 

1143 result = None 

1144 with data.provider.per_test_case_context_manager(): 

1145 # Run the test function once, via the executor hook. 

1146 # In most cases this will delegate straight to `run(data)`. 

1147 result = self.test_runner(data, run) 

1148 

1149 # If a failure was expected, it should have been raised already, so 

1150 # instead raise an appropriate diagnostic error. 

1151 if expected_failure is not None: 

1152 exception, traceback = expected_failure 

1153 if isinstance(exception, DeadlineExceeded) and ( 

1154 runtime_secs := math.fsum( 

1155 v 

1156 for k, v in self._timing_features.items() 

1157 if k.startswith("execute:") 

1158 ) 

1159 ): 

1160 report( 

1161 "Unreliable test timings! On an initial run, this " 

1162 f"test took {exception.runtime.total_seconds() * 1000:.2f}ms, " 

1163 "which exceeded the deadline of " 

1164 f"{self.settings.deadline.total_seconds() * 1000:.2f}ms, but " 

1165 f"on a subsequent run it took {runtime_secs * 1000:.2f} ms, " 

1166 "which did not. If you expect this sort of " 

1167 "variability in your test timings, consider turning " 

1168 "deadlines off for this test by setting deadline=None." 

1169 ) 

1170 else: 

1171 report("Failed to reproduce exception. Expected: \n" + traceback) 

1172 raise FlakyFailure( 

1173 f"Hypothesis {text_repr} produces unreliable results: " 

1174 "Falsified on the first call but did not on a subsequent one", 

1175 [exception], 

1176 ) 

1177 return result 

1178 

1179 def _flaky_replay_to_failure( 

1180 self, err: FlakyReplay, context: BaseException 

1181 ) -> FlakyFailure: 

1182 # Note that in the mark_interesting case, _context_ itself 

1183 # is part of err._interesting_examples - but it's not in 

1184 # _runner.interesting_examples - this is fine, as the context 

1185 # (i.e., immediate exception) is appended. 

1186 interesting_examples = [ 

1187 self._runner.interesting_examples[origin] 

1188 for origin in err._interesting_origins 

1189 if origin in self._runner.interesting_examples 

1190 ] 

1191 exceptions = [result.expected_exception for result in interesting_examples] 

1192 exceptions.append(context) # the immediate exception 

1193 return FlakyFailure(err.reason, exceptions) 

1194 

1195 def _execute_once_for_engine(self, data: ConjectureData) -> None: 

1196 """Wrapper around ``execute_once`` that intercepts test failure 

1197 exceptions and single-test control exceptions, and turns them into 

1198 appropriate method calls to `data` instead. 

1199 

1200 This allows the engine to assume that any exception other than 

1201 ``StopTest`` must be a fatal error, and should stop the entire engine. 

1202 """ 

1203 trace: Trace = set() 

1204 try: 

1205 with Tracer(should_trace=self._should_trace()) as tracer: 

1206 try: 

1207 result = self.execute_once(data) 

1208 if ( 

1209 data.status == Status.VALID and tracer.branches 

1210 ): # pragma: no cover 

1211 # This is in fact covered by our *non-coverage* tests, but due 

1212 # to the settrace() contention *not* by our coverage tests. 

1213 self.explain_traces[None].add(frozenset(tracer.branches)) 

1214 finally: 

1215 trace = tracer.branches 

1216 if result is not None: 

1217 fail_health_check( 

1218 self.settings, 

1219 "Tests run under @given should return None, but " 

1220 f"{self.test.__name__} returned {result!r} instead.", 

1221 HealthCheck.return_value, 

1222 ) 

1223 except UnsatisfiedAssumption as e: 

1224 # An "assume" check failed, so instead we inform the engine that 

1225 # this test run was invalid. 

1226 try: 

1227 data.mark_invalid(e.reason) 

1228 except FlakyReplay as err: 

1229 # This was unexpected, meaning that the assume was flaky. 

1230 # Report it as such. 

1231 raise self._flaky_replay_to_failure(err, e) from None 

1232 except (StopTest, BackendCannotProceed): 

1233 # The engine knows how to handle this control exception, so it's 

1234 # OK to re-raise it. 

1235 raise 

1236 except ( 

1237 FailedHealthCheck, 

1238 *skip_exceptions_to_reraise(), 

1239 ): 

1240 # These are fatal errors or control exceptions that should stop the 

1241 # engine, so we re-raise them. 

1242 raise 

1243 except failure_exceptions_to_catch() as e: 

1244 # If an unhandled (i.e., non-Hypothesis) error was raised by 

1245 # Hypothesis-internal code, re-raise it as a fatal error instead 

1246 # of treating it as a test failure. 

1247 if isinstance(e, BaseExceptionGroup) and len(e.exceptions) == 1: 

1248 # When a naked exception is implicitly wrapped in an ExceptionGroup 

1249 # due to a re-raising "except*", the ExceptionGroup is constructed in 

1250 # the caller's stack frame (see #4183). This workaround is specifically 

1251 # for implicit wrapping of naked exceptions by "except*", since explicit 

1252 # raising of ExceptionGroup gets the proper traceback in the first place 

1253 # - there's no need to handle hierarchical groups here, at least if no 

1254 # such implicit wrapping happens inside hypothesis code (we only care 

1255 # about the hypothesis-or-not distinction). 

1256 # 

1257 # 01-25-2025: this was patched to give the correct 

1258 # stacktrace in cpython https://github.com/python/cpython/issues/128799. 

1259 # can remove once python3.11 is EOL. 

1260 tb = e.exceptions[0].__traceback__ or e.__traceback__ 

1261 else: 

1262 tb = e.__traceback__ 

1263 filepath = traceback.extract_tb(tb)[-1][0] 

1264 if ( 

1265 is_hypothesis_file(filepath) 

1266 and not isinstance(e, HypothesisException) 

1267 # We expect backend authors to use the provider_conformance test 

1268 # to test their backends. If an error occurs there, it is probably 

1269 # from their backend, and we would like to treat it as a standard 

1270 # error, not a hypothesis-internal error. 

1271 and not filepath.endswith( 

1272 f"internal{os.sep}conjecture{os.sep}provider_conformance.py" 

1273 ) 

1274 ): 

1275 raise 

1276 

1277 if data.frozen: 

1278 # This can happen if an error occurred in a finally 

1279 # block somewhere, suppressing our original StopTest. 

1280 # We raise a new one here to resume normal operation. 

1281 raise StopTest(data.testcounter) from e 

1282 else: 

1283 # The test failed by raising an exception, so we inform the 

1284 # engine that this test run was interesting. This is the normal 

1285 # path for test runs that fail. 

1286 tb = get_trimmed_traceback() 

1287 data.expected_traceback = format_exception(e, tb) 

1288 data.expected_exception = e 

1289 assert data.expected_traceback is not None # for mypy 

1290 verbose_report(data.expected_traceback) 

1291 

1292 self.failed_normally = True 

1293 

1294 interesting_origin = InterestingOrigin.from_exception(e) 

1295 if trace: # pragma: no cover 

1296 # Trace collection is explicitly disabled under coverage. 

1297 self.explain_traces[interesting_origin].add(frozenset(trace)) 

1298 if interesting_origin.exc_type == DeadlineExceeded: 

1299 self.failed_due_to_deadline = True 

1300 self.explain_traces.clear() 

1301 try: 

1302 data.mark_interesting(interesting_origin) 

1303 except FlakyReplay as err: 

1304 raise self._flaky_replay_to_failure(err, e) from None 

1305 

1306 finally: 

1307 # Conditional here so we can save some time constructing the payload; in 

1308 # other cases (without coverage) it's cheap enough to do that regardless. 

1309 if observability_enabled(): 

1310 if runner := getattr(self, "_runner", None): 

1311 phase = runner._current_phase 

1312 else: # pragma: no cover # in case of messing with internals 

1313 if self.failed_normally or self.failed_due_to_deadline: 

1314 phase = "shrink" 

1315 else: 

1316 phase = "unknown" 

1317 backend_desc = f", using backend={self.settings.backend!r}" * ( 

1318 self.settings.backend != "hypothesis" 

1319 and not getattr(runner, "_switch_to_hypothesis_provider", False) 

1320 ) 

1321 try: 

1322 data._observability_args = data.provider.realize( 

1323 data._observability_args 

1324 ) 

1325 except BackendCannotProceed: 

1326 data._observability_args = {} 

1327 

1328 try: 

1329 self._string_repr = data.provider.realize(self._string_repr) 

1330 except BackendCannotProceed: 

1331 self._string_repr = "<backend failed to realize symbolic arguments>" 

1332 

1333 try: 

1334 data.events = data.provider.realize(data.events) 

1335 except BackendCannotProceed: 

1336 data.events = {} 

1337 

1338 data.freeze() 

1339 tc = make_testcase( 

1340 run_start=self._start_timestamp, 

1341 property=self.test_identifier, 

1342 data=data, 

1343 how_generated=f"during {phase} phase{backend_desc}", 

1344 representation=self._string_repr, 

1345 arguments=data._observability_args, 

1346 timing=self._timing_features, 

1347 coverage=tractable_coverage_report(trace) or None, 

1348 phase=phase, 

1349 backend_metadata=data.provider.observe_test_case(), 

1350 ) 

1351 deliver_observation(tc) 

1352 

1353 for msg in data.provider.observe_information_messages( 

1354 lifetime="test_case" 

1355 ): 

1356 self._deliver_information_message(**msg) 

1357 self._timing_features = {} 

1358 

1359 def _deliver_information_message( 

1360 self, *, type: InfoObservationType, title: str, content: Union[str, dict] 

1361 ) -> None: 

1362 deliver_observation( 

1363 InfoObservation( 

1364 type=type, 

1365 run_start=self._start_timestamp, 

1366 property=self.test_identifier, 

1367 title=title, 

1368 content=content, 

1369 ) 

1370 ) 

1371 

1372 def run_engine(self): 

1373 """Run the test function many times, on database input and generated 

1374 input, using the Conjecture engine. 

1375 """ 

1376 # Tell pytest to omit the body of this function from tracebacks 

1377 __tracebackhide__ = True 

1378 try: 

1379 database_key = self.wrapped_test._hypothesis_internal_database_key 

1380 except AttributeError: 

1381 if global_force_seed is None: 

1382 database_key = function_digest(self.test) 

1383 else: 

1384 database_key = None 

1385 

1386 runner = self._runner = ConjectureRunner( 

1387 self._execute_once_for_engine, 

1388 settings=self.settings, 

1389 random=self.random, 

1390 database_key=database_key, 

1391 thread_overlap=self.thread_overlap, 

1392 ) 

1393 # Use the Conjecture engine to run the test function many times 

1394 # on different inputs. 

1395 runner.run() 

1396 note_statistics(runner.statistics) 

1397 if observability_enabled(): 

1398 self._deliver_information_message( 

1399 type="info", 

1400 title="Hypothesis Statistics", 

1401 content=describe_statistics(runner.statistics), 

1402 ) 

1403 for msg in ( 

1404 p if isinstance(p := runner.provider, PrimitiveProvider) else p(None) 

1405 ).observe_information_messages(lifetime="test_function"): 

1406 self._deliver_information_message(**msg) 

1407 

1408 if runner.call_count == 0: 

1409 return 

1410 if runner.interesting_examples: 

1411 self.falsifying_examples = sorted( 

1412 runner.interesting_examples.values(), 

1413 key=lambda d: sort_key(d.nodes), 

1414 reverse=True, 

1415 ) 

1416 else: 

1417 if runner.valid_examples == 0: 

1418 explanations = [] 

1419 # use a somewhat arbitrary cutoff to avoid recommending spurious 

1420 # fixes. 

1421 # eg, a few invalid examples from internal filters when the 

1422 # problem is the user generating large inputs, or a 

1423 # few overruns during internal mutation when the problem is 

1424 # impossible user filters/assumes. 

1425 if runner.invalid_examples > min(20, runner.call_count // 5): 

1426 explanations.append( 

1427 f"{runner.invalid_examples} of {runner.call_count} " 

1428 "examples failed a .filter() or assume() condition. Try " 

1429 "making your filters or assumes less strict, or rewrite " 

1430 "using strategy parameters: " 

1431 "st.integers().filter(lambda x: x > 0) fails less often " 

1432 "(that is, never) when rewritten as st.integers(min_value=1)." 

1433 ) 

1434 if runner.overrun_examples > min(20, runner.call_count // 5): 

1435 explanations.append( 

1436 f"{runner.overrun_examples} of {runner.call_count} " 

1437 "examples were too large to finish generating; try " 

1438 "reducing the typical size of your inputs?" 

1439 ) 

1440 rep = get_pretty_function_description(self.test) 

1441 raise Unsatisfiable( 

1442 f"Unable to satisfy assumptions of {rep}. " 

1443 f"{' Also, '.join(explanations)}" 

1444 ) 

1445 

1446 # If we have not traced executions, warn about that now (but only when 

1447 # we'd expect to do so reliably, i.e. on CPython>=3.12) 

1448 if ( 

1449 hasattr(sys, "monitoring") 

1450 and not PYPY 

1451 and self._should_trace() 

1452 and not Tracer.can_trace() 

1453 ): # pragma: no cover 

1454 # actually covered by our tests, but only on >= 3.12 

1455 warnings.warn( 

1456 "avoiding tracing test function because tool id " 

1457 f"{MONITORING_TOOL_ID} is already taken by tool " 

1458 f"{sys.monitoring.get_tool(MONITORING_TOOL_ID)}.", 

1459 HypothesisWarning, 

1460 stacklevel=3, 

1461 ) 

1462 

1463 if not self.falsifying_examples: 

1464 return 

1465 elif not (self.settings.report_multiple_bugs and pytest_shows_exceptiongroups): 

1466 # Pretend that we only found one failure, by discarding the others. 

1467 del self.falsifying_examples[:-1] 

1468 

1469 # The engine found one or more failures, so we need to reproduce and 

1470 # report them. 

1471 

1472 errors_to_report = [] 

1473 

1474 report_lines = describe_targets(runner.best_observed_targets) 

1475 if report_lines: 

1476 report_lines.append("") 

1477 

1478 explanations = explanatory_lines(self.explain_traces, self.settings) 

1479 for falsifying_example in self.falsifying_examples: 

1480 fragments = [] 

1481 

1482 ran_example = runner.new_conjecture_data( 

1483 falsifying_example.choices, max_choices=len(falsifying_example.choices) 

1484 ) 

1485 ran_example.slice_comments = falsifying_example.slice_comments 

1486 tb = None 

1487 origin = None 

1488 assert falsifying_example.expected_exception is not None 

1489 assert falsifying_example.expected_traceback is not None 

1490 try: 

1491 with with_reporter(fragments.append): 

1492 self.execute_once( 

1493 ran_example, 

1494 print_example=True, 

1495 is_final=True, 

1496 expected_failure=( 

1497 falsifying_example.expected_exception, 

1498 falsifying_example.expected_traceback, 

1499 ), 

1500 ) 

1501 except StopTest as e: 

1502 # Link the expected exception from the first run. Not sure 

1503 # how to access the current exception, if it failed 

1504 # differently on this run. In fact, in the only known 

1505 # reproducer, the StopTest is caused by OVERRUN before the 

1506 # test is even executed. Possibly because all initial examples 

1507 # failed until the final non-traced replay, and something was 

1508 # exhausted? Possibly a FIXME, but sufficiently weird to 

1509 # ignore for now. 

1510 err = FlakyFailure( 

1511 "Inconsistent results: An example failed on the " 

1512 "first run but now succeeds (or fails with another " 

1513 "error, or is for some reason not runnable).", 

1514 # (note: e is a BaseException) 

1515 [falsifying_example.expected_exception or e], 

1516 ) 

1517 errors_to_report.append((fragments, err)) 

1518 except UnsatisfiedAssumption as e: # pragma: no cover # ironically flaky 

1519 err = FlakyFailure( 

1520 "Unreliable assumption: An example which satisfied " 

1521 "assumptions on the first run now fails it.", 

1522 [e], 

1523 ) 

1524 errors_to_report.append((fragments, err)) 

1525 except BaseException as e: 

1526 # If we have anything for explain-mode, this is the time to report. 

1527 fragments.extend(explanations[falsifying_example.interesting_origin]) 

1528 errors_to_report.append( 

1529 (fragments, e.with_traceback(get_trimmed_traceback())) 

1530 ) 

1531 tb = format_exception(e, get_trimmed_traceback(e)) 

1532 origin = InterestingOrigin.from_exception(e) 

1533 else: 

1534 # execute_once() will always raise either the expected error, or Flaky. 

1535 raise NotImplementedError("This should be unreachable") 

1536 finally: 

1537 ran_example.freeze() 

1538 if observability_enabled(): 

1539 # log our observability line for the final failing example 

1540 tc = make_testcase( 

1541 run_start=self._start_timestamp, 

1542 property=self.test_identifier, 

1543 data=ran_example, 

1544 how_generated="minimal failing example", 

1545 representation=self._string_repr, 

1546 arguments=ran_example._observability_args, 

1547 timing=self._timing_features, 

1548 coverage=None, # Not recorded when we're replaying the MFE 
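# In this `finally`, the except-branches above have already handled
# (and cleared) any replayed failure, so an in-flight exception is the
# NotImplementedError from the else-branch, i.e. the replay
# unexpectedly passed -- hence "passed" when exc_info is set.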

1549 status="passed" if sys.exc_info()[0] else "failed", 

1550 status_reason=str(origin or "unexpected/flaky pass"), 

1551 metadata={"traceback": tb}, 

1552 ) 

1553 deliver_observation(tc) 

1554 

1555 # Whether or not replay actually raised the exception again, we want 

1556 # to print the reproduce_failure decorator for the failing example. 

1557 if self.settings.print_blob: 

1558 fragments.append( 

1559 "\nYou can reproduce this example by temporarily adding " 

1560 f"{reproduction_decorator(falsifying_example.choices)} " 

1561 "as a decorator on your test case" 

1562 ) 

1563 

1564 _raise_to_user( 

1565 errors_to_report, 

1566 self.settings, 

1567 report_lines, 

1568 # A backend might report a failure and then report verified afterwards, 

1569 # which is to be interpreted as "there are no more failures *other 

1570 # than what we already reported*". Do not report this as unsound. 

1571 unsound_backend=( 

1572 runner._verified_by 

1573 if runner._verified_by and not runner._backend_found_failure 

1574 else None 

1575 ), 

1576 ) 

1577 

1578 

1579def _raise_to_user( 

1580 errors_to_report, settings, target_lines, trailer="", *, unsound_backend=None 

1581): 

1582 """Helper function for attaching notes and grouping multiple errors.""" 

1583 failing_prefix = "Falsifying example: " 

1584 ls = [] 

1585 for fragments, err in errors_to_report: 

1586 for note in fragments: 

1587 add_note(err, note) 

1588 if note.startswith(failing_prefix): 

1589 ls.append(note.removeprefix(failing_prefix)) 

1590 if current_pytest_item.value: 

1591 current_pytest_item.value._hypothesis_failing_examples = ls 

1592 

1593 if len(errors_to_report) == 1: 

1594 _, the_error_hypothesis_found = errors_to_report[0] 

1595 else: 

1596 assert errors_to_report 

1597 the_error_hypothesis_found = BaseExceptionGroup( 

1598 f"Hypothesis found {len(errors_to_report)} distinct failures{trailer}.", 

1599 [e for _, e in errors_to_report], 

1600 ) 

1601 

1602 if settings.verbosity >= Verbosity.normal: 

1603 for line in target_lines: 

1604 add_note(the_error_hypothesis_found, line) 

1605 

1606 if unsound_backend: 

1607 add_note( 

1608 the_error_hypothesis_found, 

1609 f"backend={unsound_backend!r} claimed to verify this test passes - " 

1610 "please send them a bug report!", 

1611 ) 

1612 

1613 raise the_error_hypothesis_found 

1614 

1615 

1616@contextlib.contextmanager 

1617def fake_subTest(self, msg=None, **__): 

1618 """Monkeypatch for `unittest.TestCase.subTest` during `@given`. 

1619 

1620 If we don't patch this out, each failing example is reported as a 

1621 separate failing test by the unittest test runner, which is 

1622 obviously incorrect. We therefore replace it for the duration with 

1623 this version. 

1624 """ 

1625 warnings.warn( 

1626 "subTest per-example reporting interacts badly with Hypothesis " 

1627 "trying hundreds of examples, so we disable it for the duration of " 

1628 "any test that uses `@given`.", 

1629 HypothesisWarning, 

1630 stacklevel=2, 

1631 ) 

1632 yield 

1633 
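# Illustrative sketch of what this patch prevents: in a test like
#
#     class T(TestCase):
#         @given(st.integers())
#         def test(self, x):
#             with self.subTest(x=x):
#                 ...
#
# an unpatched subTest would report each of the hundreds of generated
# examples as a separate (sub)test result, rather than one result per test.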

1634 

1635@dataclass 

1636class HypothesisHandle: 

1637 """This object is provided as the .hypothesis attribute on @given tests. 

1638 

1639 Downstream users can reassign its attributes to insert custom logic into 

1640 the execution of each case, for example by converting an async into a 

1641 sync function. 

1642 

1643 This must be an attribute of an attribute, because reassignment of a 

1644 first-level attribute would not be visible to Hypothesis if the function 

1645 had been decorated before the assignment. 

1646 

1647 See https://github.com/HypothesisWorks/hypothesis/issues/1257 for more 

1648 information. 

1649 """ 

1650 

1651 inner_test: Any 

1652 _get_fuzz_target: Any 

1653 _given_kwargs: Any 

1654 
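    # Illustrative sketch of the reassignment described above, adapting an
    # async inner test for a synchronous runner (the `as_sync` helper is
    # hypothetical, not part of Hypothesis):
    #
    #     import asyncio
    #
    #     def as_sync(coro_fn):
    #         def inner(*args, **kwargs):
    #             asyncio.run(coro_fn(*args, **kwargs))
    #         return inner
    #
    #     test_foo.hypothesis.inner_test = as_sync(test_foo.hypothesis.inner_test)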

1655 @property 

1656 def fuzz_one_input( 

1657 self, 

1658 ) -> Callable[[Union[bytes, bytearray, memoryview, BinaryIO]], Optional[bytes]]: 

1659 """Run the test as a fuzz target, driven with the `buffer` of bytes. 

1660 

1661 Returns None if the buffer was invalid for the strategy, the canonical 

1662 pruned bytes if it was valid, and leaves raised exceptions alone. 

1663 """ 

1664 # Note: most users, if they care about fuzzer performance, will access the 

1665 # property and assign it to a local variable to move the attribute lookup 

1666 # outside their fuzzing loop / before the fork point. We cache it anyway, 

1667 # so that naive or unusual use-cases get the best possible performance too. 

1668 try: 

1669 return self.__cached_target # type: ignore 

1670 except AttributeError: 

1671 self.__cached_target = self._get_fuzz_target() 

1672 return self.__cached_target 

1673 
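# Illustrative harness for the property above; in practice a coverage-guided
# fuzzer (e.g. Atheris or AFL) would supply the byte buffers rather than
# os.urandom:
#
#     fuzz = test_foo.hypothesis.fuzz_one_input  # hoist the attribute lookup
#     for _ in range(1000):
#         fuzz(os.urandom(200))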

1674 

1675@overload 

1676def given( 

1677 _: EllipsisType, / 

1678) -> Callable[ 

1679 [Callable[..., Optional[Coroutine[Any, Any, None]]]], Callable[[], None] 

1680]: # pragma: no cover 

1681 ... 

1682 

1683 

1684@overload 

1685def given( 

1686 *_given_arguments: SearchStrategy[Any], 

1687) -> Callable[ 

1688 [Callable[..., Optional[Coroutine[Any, Any, None]]]], Callable[..., None] 

1689]: # pragma: no cover 

1690 ... 

1691 

1692 

1693@overload 

1694def given( 

1695 **_given_kwargs: Union[SearchStrategy[Any], EllipsisType], 

1696) -> Callable[ 

1697 [Callable[..., Optional[Coroutine[Any, Any, None]]]], Callable[..., None] 

1698]: # pragma: no cover 

1699 ... 

1700 

1701 

1702def given( 

1703 *_given_arguments: Union[SearchStrategy[Any], EllipsisType], 

1704 **_given_kwargs: Union[SearchStrategy[Any], EllipsisType], 

1705) -> Callable[ 

1706 [Callable[..., Optional[Coroutine[Any, Any, None]]]], Callable[..., None] 

1707]: 

1708 """ 

1709 The |@given| decorator turns a function into a Hypothesis test. This is the 

1710 main entry point to Hypothesis. 

1711 

1712 .. seealso:: 

1713 

1714 See also the :doc:`/tutorial/introduction` tutorial, which introduces 

1715 defining Hypothesis tests with |@given|. 

1716 

1717 .. _given-arguments: 

1718 

1719 Arguments to ``@given`` 

1720 ----------------------- 

1721 

1722 Arguments to |@given| may be either positional or keyword arguments: 

1723 

1724 .. code-block:: python 

1725 

1726 @given(st.integers(), st.floats()) 

1727 def test_one(x, y): 

1728 pass 

1729 

1730 @given(x=st.integers(), y=st.floats()) 

1731 def test_two(x, y): 

1732 pass 

1733 

1734 If using keyword arguments, the arguments may appear in any order, as with 

1735 standard Python functions: 

1736 

1737 .. code-block:: python 

1738 

1739 # different order, but still equivalent to before 

1740 @given(y=st.floats(), x=st.integers()) 

1741 def test(x, y): 

1742 assert isinstance(x, int) 

1743 assert isinstance(y, float) 

1744 

1745 If |@given| is provided fewer positional arguments than the decorated test 

1746 accepts, the test's arguments are filled in from the right, leaving the 

1747 leftmost positional arguments unfilled: 

1748 

1749 .. code-block:: python 

1750 

1751 @given(st.integers(), st.floats()) 

1752 def test(manual_string, y, z): 

1753 assert manual_string == "x" 

1754 assert isinstance(y, int) 

1755 assert isinstance(z, float) 

1756 

1757 # `test` is now a callable which takes one argument `manual_string` 

1758 

1759 test("x") 

1760 # or equivalently: 

1761 test(manual_string="x") 

1762 

1763 The reason for this "from the right" behavior is to support using |@given| 

1764 with instance methods, by automatically passing through ``self``: 

1765 

1766 .. code-block:: python 

1767 

1768 class MyTest(TestCase): 

1769 @given(st.integers()) 

1770 def test(self, x): 

1771 assert isinstance(self, MyTest) 

1772 assert isinstance(x, int) 

1773 

1774 If (and only if) using keyword arguments, |@given| may be combined with 

1775 ``**kwargs`` or ``*args``: 

1776 

1777 .. code-block:: python 

1778 

1779 @given(x=st.integers(), y=st.integers()) 

1780 def test(x, **kwargs): 

1781 assert "y" in kwargs 

1782 

1783 @given(x=st.integers(), y=st.integers()) 

1784 def test(x, *args, **kwargs): 

1785 assert args == () 

1786 assert "x" not in kwargs 

1787 assert "y" in kwargs 

1788 

1789 It is an error to: 

1790 

1791 * Mix positional and keyword arguments to |@given|. 

1792 * Use |@given| with a function that has a default value for an argument. 

1793 * Use |@given| with positional arguments with a function that uses ``*args``, 

1794 ``**kwargs``, or keyword-only arguments. 

1795 

1796 The function returned by given has all the same arguments as the original 

1797 test, minus those that are filled in by |@given|. See the :ref:`notes on 

1798 framework compatibility <framework-compatibility>` for how this interacts 

1799 with features of other testing libraries, such as :pypi:`pytest` fixtures. 

1800 """ 

1801 

1802 if currently_in_test_context(): 

1803 fail_health_check( 

1804 Settings(), 

1805 "Nesting @given tests results in quadratic generation and shrinking " 

1806 "behavior, and can usually be more cleanly expressed by replacing the " 

1807 "inner function with an st.data() parameter on the outer @given." 

1808 "\n\n" 

1809 "If it is difficult or impossible to refactor this test to remove the " 

1810 "nested @given, you can disable this health check with " 

1811 "@settings(suppress_health_check=[HealthCheck.nested_given]) on the " 

1812 "outer @given. See " 

1813 "https://hypothesis.readthedocs.io/en/latest/reference/api.html#hypothesis.HealthCheck " 

1814 "for details.", 

1815 HealthCheck.nested_given, 

1816 ) 

1817 
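    # Illustrative sketch of the suggested refactor: rather than nesting
    # @given, draw the inner value with st.data() on the outer test:
    #
    #     @given(st.integers(), st.data())
    #     def test_outer(x, data):
    #         y = data.draw(st.integers())
    #         ...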

1818 def run_test_as_given(test): 

1819 if inspect.isclass(test): 

1820 # Provide a meaningful error to users, instead of exceptions from 

1821 # internals that assume we're dealing with a function. 

1822 raise InvalidArgument("@given cannot be applied to a class") 

1823 

1824 if ( 

1825 "_pytest" in sys.modules 

1826 and "_pytest.fixtures" in sys.modules 

1827 and ( 

1828 tuple(map(int, sys.modules["_pytest"].__version__.split(".")[:2])) 

1829 >= (8, 4) 

1830 ) 

1831 and isinstance( 

1832 test, sys.modules["_pytest.fixtures"].FixtureFunctionDefinition 

1833 ) 

1834 ): # pragma: no cover # covered by pytest/test_fixtures, but not by cover/ 

1835 raise InvalidArgument("@given cannot be applied to a pytest fixture") 

1836 

1837 given_arguments = tuple(_given_arguments) 

1838 given_kwargs = dict(_given_kwargs) 

1839 

1840 original_sig = get_signature(test) 

1841 if given_arguments == (Ellipsis,) and not given_kwargs: 

1842 # user indicated that they want to infer all arguments 

1843 given_kwargs = { 

1844 p.name: Ellipsis 

1845 for p in original_sig.parameters.values() 

1846 if p.kind in (p.POSITIONAL_OR_KEYWORD, p.KEYWORD_ONLY) 

1847 } 

1848 given_arguments = () 

1849 

1850 check_invalid = is_invalid_test( 

1851 test, original_sig, given_arguments, given_kwargs 

1852 ) 

1853 

1854 # If the argument check found problems, return a dummy test function 

1855 # that will raise an error if it is actually called. 

1856 if check_invalid is not None: 

1857 return check_invalid 

1858 

1859 # Because the argument check succeeded, we can convert @given's 

1860 # positional arguments into keyword arguments for simplicity. 

1861 if given_arguments: 

1862 assert not given_kwargs 

1863 posargs = [ 

1864 p.name 

1865 for p in original_sig.parameters.values() 

1866 if p.kind is p.POSITIONAL_OR_KEYWORD 

1867 ] 

1868 given_kwargs = dict(list(zip(posargs[::-1], given_arguments[::-1]))[::-1]) 

1869 # These have been converted, so delete them to prevent accidental use. 

1870 del given_arguments 

1871 
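        # Worked example of the conversion above: with positional parameters
        # ["self", "x", "y"] and two strategies (s1, s2), pairing from the
        # right yields {"x": s1, "y": s2}, leaving "self" unfilled.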

1872 new_signature = new_given_signature(original_sig, given_kwargs) 

1873 

1874 # Use type information to convert "infer" arguments into appropriate strategies. 

1875 if ... in given_kwargs.values(): 

1876 hints = get_type_hints(test) 

1877 for name in [name for name, value in given_kwargs.items() if value is ...]: 

1878 if name not in hints: 

1879 return _invalid( 

1880 f"passed {name}=... for {test.__name__}, but {name} has " 

1881 "no type annotation", 

1882 test=test, 

1883 given_kwargs=given_kwargs, 

1884 ) 

1885 given_kwargs[name] = st.from_type(hints[name]) 

1886 
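        # Illustrative sketch: given `def test(x: int): ...`, either
        # @given(x=...) or @given(...) resolves here to
        # @given(x=st.from_type(int)), which behaves like @given(x=st.integers()).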

1887 # only raise if the same thread uses two different executors, not if two 

1888 # different threads use different executors. 

1889 thread_local = ThreadLocal(prev_self=lambda: not_set) 

1890 # maps thread_id to whether that thread overlaps in execution with any 

1891 # other thread in this @given. We use this to detect whether an @given is 

1892 # being run from multiple different threads at once, which informs 

1893 # decisions like whether to raise DeadlineExceeded or HealthCheck.too_slow. 

1894 thread_overlap: dict[int, bool] = {} 

1895 thread_overlap_lock = Lock() 

1896 

1897 @impersonate(test) 

1898 @define_function_signature(test.__name__, test.__doc__, new_signature) 

1899 def wrapped_test(*arguments, **kwargs): 

1900 # Tell pytest to omit the body of this function from tracebacks 

1901 __tracebackhide__ = True 

1902 with thread_overlap_lock: 

1903 for overlap_thread_id in thread_overlap: 

1904 thread_overlap[overlap_thread_id] = True 

1905 

1906 threadid = threading.get_ident() 

1907 # if there are existing threads when this thread starts, then 

1908 # this thread starts in an overlapped state. 

1909 has_existing_threads = len(thread_overlap) > 0 

1910 thread_overlap[threadid] = has_existing_threads 

1911 

1912 try: 

1913 test = wrapped_test.hypothesis.inner_test 

1914 if getattr(test, "is_hypothesis_test", False): 

1915 raise InvalidArgument( 

1916 f"You have applied @given to the test {test.__name__} more than " 

1917 "once, which wraps the test several times and is extremely slow. " 

1918 "A similar effect can be gained by combining the arguments " 

1919 "of the two calls to given. For example, instead of " 

1920 "@given(booleans()) @given(integers()), you could write " 

1921 "@given(booleans(), integers())" 

1922 ) 

1923 

1924 settings = wrapped_test._hypothesis_internal_use_settings 

1925 random = get_random_for_wrapped_test(test, wrapped_test) 

1926 arguments, kwargs, stuff = process_arguments_to_given( 

1927 wrapped_test, 

1928 arguments, 

1929 kwargs, 

1930 given_kwargs, 

1931 new_signature.parameters, 

1932 ) 

1933 

1934 if ( 

1935 inspect.iscoroutinefunction(test) 

1936 and get_executor(stuff.selfy) is default_executor 

1937 ): 

1938 # See https://github.com/HypothesisWorks/hypothesis/issues/3054 

1939 # If our custom executor doesn't handle coroutines, or we return an 

1940 # awaitable from a non-async-def function, we just rely on the 

1941 # return_value health check. This catches most user errors though. 

1942 raise InvalidArgument( 

1943 "Hypothesis doesn't know how to run async test functions like " 

1944 f"{test.__name__}. You'll need to write a custom executor, " 

1945 "or use a library like pytest-asyncio or pytest-trio which can " 

1946 "handle the translation for you.\n See https://hypothesis." 

1947 "readthedocs.io/en/latest/details.html#custom-function-execution" 

1948 ) 

1949 

1950 runner = stuff.selfy 

1951 if isinstance(stuff.selfy, TestCase) and test.__name__ in dir(TestCase): 

1952 fail_health_check( 

1953 settings, 

1954 f"You have applied @given to the method {test.__name__}, which is " 

1955 "used by the unittest runner but is not itself a test. " 

1956 "This is not useful in any way.", 

1957 HealthCheck.not_a_test_method, 

1958 ) 

1959 if bad_django_TestCase(runner): # pragma: no cover 

1960 # Covered by the Django tests, but not the pytest coverage task 

1961 raise InvalidArgument( 

1962 "You have applied @given to a method on " 

1963 f"{type(runner).__qualname__}, but this " 

1964 "class does not inherit from the supported versions in " 

1965 "`hypothesis.extra.django`. Use the Hypothesis variants " 

1966 "to ensure that each example is run in a separate " 

1967 "database transaction." 

1968 ) 

1969 

1970 nonlocal thread_local 

1971 # Check selfy really is self (not e.g. a mock) before we health-check 

1972 cur_self = ( 

1973 stuff.selfy 

1974 if getattr(type(stuff.selfy), test.__name__, None) is wrapped_test 

1975 else None 

1976 ) 

1977 if thread_local.prev_self is not_set: 

1978 thread_local.prev_self = cur_self 

1979 elif cur_self is not thread_local.prev_self: 

1980 fail_health_check( 

1981 settings, 

1982 f"The method {test.__qualname__} was called from multiple " 

1983 "different executors. This may lead to flaky tests and " 

1984 "nonreproducible errors when replaying from database." 

1985 "\n\n" 

1986 "Unlike most health checks, HealthCheck.differing_executors " 

1987 "warns about a correctness issue with your test. We " 

1988 "therefore recommend fixing the underlying issue, rather " 

1989 "than suppressing this health check. However, if you are " 

1990 "confident this health check can be safely disabled, you can " 

1991 "do so with " 

1992 "@settings(suppress_health_check=[HealthCheck.differing_executors]). " 

1993 "See " 

1994 "https://hypothesis.readthedocs.io/en/latest/reference/api.html#hypothesis.HealthCheck " 

1995 "for details.", 

1996 HealthCheck.differing_executors, 

1997 ) 

1998 

1999 state = StateForActualGivenExecution( 

2000 stuff, 

2001 test, 

2002 settings, 

2003 random, 

2004 wrapped_test, 

2005 thread_overlap=thread_overlap, 

2006 ) 

2007 

2008 # If there was a @reproduce_failure decorator, use it to reproduce 

2009 # the error (or complain that we couldn't). Either way, this will 

2010 # always raise some kind of error. 

2011 if ( 

2012 reproduce_failure := wrapped_test._hypothesis_internal_use_reproduce_failure 

2013 ) is not None: 

2014 expected_version, failure = reproduce_failure 

2015 if expected_version != __version__: 

2016 raise InvalidArgument( 

2017 "Attempting to reproduce a failure from a different " 

2018 f"version of Hypothesis. This failure is from {expected_version}, but " 

2019 f"you are currently running {__version__!r}. Please change your " 

2020 "Hypothesis version to a matching one." 

2021 ) 

2022 try: 

2023 state.execute_once( 

2024 ConjectureData.for_choices(decode_failure(failure)), 

2025 print_example=True, 

2026 is_final=True, 

2027 ) 

2028 raise DidNotReproduce( 

2029 "Expected the test to raise an error, but it " 

2030 "completed successfully." 

2031 ) 

2032 except StopTest: 

2033 raise DidNotReproduce( 

2034 "The shape of the test data has changed in some way " 

2035 "from where this blob was defined. Are you sure " 

2036 "you're running the same test?" 

2037 ) from None 

2038 except UnsatisfiedAssumption: 

2039 raise DidNotReproduce( 

2040 "The test data failed to satisfy an assumption in the " 

2041 "test. Have you added it since this blob was generated?" 

2042 ) from None 

2043 
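            # Illustrative sketch of the decorator replayed above (the
            # version string and blob are placeholders, not a real failure):
            #
            #     @reproduce_failure('6.0.0', b'AXicY2BgYAQAAAMAAQ==')
            #     @given(st.integers())
            #     def test(x): ...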

2044 # There was no @reproduce_failure, so start by running any explicit 

2045 # examples from @example decorators. 

2046 if errors := list( 

2047 execute_explicit_examples( 

2048 state, wrapped_test, arguments, kwargs, original_sig 

2049 ) 

2050 ): 

2051 # If we're not going to report multiple bugs, we would have 

2052 # stopped running explicit examples at the first failure. 

2053 assert len(errors) == 1 or state.settings.report_multiple_bugs 

2054 

2055 # If an explicit example raised a 'skip' exception, ensure it's never 

2056 # wrapped up in an exception group. Because we break out of the loop 

2057 # immediately on finding a skip, if present it's always the last error. 

2058 if isinstance(errors[-1][1], skip_exceptions_to_reraise()): 

2059 # Covered by `test_issue_3453_regression`, just in a subprocess. 

2060 del errors[:-1] # pragma: no cover 

2061 

2062 _raise_to_user(errors, state.settings, [], " in explicit examples") 

2063 

2064 # If there were any explicit examples, they all ran successfully. 

2065 # The next step is to use the Conjecture engine to run the test on 

2066 # many different inputs. 

2067 ran_explicit_examples = ( 

2068 Phase.explicit in state.settings.phases 

2069 and getattr(wrapped_test, "hypothesis_explicit_examples", ()) 

2070 ) 

2071 SKIP_BECAUSE_NO_EXAMPLES = unittest.SkipTest( 

2072 "Hypothesis has been told to run no examples for this test." 

2073 ) 

2074 if not ( 

2075 Phase.reuse in settings.phases or Phase.generate in settings.phases 

2076 ): 

2077 if not ran_explicit_examples: 

2078 raise SKIP_BECAUSE_NO_EXAMPLES 

2079 return 

2080 

2081 try: 

2082 if isinstance(runner, TestCase) and hasattr(runner, "subTest"): 

2083 subTest = runner.subTest 

2084 try: 

2085 runner.subTest = types.MethodType(fake_subTest, runner) 

2086 state.run_engine() 

2087 finally: 

2088 runner.subTest = subTest 

2089 else: 

2090 state.run_engine() 

2091 except BaseException as e: 

2092 # The exception caught here should either be an actual test 

2093 # failure (or BaseExceptionGroup), or some kind of fatal error 

2094 # that caused the engine to stop. 

2095 generated_seed = ( 

2096 wrapped_test._hypothesis_internal_use_generated_seed 

2097 ) 

2098 with local_settings(settings): 

2099 if not (state.failed_normally or generated_seed is None): 

2100 if running_under_pytest: 

2101 report( 

2102 f"You can add @seed({generated_seed}) to this test or " 

2103 f"run pytest with --hypothesis-seed={generated_seed} " 

2104 "to reproduce this failure." 

2105 ) 

2106 else: 

2107 report( 

2108 f"You can add @seed({generated_seed}) to this test to " 

2109 "reproduce this failure." 

2110 ) 

2111 # The dance here is to avoid showing users long tracebacks 

2112 # full of Hypothesis internals they don't care about. 

2113 # We have to do this inline, to avoid adding another 

2114 # internal stack frame just when we've removed the rest. 

2115 # 

2116 # Using a variable for our trimmed error ensures that the line 

2117 # which will actually appear in tracebacks is as clear as 

2118 # possible - "raise the_error_hypothesis_found". 

2119 the_error_hypothesis_found = e.with_traceback( 

2120 None 

2121 if isinstance(e, BaseExceptionGroup) 

2122 else get_trimmed_traceback() 

2123 ) 

2124 raise the_error_hypothesis_found 

2125 

2126 if not (ran_explicit_examples or state.ever_executed): 

2127 raise SKIP_BECAUSE_NO_EXAMPLES 

2128 finally: 

2129 with thread_overlap_lock: 

2130 del thread_overlap[threadid] 

2131 

2132 def _get_fuzz_target() -> ( 

2133 Callable[[Union[bytes, bytearray, memoryview, BinaryIO]], Optional[bytes]] 

2134 ): 

2135 # Because fuzzing interfaces are very performance-sensitive, we use a 

2136 # somewhat more complicated structure here. `_get_fuzz_target()` is 

2137 # called by the `HypothesisHandle.fuzz_one_input` property, allowing 

2138 # us to defer our collection of the settings, random instance, and 

2139 # reassignable `inner_test` (etc) until `fuzz_one_input` is accessed. 

2140 # 

2141 # We then share the performance cost of setting up `state` between 

2142 # many invocations of the target. We explicitly force `deadline=None` 

2143 # for performance reasons, saving ~40% of the runtime of an empty test. 

2144 test = wrapped_test.hypothesis.inner_test 

2145 settings = Settings( 

2146 parent=wrapped_test._hypothesis_internal_use_settings, deadline=None 

2147 ) 

2148 random = get_random_for_wrapped_test(test, wrapped_test) 

2149 _args, _kwargs, stuff = process_arguments_to_given( 

2150 wrapped_test, (), {}, given_kwargs, new_signature.parameters 

2151 ) 

2152 assert not _args 

2153 assert not _kwargs 

2154 state = StateForActualGivenExecution( 

2155 stuff, 

2156 test, 

2157 settings, 

2158 random, 

2159 wrapped_test, 

2160 thread_overlap=thread_overlap, 

2161 ) 

2162 database_key = function_digest(test) + b".secondary" 

2163 # We track the minimal-so-far example for each distinct origin, so 

2164 # that we track log-n instead of n examples for long runs. In particular 

2165 # it means that we saturate for common errors in long runs instead of 

2166 # storing huge volumes of low-value data. 

2167 minimal_failures: dict = {} 

2168 

2169 def fuzz_one_input( 

2170 buffer: Union[bytes, bytearray, memoryview, BinaryIO], 

2171 ) -> Optional[bytes]: 

2172 # This inner part is all that the fuzzer will actually run, 

2173 # so we keep it as small and as fast as possible. 

2174 if isinstance(buffer, io.IOBase): 

2175 buffer = buffer.read(BUFFER_SIZE) 

2176 assert isinstance(buffer, (bytes, bytearray, memoryview)) 

2177 data = ConjectureData( 

2178 random=None, 

2179 provider=BytestringProvider, 

2180 provider_kw={"bytestring": buffer}, 

2181 ) 
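            # The provider replays `buffer` as the source of every choice the
            # strategies draw; running out of bytes ends the test early as an
            # overrun, which surfaces here as StopTest.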

2182 try: 

2183 state.execute_once(data) 

2184 status = Status.VALID 

2185 except StopTest: 

2186 status = data.status 

2187 return None 

2188 except UnsatisfiedAssumption: 

2189 status = Status.INVALID 

2190 return None 

2191 except BaseException: 

2192 known = minimal_failures.get(data.interesting_origin) 

2193 if settings.database is not None and ( 

2194 known is None or sort_key(data.nodes) <= sort_key(known) 

2195 ): 

2196 settings.database.save( 

2197 database_key, choices_to_bytes(data.choices) 

2198 ) 

2199 minimal_failures[data.interesting_origin] = data.nodes 

2200 status = Status.INTERESTING 

2201 raise 

2202 finally: 

2203 if observability_enabled(): 

2204 data.freeze() 

2205 tc = make_testcase( 

2206 run_start=state._start_timestamp, 

2207 property=state.test_identifier, 

2208 data=data, 

2209 how_generated="fuzz_one_input", 

2210 representation=state._string_repr, 

2211 arguments=data._observability_args, 

2212 timing=state._timing_features, 

2213 coverage=None, 

2214 status=status, 

2215 backend_metadata=data.provider.observe_test_case(), 

2216 ) 

2217 deliver_observation(tc) 

2218 state._timing_features = {} 

2219 

2220 assert isinstance(data.provider, BytestringProvider) 

2221 return bytes(data.provider.drawn) 

2222 

2223 fuzz_one_input.__doc__ = HypothesisHandle.fuzz_one_input.__doc__ 

2224 return fuzz_one_input 

2225 

2226 # After having created the decorated test function, we need to copy 

2227 # over some attributes to make the switch as seamless as possible. 

2228 

2229 for attrib in dir(test): 

2230 if not (attrib.startswith("_") or hasattr(wrapped_test, attrib)): 

2231 setattr(wrapped_test, attrib, getattr(test, attrib)) 

2232 wrapped_test.is_hypothesis_test = True 

2233 if hasattr(test, "_hypothesis_internal_settings_applied"): 

2234 # Used to check if @settings is applied twice. 

2235 wrapped_test._hypothesis_internal_settings_applied = True 

2236 wrapped_test._hypothesis_internal_use_seed = getattr( 

2237 test, "_hypothesis_internal_use_seed", None 

2238 ) 

2239 wrapped_test._hypothesis_internal_use_settings = ( 

2240 getattr(test, "_hypothesis_internal_use_settings", None) or Settings.default 

2241 ) 

2242 wrapped_test._hypothesis_internal_use_reproduce_failure = getattr( 

2243 test, "_hypothesis_internal_use_reproduce_failure", None 

2244 ) 

2245 wrapped_test.hypothesis = HypothesisHandle(test, _get_fuzz_target, given_kwargs) 

2246 return wrapped_test 

2247 

2248 return run_test_as_given 

2249 

2250 

2251def find( 

2252 specifier: SearchStrategy[Ex], 

2253 condition: Callable[[Any], bool], 

2254 *, 

2255 settings: Optional[Settings] = None, 

2256 random: Optional[Random] = None, 

2257 database_key: Optional[bytes] = None, 

2258) -> Ex: 

2259 """Returns the minimal example from the given strategy ``specifier`` that 

2260 matches the predicate function ``condition``.""" 

2261 if settings is None: 

2262 settings = Settings(max_examples=2000) 

2263 settings = Settings( 

2264 settings, suppress_health_check=list(HealthCheck), report_multiple_bugs=False 

2265 ) 

2266 

2267 if database_key is None and settings.database is not None: 

2268 # Note: the database key is not guaranteed to be unique. If it is not, 

2269 # replayed database examples may fail to reproduce because they are 

2270 # replayed against the wrong condition. 

2271 database_key = function_digest(condition) 

2272 

2273 if not isinstance(specifier, SearchStrategy): 

2274 raise InvalidArgument( 

2275 f"Expected SearchStrategy but got {specifier!r} of " 

2276 f"type {type(specifier).__name__}" 

2277 ) 

2278 specifier.validate() 

2279 

2280 last: list[Ex] = [] 

2281 

2282 @settings 

2283 @given(specifier) 

2284 def test(v): 

2285 if condition(v): 

2286 last[:] = [v] 

2287 raise Found 

2288 

2289 if random is not None: 

2290 test = seed(random.getrandbits(64))(test) 

2291 

2292 test._hypothesis_internal_database_key = database_key # type: ignore 

2293 

2294 try: 

2295 test() 

2296 except Found: 

2297 return last[0] 

2298 

2299 raise NoSuchExample(get_pretty_function_description(condition))
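# Illustrative use of find():
#
#     find(st.integers(min_value=0), lambda x: x > 10)
#
# runs the wrapped test until the condition holds and then shrinks, so this
# call should return 11; NoSuchExample is raised if no satisfying example
# is found within the example budget.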