Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/hypothesis/statistics.py: 18%

Shortcuts on this page

r m x   toggle line displays

j k   next/prev highlighted chunk

0   (zero) top of page

1   (one) first highlighted chunk

65 statements  

1# This file is part of Hypothesis, which may be found at 

2# https://github.com/HypothesisWorks/hypothesis/ 

3# 

4# Copyright the Hypothesis Authors. 

5# Individual contributors are listed in AUTHORS.rst and the git log. 

6# 

7# This Source Code Form is subject to the terms of the Mozilla Public License, 

8# v. 2.0. If a copy of the MPL was not distributed with this file, You can 

9# obtain one at https://mozilla.org/MPL/2.0/. 

10 

11import math 

12from collections import Counter 

13from collections.abc import Iterable 

14from typing import TYPE_CHECKING, cast 

15 

16from hypothesis._settings import Phase 

17from hypothesis.utils.dynamicvariables import DynamicVariable 

18 

19if TYPE_CHECKING: 

20 from hypothesis.internal.conjecture.engine import PhaseStatistics, StatisticsDict 

21 

# Dynamically-scoped slot holding an optional statistics callback.  Defaults
# to None; when a callback has been installed, note_statistics() below reads
# it via `collector.value` and forwards each run's statistics dict to it.
collector = DynamicVariable(None)

23 

24 

def note_statistics(stats_dict: "StatisticsDict") -> None:
    """Deliver `stats_dict` to the currently-installed collector callback.

    A no-op when no callback is installed (i.e. `collector.value` is None).
    """
    receiver = collector.value
    if receiver is None:
        return
    receiver(stats_dict)

29 

30 

def describe_targets(best_targets: dict[str, float]) -> list[str]:
    """Return a list of lines describing the results of `target`, if any.

    These lines are included in the general statistics description, and are
    also printed immediately below failing examples to alleviate the
    "threshold problem", where shrinking can make a severe bug look trivial.
    See https://github.com/HypothesisWorks/hypothesis/issues/2180
    """
    if not best_targets:
        return []
    if len(best_targets) == 1:
        ((label, score),) = best_targets.items()
        return [f"Highest target score: {score:g} ({label=})"]
    # Order by (score, label) so the best score appears last.
    ordered = sorted(best_targets.items(), key=lambda item: (item[1], item[0]))
    return ["Highest target scores:"] + [
        f"{score:>16g} ({label=})" for label, score in ordered
    ]

47 

48 

def format_ms(times: Iterable[float]) -> str:
    """Format `times` into a string representing approximate milliseconds.

    `times` is a collection of durations in seconds.
    """
    durations = sorted(times)
    last = len(durations) - 1
    if last < 0 or any(math.isnan(d) for d in durations):  # pragma: no cover
        return "NaN ms"
    # Summarise as the 5th-95th percentile range, truncated to whole ms.
    lo = int(durations[math.floor(last * 0.05)] * 1000)
    hi = int(durations[math.ceil(last * 0.95)] * 1000)
    if hi == 0:
        return "< 1ms"
    if lo == hi:
        return f"~ {lo}ms"
    return f"~ {lo}-{hi} ms"

66 

67 

def describe_statistics(stats_dict: "StatisticsDict") -> str:
    """Return a multi-line string describing the passed run statistics.

    `stats_dict` must be a dictionary of data in the format collected by
    `hypothesis.internal.conjecture.engine.ConjectureRunner.statistics`.

    We DO NOT promise that this format will be stable or supported over
    time, but do aim to make it reasonably useful for downstream users.
    It's also meant to support benchmarking for research purposes.

    This function is responsible for the report which is printed in the
    terminal for our pytest --hypothesis-show-statistics option.
    """
    # Start with the test's nodeid as a heading, if one was recorded.
    lines = [stats_dict["nodeid"] + ":\n"] if "nodeid" in stats_dict else []
    prev_failures = 0
    # Iterate phases in declared order, skipping the first declared Phase
    # (presumably no per-phase statistics are recorded for it - confirm
    # against the Phase enum in hypothesis._settings).
    for phase in (p.name for p in list(Phase)[1:]):
        # Per-phase stats live under keys of the form "<phase>-phase".
        d = cast("PhaseStatistics", stats_dict.get(phase + "-phase", {}))
        # Basic information we report for every phase
        cases = d.get("test-cases", [])
        if not cases:
            # Phase never ran (or recorded nothing) - omit it entirely.
            continue
        statuses = Counter(t["status"] for t in cases)
        runtime_ms = format_ms(t["runtime"] for t in cases)
        drawtime_ms = format_ms(t["drawtime"] for t in cases)
        lines.append(
            f"  - during {phase} phase ({d['duration-seconds']:.2f} seconds):\n"
            f"    - Typical runtimes: {runtime_ms}, of which {drawtime_ms} in data generation\n"
            f"    - {statuses['valid']} passing examples, {statuses['interesting']} "
            f"failing examples, {statuses['invalid'] + statuses['overrun']} invalid examples"
        )
        # If we've found new distinct failures in this phase, report them
        distinct_failures = d["distinct-failures"] - prev_failures
        if distinct_failures:
            plural = distinct_failures > 1
            # " more" only once a previous phase already reported failures;
            # "s" only when more than one failure was found here.
            lines.append(
                "    - Found {}{} distinct error{} in this phase".format(
                    distinct_failures, " more" * bool(prev_failures), "s" * plural
                )
            )
        prev_failures = d["distinct-failures"]
        # Report events during the generate phase, if there were any
        if phase == "generate":
            # Flatten each test case's event list and count occurrences.
            events = Counter(sum((t["events"] for t in cases), []))
            if events:
                lines.append("    - Events:")
                # Most frequent first; ties broken alphabetically by event name.
                lines += [
                    f"      * {100 * v / len(cases):.2f}%, {k}"
                    for k, v in sorted(events.items(), key=lambda x: (-x[1], x[0]))
                ]
        # Some additional details on the shrinking phase
        if phase == "shrink":
            lines.append(
                "    - Tried {} shrinks of which {} were successful".format(
                    len(cases), d["shrinks-successful"]
                )
            )
        # Blank line between phase sections.
        lines.append("")

    target_lines = describe_targets(stats_dict.get("targets", {}))
    if target_lines:
        # Indent the target summary to match the per-phase bullets.
        lines.append("  - " + target_lines[0])
        lines.extend("    " + l for l in target_lines[1:])
    lines.append("  - Stopped because " + stats_dict["stopped-because"])
    return "\n".join(lines)