Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/astroid/inference_tip.py: 82%
44 statements
« prev ^ index » next coverage.py v7.2.7, created at 2023-06-07 06:53 +0000
« prev ^ index » next coverage.py v7.2.7, created at 2023-06-07 06:53 +0000
1# Licensed under the LGPL: https://www.gnu.org/licenses/old-licenses/lgpl-2.1.en.html
2# For details: https://github.com/pylint-dev/astroid/blob/main/LICENSE
3# Copyright (c) https://github.com/pylint-dev/astroid/blob/main/CONTRIBUTORS.txt
5"""Transform utilities (filters and decorator)."""
from __future__ import annotations

import functools
from collections import OrderedDict
from collections.abc import Generator
from typing import Any, TypeVar

from astroid.context import InferenceContext
from astroid.exceptions import InferenceOverwriteError, UseInferenceDefault
from astroid.nodes import NodeNG
from astroid.typing import (
    InferenceResult,
    InferFn,
    TransformFn,
)
# Bounded cache of inference-tip results, keyed by (tip function, node,
# context). Insertion-ordered so the oldest entry can be evicted (FIFO) via
# popitem(last=False) once the cache grows past its size limit.
_cache: OrderedDict[
    tuple[InferFn[Any], NodeNG, InferenceContext | None], list[InferenceResult]
] = OrderedDict()

# Recursion guard: (tip function, node) pairs whose inference is currently in
# progress. Deliberately coarser than the full cache key — see the comment in
# _inference_tip_cached about PyPy recursion errors.
_CURRENTLY_INFERRING: set[tuple[InferFn[Any], NodeNG]] = set()

# Type variable for the concrete NodeNG subclass an inference tip handles.
_NodesT = TypeVar("_NodesT", bound=NodeNG)
def clear_inference_tip_cache() -> None:
    """Drop every memoized inference-tip result."""
    _cache.clear()
36def _inference_tip_cached(func: InferFn[_NodesT]) -> InferFn[_NodesT]:
37 """Cache decorator used for inference tips."""
39 def inner(
40 node: _NodesT,
41 context: InferenceContext | None = None,
42 **kwargs: Any,
43 ) -> Generator[InferenceResult, None, None]:
44 partial_cache_key = (func, node)
45 if partial_cache_key in _CURRENTLY_INFERRING:
46 # If through recursion we end up trying to infer the same
47 # func + node we raise here.
48 _CURRENTLY_INFERRING.remove(partial_cache_key)
49 raise UseInferenceDefault
50 if context is not None and context.is_empty():
51 # Fresh, empty contexts will defeat the cache.
52 context = None
53 try:
54 yield from _cache[func, node, context]
55 return
56 except KeyError:
57 # Recursion guard with a partial cache key.
58 # Using the full key causes a recursion error on PyPy.
59 # It's a pragmatic compromise to avoid so much recursive inference
60 # with slightly different contexts while still passing the simple
61 # test cases included with this commit.
62 _CURRENTLY_INFERRING.add(partial_cache_key)
63 try:
64 # May raise UseInferenceDefault
65 result = _cache[func, node, context] = list(
66 func(node, context, **kwargs)
67 )
68 finally:
69 # Remove recursion guard.
70 try:
71 _CURRENTLY_INFERRING.remove(partial_cache_key)
72 except KeyError:
73 pass # Recursion may beat us to the punch.
75 if len(_cache) > 64:
76 _cache.popitem(last=False)
78 # https://github.com/pylint-dev/pylint/issues/8686
79 yield from result # pylint: disable=used-before-assignment
81 return inner
def inference_tip(
    infer_function: InferFn[_NodesT], raise_on_overwrite: bool = False
) -> TransformFn[_NodesT]:
    """Given an instance specific inference function, return a function to be
    given to AstroidManager().register_transform to set this inference function.

    :param bool raise_on_overwrite: Raise an `InferenceOverwriteError`
        if the inference tip will overwrite another. Used for debugging

    Typical usage

    .. sourcecode:: python

        AstroidManager().register_transform(Call, inference_tip(infer_named_tuple),
                                            predicate)

    .. Note::

        Using an inference tip will override
        any previously set inference tip for the given
        node. Use a predicate in the transform to prevent
        excess overwrites.
    """

    def transform(
        node: _NodesT, infer_function: InferFn[_NodesT] = infer_function
    ) -> _NodesT:
        if (
            raise_on_overwrite
            and node._explicit_inference is not None
            and node._explicit_inference is not infer_function
        ):
            # The already-set inference is the "existing" one and the incoming
            # infer_function is the "new" one (previously these two format
            # arguments were swapped, inverting the message's meaning).
            raise InferenceOverwriteError(
                "Inference already set to {existing_inference}. "
                "Trying to overwrite with {new_inference} for {node}".format(
                    existing_inference=node._explicit_inference,
                    new_inference=infer_function,
                    node=node,
                )
            )
        node._explicit_inference = _inference_tip_cached(infer_function)
        return node

    return transform