# Copyright 2016 Grist Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc
import ast
import bisect
import sys
import token
from ast import Module
from typing import Iterable, Iterator, List, Optional, Tuple, Any, cast, TYPE_CHECKING, Type

import six
from six.moves import xrange  # pylint: disable=redefined-builtin

from .line_numbers import LineNumbers
from .util import Token, match_token, is_non_coding_token, patched_generate_tokens, last_stmt, annotate_fstring_nodes, generate_tokens

if TYPE_CHECKING:  # pragma: no cover
  from .util import AstNode, TokenInfo


class ASTTextBase(six.with_metaclass(abc.ABCMeta, object)):
  def __init__(self, source_text, filename):
    # type: (Any, str) -> None
    # FIXME: Strictly, the type of source_text is one of the six string types, but hard to specify with mypy given
    # https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases

    self._filename = filename

    # Decode source after parsing to let Python 2 handle coding declarations.
    # (If the encoding was not utf-8 compatible, then even if it parses correctly,
    # we'll fail with a unicode error here.)
    source_text = six.ensure_text(source_text)

    self._text = source_text
    self._line_numbers = LineNumbers(source_text)

  @abc.abstractmethod
  def get_text_positions(self, node, padded):
    # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]
    """
    Returns two ``(lineno, col_offset)`` tuples for the start and end of the given node.
    If the positions can't be determined, or the nodes don't correspond to any particular text,
    returns ``(1, 0)`` for both.

    ``padded`` corresponds to the ``padded`` argument to ``ast.get_source_segment()``.
    This means that if ``padded`` is True, the start position will be adjusted to include
    leading whitespace if ``node`` is a multiline statement.
    """
    raise NotImplementedError

  def get_text_range(self, node, padded=True):
    # type: (AstNode, bool) -> Tuple[int, int]
    """
    Returns the (startpos, endpos) positions in source text corresponding to the given node.
    Returns (0, 0) for nodes (like `Load`) that don't correspond to any particular text.

    See ``get_text_positions()`` for details on the ``padded`` argument.
    """
    start, end = self.get_text_positions(node, padded)
    return (
      self._line_numbers.line_to_offset(*start),
      self._line_numbers.line_to_offset(*end),
    )

  def get_text(self, node, padded=True):
    # type: (AstNode, bool) -> str
    """
    Returns the text corresponding to the given node.
    Returns '' for nodes (like `Load`) that don't correspond to any particular text.

    See ``get_text_positions()`` for details on the ``padded`` argument.
    """
    start, end = self.get_text_range(node, padded)
    return self._text[start:end]


class ASTTokens(ASTTextBase, object):
  """
  ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and
  as tokens, and is used to mark and access token and position information.

  ``source_text`` must be a unicode or UTF8-encoded string. If you pass in UTF8 bytes, remember
  that all offsets you'll get are to the unicode text, which is available as the ``.text``
  property.

  If ``parse`` is set, the ``source_text`` will be parsed with ``ast.parse()``, and the resulting
  tree marked with token info and made available as the ``.tree`` property.

  If ``tree`` is given, it will be marked and made available as the ``.tree`` property. In
  addition to the trees produced by the ``ast`` module, ASTTokens will also mark trees produced
  by the ``astroid`` library <https://www.astroid.org>.

  If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark the nodes of an AST
  tree created separately.
  """

  def __init__(self, source_text, parse=False, tree=None, filename='<unknown>', tokens=None):
    # type: (Any, bool, Optional[Module], str, Iterable[TokenInfo]) -> None
    # FIXME: Strictly, the type of source_text is one of the six string types, but hard to specify with mypy given
    # https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases

    super(ASTTokens, self).__init__(source_text, filename)

    self._tree = ast.parse(source_text, filename) if parse else tree

    # Tokenize the code.
    if tokens is None:
      tokens = generate_tokens(self._text)
    self._tokens = list(self._translate_tokens(tokens))

    # Extract the start positions of all tokens, so that we can quickly map positions to tokens.
    self._token_offsets = [tok.startpos for tok in self._tokens]

    if self._tree:
      self.mark_tokens(self._tree)

  def mark_tokens(self, root_node):
    # type: (Module) -> None
    """
    Given the root of the AST or Astroid tree produced from source_text, visits all nodes, marking
    them with token and position information by adding ``.first_token`` and
    ``.last_token`` attributes. This is done automatically in the constructor when the ``parse``
    or ``tree`` arguments are set, but may be used manually with a separate AST or Astroid tree.
    """
    # The hard work of this class is done by MarkTokens.
    from .mark_tokens import MarkTokens  # imported here to avoid import loops
    MarkTokens(self).visit_tree(root_node)
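
  # Illustrative sketch (assumed inputs): marking a tree created separately.
  #
  #   source = 'x = 1'
  #   tree = ast.parse(source)
  #   atok = ASTTokens(source)          # neither parse= nor tree= given
  #   atok.mark_tokens(tree)
  #   tree.body[0].first_token.string   # -> 'x'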

  def _translate_tokens(self, original_tokens):
    # type: (Iterable[TokenInfo]) -> Iterator[Token]
    """
    Translates the given standard library tokens into our own representation.
    """
    for index, tok in enumerate(patched_generate_tokens(original_tokens)):
      tok_type, tok_str, start, end, line = tok
      yield Token(tok_type, tok_str, start, end, line, index,
                  self._line_numbers.line_to_offset(start[0], start[1]),
                  self._line_numbers.line_to_offset(end[0], end[1]))

  @property
  def text(self):
    # type: () -> str
    """The source code passed into the constructor."""
    return self._text

  @property
  def tokens(self):
    # type: () -> List[Token]
    """The list of tokens corresponding to the source code from the constructor."""
    return self._tokens

  @property
  def tree(self):
    # type: () -> Optional[Module]
    """The root of the AST tree passed into the constructor or parsed from the source code."""
    return self._tree

  @property
  def filename(self):
    # type: () -> str
    """The filename that was parsed."""
    return self._filename

  def get_token_from_offset(self, offset):
    # type: (int) -> Token
    """
    Returns the token containing the given character offset (0-based position in source text),
    or the preceding token if the position is between tokens.
    """
    return self._tokens[bisect.bisect(self._token_offsets, offset) - 1]

  def get_token(self, lineno, col_offset):
    # type: (int, int) -> Token
    """
    Returns the token containing the given (lineno, col_offset) position, or the preceding token
    if the position is between tokens.
    """
    # TODO: add test for multibyte unicode. We need to translate offsets from the ast module
    # (which are in utf8) to offsets into the unicode text. The tokenize module seems to use
    # unicode offsets but isn't explicit.
    return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset))

  def get_token_from_utf8(self, lineno, col_offset):
    # type: (int, int) -> Token
    """
    Same as get_token(), but interprets col_offset as a UTF8 offset, which is what `ast` uses.
    """
    return self.get_token(lineno, self._line_numbers.from_utf8_col(lineno, col_offset))
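
  # Illustrative sketch (assumed inputs): `ast` reports byte offsets, not text columns.
  #
  #   atok = ASTTokens('α = 1', parse=True)   # 'α' occupies 2 bytes in UTF8
  #   node = atok.tree.body[0].value          # the node for `1`; ast says col_offset == 5
  #   atok.get_token_from_utf8(node.lineno, node.col_offset).string   # -> '1'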

  def next_token(self, tok, include_extra=False):
    # type: (Token, bool) -> Token
    """
    Returns the next token after the given one. If include_extra is True, includes non-coding
    tokens from the tokenize module, such as NL and COMMENT.
    """
    i = tok.index + 1
    if not include_extra:
      while is_non_coding_token(self._tokens[i].type):
        i += 1
    return self._tokens[i]

  def prev_token(self, tok, include_extra=False):
    # type: (Token, bool) -> Token
    """
    Returns the previous token before the given one. If include_extra is True, includes non-coding
    tokens from the tokenize module, such as NL and COMMENT.
    """
    i = tok.index - 1
    if not include_extra:
      while is_non_coding_token(self._tokens[i].type):
        i -= 1
    return self._tokens[i]
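
  # Illustrative sketch (assumed inputs): COMMENT is non-coding and skipped by default.
  #
  #   atok = ASTTokens('a = 1  # note\n', parse=True)
  #   num = atok.tokens[2]                             # the NUMBER token '1'
  #   atok.next_token(num).string                      # -> '\n' (the NEWLINE token)
  #   atok.next_token(num, include_extra=True).string  # -> '# note'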

  def find_token(self, start_token, tok_type, tok_str=None, reverse=False):
    # type: (Token, int, Optional[str], bool) -> Token
    """
    Looks for the first token, starting at start_token, that matches tok_type and, if given, the
    token string. Searches backwards if reverse is True. Returns the ENDMARKER token if not found
    (you can check for it with `token.ISEOF(t.type)`).
    """
    t = start_token
    advance = self.prev_token if reverse else self.next_token
    while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):
      t = advance(t, include_extra=True)
    return t
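
  # Illustrative sketch (assumed inputs): find the closing paren of a call.
  #
  #   atok = ASTTokens('f(x, y)', parse=True)
  #   call = atok.tree.body[0].value
  #   atok.find_token(call.first_token, token.OP, ')').string   # -> ')'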

  def token_range(self,
                  first_token,  # type: Token
                  last_token,  # type: Token
                  include_extra=False,  # type: bool
                  ):
    # type: (...) -> Iterator[Token]
    """
    Yields all tokens in order from first_token through and including last_token. If
    include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.
    """
    for i in xrange(first_token.index, last_token.index + 1):
      if include_extra or not is_non_coding_token(self._tokens[i].type):
        yield self._tokens[i]

  def get_tokens(self, node, include_extra=False):
    # type: (AstNode, bool) -> Iterator[Token]
    """
    Yields all tokens making up the given node. If include_extra is True, includes non-coding
    tokens such as tokenize.NL and .COMMENT.
    """
    return self.token_range(node.first_token, node.last_token, include_extra=include_extra)
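
  # Illustrative sketch (assumed inputs): tokens of a parenthesized, multiline expression.
  #
  #   atok = ASTTokens('x = (1 +\n     2)', parse=True)
  #   node = atok.tree.body[0].value
  #   [t.string for t in atok.get_tokens(node)]   # -> ['(', '1', '+', '2', ')']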

  def get_text_positions(self, node, padded):
    # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]
    """
    Returns two ``(lineno, col_offset)`` tuples for the start and end of the given node.
    If the positions can't be determined, or the nodes don't correspond to any particular text,
    returns ``(1, 0)`` for both.

    ``padded`` corresponds to the ``padded`` argument to ``ast.get_source_segment()``.
    This means that if ``padded`` is True, the start position will be adjusted to include
    leading whitespace if ``node`` is a multiline statement.
    """
    if not hasattr(node, 'first_token'):
      return (1, 0), (1, 0)

    start = node.first_token.start
    end = node.last_token.end
    if padded and any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):
      # Set col_offset to 0 to include leading indentation for multiline statements.
      start = (start[0], 0)

    return start, end
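
  # Illustrative sketch (assumed inputs): ``padded`` pulls the start of an indented
  # multiline statement back to column 0.
  #
  #   atok = ASTTokens('if x:\n    if y:\n        z = 1\n', parse=True)
  #   inner = atok.tree.body[0].body[0]             # the nested, multiline ``if``
  #   atok.get_text_positions(inner, padded=False)  # -> ((2, 4), (3, 13))
  #   atok.get_text_positions(inner, padded=True)   # -> ((2, 0), (3, 13))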


class ASTText(ASTTextBase, object):
  """
  Supports the same ``get_text*`` methods as ``ASTTokens``,
  but uses the AST to determine the text positions instead of tokens.
  This is faster than ``ASTTokens`` as it requires less setup work.

  It also (sometimes) supports nodes inside f-strings, which ``ASTTokens`` doesn't.

  Astroid trees are not supported at all and will raise an error.

  Some node types and/or Python versions are not supported.
  In these cases the ``get_text*`` methods will fall back to using ``ASTTokens``,
  which incurs the usual setup cost the first time.
  If you want to avoid this, check ``supports_tokenless(node)`` before calling ``get_text*`` methods.
  """

  def __init__(self, source_text, tree=None, filename='<unknown>'):
    # type: (Any, Optional[Module], str) -> None
    # FIXME: Strictly, the type of source_text is one of the six string types, but hard to specify with mypy given
    # https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases

    if not isinstance(tree, (ast.AST, type(None))):
      raise NotImplementedError('ASTText only supports AST trees')

    super(ASTText, self).__init__(source_text, filename)

    self._tree = tree
    if self._tree is not None:
      annotate_fstring_nodes(self._tree)

    self._asttokens = None  # type: Optional[ASTTokens]

  @property
  def tree(self):
    # type: () -> Module
    if self._tree is None:
      self._tree = ast.parse(self._text, self._filename)
      annotate_fstring_nodes(self._tree)
    return self._tree

  @property
  def asttokens(self):
    # type: () -> ASTTokens
    if self._asttokens is None:
      self._asttokens = ASTTokens(
        self._text,
        tree=self.tree,
        filename=self._filename,
      )
    return self._asttokens

  def _get_text_positions_tokenless(self, node, padded):
    # type: (ast.AST, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]
    """
    Version of ``get_text_positions()`` that doesn't use tokens.
    """
    if sys.version_info[:2] < (3, 8):
      raise AssertionError("This method should only be called internally after checking supports_tokenless()")

    if isinstance(node, ast.Module):
      # Modules don't have position info, so just return the range of the whole text.
      # The token-using method does something different, but its behavior seems weird and inconsistent.
      # For example, in a file with only comments, it only returns the first line.
      # It's hard to imagine a case when this matters.
      return (1, 0), self._line_numbers.offset_to_line(len(self._text))

    if not hasattr(node, 'lineno'):
      return (1, 0), (1, 0)

    assert node  # tell mypy that node is not None, which we allowed up to here for compatibility

    decorators = getattr(node, 'decorator_list', [])
    if decorators:
      # Function/Class definition nodes are marked by AST as starting at def/class,
      # not the first decorator. This doesn't match the token-using behavior,
      # or inspect.getsource(), and just seems weird.
      start_node = decorators[0]
    else:
      start_node = node

    if padded and last_stmt(node).lineno != node.lineno:
      # Include leading indentation for multiline statements.
      start_col_offset = 0
    else:
      start_col_offset = self._line_numbers.from_utf8_col(start_node.lineno, start_node.col_offset)

    start = (start_node.lineno, start_col_offset)

    # To match the token-using behavior, we exclude trailing semicolons and comments.
    # This means that for blocks containing multiple statements, we have to use the last one
    # instead of the actual node for end_lineno and end_col_offset.
    end_node = last_stmt(node)
    end_lineno = cast(int, end_node.end_lineno)
    end_col_offset = cast(int, end_node.end_col_offset)
    end_col_offset = self._line_numbers.from_utf8_col(end_lineno, end_col_offset)
    end = (end_lineno, end_col_offset)

    return start, end

  def get_text_positions(self, node, padded):
    # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]
    """
    Returns two ``(lineno, col_offset)`` tuples for the start and end of the given node.
    If the positions can't be determined, or the nodes don't correspond to any particular text,
    returns ``(1, 0)`` for both.

    ``padded`` corresponds to the ``padded`` argument to ``ast.get_source_segment()``.
    This means that if ``padded`` is True, the start position will be adjusted to include
    leading whitespace if ``node`` is a multiline statement.
    """
    if getattr(node, "_broken_positions", None):
      # This node was marked in util.annotate_fstring_nodes as having untrustworthy lineno/col_offset.
      return (1, 0), (1, 0)

    if supports_tokenless(node):
      return self._get_text_positions_tokenless(node, padded)

    return self.asttokens.get_text_positions(node, padded)


# Node types that _get_text_positions_tokenless doesn't support. Only relevant for Python 3.8+.
_unsupported_tokenless_types = ()  # type: Tuple[Type[ast.AST], ...]
if sys.version_info[:2] >= (3, 8):
  _unsupported_tokenless_types += (
    # no lineno
    ast.arguments, ast.withitem,
  )
  if sys.version_info[:2] == (3, 8):
    _unsupported_tokenless_types += (
      # _get_text_positions_tokenless works incorrectly for these types due to bugs in Python 3.8.
      ast.arg, ast.Starred,
      # no lineno in 3.8
      ast.Slice, ast.ExtSlice, ast.Index, ast.keyword,
    )


def supports_tokenless(node=None):
  # type: (Any) -> bool
  """
  Returns True if the Python version and the node (if given) are supported by
  the ``get_text*`` methods of ``ASTText`` without falling back to ``ASTTokens``.
  See ``ASTText`` for why this matters.

  The following cases are not supported:

  - Python 3.7 and earlier
  - PyPy
  - Astroid nodes (the ``get_text*`` methods of ``ASTText`` will raise an error)
  - ``ast.arguments`` and ``ast.withitem``
  - The following node types in Python 3.8 only:
    - ``ast.arg``
    - ``ast.Starred``
    - ``ast.Slice``
    - ``ast.ExtSlice``
    - ``ast.Index``
    - ``ast.keyword``
  """
  return (
    isinstance(node, (ast.AST, type(None)))
    and not isinstance(node, _unsupported_tokenless_types)
    and sys.version_info[:2] >= (3, 8)
    and 'pypy' not in sys.version.lower()
  )
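

# Illustrative sketch (assumed inputs): checking before relying on the tokenless path.
#
#   atxt = ASTText('with open(p) as fh: pass')
#   item = atxt.tree.body[0].items[0]   # an ast.withitem, which has no lineno
#   supports_tokenless(item)            # -> False; ASTText would fall back to ASTTokens
#   supports_tokenless()                # -> True on CPython 3.8+ (version check only)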