# Copyright 2016 Grist Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#      http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import abc
import ast
import bisect
import sys
import token
from ast import Module
from typing import Iterable, Iterator, List, Optional, Tuple, Any, cast, TYPE_CHECKING

import six
from six.moves import xrange  # pylint: disable=redefined-builtin

from .line_numbers import LineNumbers
from .util import (
  Token, match_token, is_non_coding_token, patched_generate_tokens, last_stmt,
  annotate_fstring_nodes, generate_tokens, is_module, is_stmt
)

if TYPE_CHECKING:  # pragma: no cover
  from .util import AstNode, TokenInfo


class ASTTextBase(six.with_metaclass(abc.ABCMeta, object)):
  def __init__(self, source_text, filename):
    # type: (Any, str) -> None
    # FIXME: Strictly, the type of source_text is one of the six string types, but hard to specify with mypy given
    # https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases

    self._filename = filename

    # Decode source after parsing to let Python 2 handle coding declarations.
    # (If the encoding was not utf-8 compatible, then even if it parses correctly,
    # we'll fail with a unicode error here.)
    source_text = six.ensure_text(source_text)

    self._text = source_text
    self._line_numbers = LineNumbers(source_text)

  @abc.abstractmethod
  def get_text_positions(self, node, padded):
    # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]
    """
    Returns two ``(lineno, col_offset)`` tuples for the start and end of the given node.
    If the positions can't be determined, or the nodes don't correspond to any particular text,
    returns ``(1, 0)`` for both.

    ``padded`` corresponds to the ``padded`` argument to ``ast.get_source_segment()``.
    This means that if ``padded`` is True, the start position will be adjusted to include
    leading whitespace if ``node`` is a multiline statement.
    """
    raise NotImplementedError  # pragma: no cover

  def get_text_range(self, node, padded=True):
    # type: (AstNode, bool) -> Tuple[int, int]
    """
    Returns the (startpos, endpos) positions in source text corresponding to the given node.
    Returns (0, 0) for nodes (like `Load`) that don't correspond to any particular text.

    See ``get_text_positions()`` for details on the ``padded`` argument.
    """
    start, end = self.get_text_positions(node, padded)
    return (
      self._line_numbers.line_to_offset(*start),
      self._line_numbers.line_to_offset(*end),
    )

  def get_text(self, node, padded=True):
    # type: (AstNode, bool) -> str
    """
    Returns the text corresponding to the given node.
    Returns '' for nodes (like `Load`) that don't correspond to any particular text.

    See ``get_text_positions()`` for details on the ``padded`` argument.
    """
    start, end = self.get_text_range(node, padded)
    return self._text[start: end]
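
  # Illustrative sketch (not part of the library) of the ``get_text*`` methods,
  # assuming an ``ASTTokens`` instance ``atok`` built from the source "a = b + 1":
  #
  #   node = atok.tree.body[0].value                 # the BinOp `b + 1`
  #   atok.get_text(node)                            # -> 'b + 1'
  #   atok.get_text_range(node)                      # -> (4, 9)
  #   atok.get_text_positions(node, padded=False)    # -> ((1, 4), (1, 9))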


class ASTTokens(ASTTextBase, object):
  """
  ASTTokens maintains the text of Python code in several forms: as a string, as line numbers, and
  as tokens, and is used to mark and access token and position information.

  ``source_text`` must be a unicode or UTF8-encoded string. If you pass in UTF8 bytes, remember
  that all offsets you'll get are to the unicode text, which is available as the ``.text``
  property.

  If ``parse`` is set, the ``source_text`` will be parsed with ``ast.parse()``, and the resulting
  tree marked with token info and made available as the ``.tree`` property.

  If ``tree`` is given, it will be marked and made available as the ``.tree`` property. In
  addition to the trees produced by the ``ast`` module, ASTTokens will also mark trees produced
  using the ``astroid`` library <https://www.astroid.org>.

  If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark the nodes of an AST
  tree created separately.
  """

  def __init__(self, source_text, parse=False, tree=None, filename='<unknown>', tokens=None):
    # type: (Any, bool, Optional[Module], str, Iterable[TokenInfo]) -> None
    # FIXME: Strictly, the type of source_text is one of the six string types, but hard to specify with mypy given
    # https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases

    super(ASTTokens, self).__init__(source_text, filename)

    self._tree = ast.parse(source_text, filename) if parse else tree

    # Tokenize the code.
    if tokens is None:
      tokens = generate_tokens(self._text)
    self._tokens = list(self._translate_tokens(tokens))

    # Extract the start positions of all tokens, so that we can quickly map positions to tokens.
    self._token_offsets = [tok.startpos for tok in self._tokens]

    if self._tree:
      self.mark_tokens(self._tree)

  def mark_tokens(self, root_node):
    # type: (Module) -> None
    """
    Given the root of the AST or Astroid tree produced from source_text, visits all nodes marking
    them with token and position information by adding ``.first_token`` and
    ``.last_token`` attributes. This is done automatically in the constructor when ``parse`` or
    ``tree`` arguments are set, but may be used manually with a separate AST or Astroid tree.
    """
    # The hard work of this class is done by MarkTokens
    from .mark_tokens import MarkTokens  # to avoid import loops
    MarkTokens(self).visit_tree(root_node)

  def _translate_tokens(self, original_tokens):
    # type: (Iterable[TokenInfo]) -> Iterator[Token]
    """
    Translates the given standard library tokens into our own representation.
    """
    for index, tok in enumerate(patched_generate_tokens(original_tokens)):
      tok_type, tok_str, start, end, line = tok
      yield Token(tok_type, tok_str, start, end, line, index,
                  self._line_numbers.line_to_offset(start[0], start[1]),
                  self._line_numbers.line_to_offset(end[0], end[1]))

  @property
  def text(self):
    # type: () -> str
    """The source code passed into the constructor."""
    return self._text

  @property
  def tokens(self):
    # type: () -> List[Token]
    """The list of tokens corresponding to the source code from the constructor."""
    return self._tokens

  @property
  def tree(self):
    # type: () -> Optional[Module]
    """The root of the AST tree passed into the constructor or parsed from the source code."""
    return self._tree

  @property
  def filename(self):
    # type: () -> str
    """The filename that was parsed."""
    return self._filename

  def get_token_from_offset(self, offset):
    # type: (int) -> Token
    """
    Returns the token containing the given character offset (0-based position in source text),
    or the preceding token if the position is between tokens.
    """
    return self._tokens[bisect.bisect(self._token_offsets, offset) - 1]

  def get_token(self, lineno, col_offset):
    # type: (int, int) -> Token
    """
    Returns the token containing the given (lineno, col_offset) position, or the preceding token
    if the position is between tokens.
    """
    # TODO: add test for multibyte unicode. We need to translate offsets from ast module (which
    # are in utf8) to offsets into the unicode text. tokenize module seems to use unicode offsets
    # but isn't explicit.
    return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset))

  def get_token_from_utf8(self, lineno, col_offset):
    # type: (int, int) -> Token
    """
    Same as get_token(), but interprets col_offset as a UTF8 offset, which is what `ast` uses.
    """
    return self.get_token(lineno, self._line_numbers.from_utf8_col(lineno, col_offset))
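
  # Illustrative: ``ast`` reports byte-based columns, so non-ASCII sources need the
  # UTF8 variant. For ``atok = ASTTokens("x = 'é'; y = 1", parse=True)``, the second
  # statement starts at utf8 byte column 10 ('é' is two bytes) but unicode column 9:
  #
  #   node = atok.tree.body[1]   # node.col_offset == 10
  #   atok.get_token_from_utf8(node.lineno, node.col_offset).string   # -> 'y'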

  def next_token(self, tok, include_extra=False):
    # type: (Token, bool) -> Token
    """
    Returns the next token after the given one. If include_extra is True, includes non-coding
    tokens from the tokenize module, such as NL and COMMENT.
    """
    i = tok.index + 1
    if not include_extra:
      while is_non_coding_token(self._tokens[i].type):
        i += 1
    return self._tokens[i]

  def prev_token(self, tok, include_extra=False):
    # type: (Token, bool) -> Token
    """
    Returns the previous token before the given one. If include_extra is True, includes non-coding
    tokens from the tokenize module, such as NL and COMMENT.
    """
    i = tok.index - 1
    if not include_extra:
      while is_non_coding_token(self._tokens[i].type):
        i -= 1
    return self._tokens[i]

  def find_token(self, start_token, tok_type, tok_str=None, reverse=False):
    # type: (Token, int, Optional[str], bool) -> Token
    """
    Looks for the first token, starting at start_token, that matches tok_type and, if given, the
    token string. Searches backwards if reverse is True. Returns ENDMARKER token if not found (you
    can check it with `token.ISEOF(t.type)`).
    """
    t = start_token
    advance = self.prev_token if reverse else self.next_token
    while not match_token(t, tok_type, tok_str) and not token.ISEOF(t.type):
      t = advance(t, include_extra=True)
    return t
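
  # Token-navigation sketch (illustrative), with ``atok`` built from "a = b + 1":
  #
  #   first = atok.tokens[0]                        # NAME token 'a'
  #   op = atok.find_token(first, token.OP, '=')    # the '=' token
  #   atok.next_token(op).string                    # -> 'b'
  #   atok.prev_token(op).string                    # -> 'a'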

  def token_range(self,
                  first_token,  # type: Token
                  last_token,  # type: Token
                  include_extra=False,  # type: bool
                  ):
    # type: (...) -> Iterator[Token]
    """
    Yields all tokens in order from first_token through and including last_token. If
    include_extra is True, includes non-coding tokens such as tokenize.NL and .COMMENT.
    """
    for i in xrange(first_token.index, last_token.index + 1):
      if include_extra or not is_non_coding_token(self._tokens[i].type):
        yield self._tokens[i]

  def get_tokens(self, node, include_extra=False):
    # type: (AstNode, bool) -> Iterator[Token]
    """
    Yields all tokens making up the given node. If include_extra is True, includes non-coding
    tokens such as tokenize.NL and .COMMENT.
    """
    return self.token_range(node.first_token, node.last_token, include_extra=include_extra)
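
  # Illustrative: listing the tokens of the Assign node from "a = b + 1":
  #
  #   [t.string for t in atok.get_tokens(atok.tree.body[0])]
  #   # -> ['a', '=', 'b', '+', '1']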

  def get_text_positions(self, node, padded):
    # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]
    """
    Returns two ``(lineno, col_offset)`` tuples for the start and end of the given node.
    If the positions can't be determined, or the nodes don't correspond to any particular text,
    returns ``(1, 0)`` for both.

    ``padded`` corresponds to the ``padded`` argument to ``ast.get_source_segment()``.
    This means that if ``padded`` is True, the start position will be adjusted to include
    leading whitespace if ``node`` is a multiline statement.
    """
    if not hasattr(node, 'first_token'):
      return (1, 0), (1, 0)

    start = node.first_token.start
    end = node.last_token.end
    if padded and any(match_token(t, token.NEWLINE) for t in self.get_tokens(node)):
      # Set col_offset to 0 to include leading indentation for multiline statements.
      start = (start[0], 0)

    return start, end


class ASTText(ASTTextBase, object):
  """
  Supports the same ``get_text*`` methods as ``ASTTokens``,
  but uses the AST to determine the text positions instead of tokens.
  This is faster than ``ASTTokens`` as it requires less setup work.

  It also (sometimes) supports nodes inside f-strings, which ``ASTTokens`` doesn't.

  Some node types and/or Python versions are not supported.
  In these cases the ``get_text*`` methods will fall back to using ``ASTTokens``,
  which incurs the usual setup cost the first time.
  If you want to avoid this, check ``supports_tokenless(node)`` before calling ``get_text*`` methods.
  """

  def __init__(self, source_text, tree=None, filename='<unknown>'):
    # type: (Any, Optional[Module], str) -> None
    # FIXME: Strictly, the type of source_text is one of the six string types, but hard to specify with mypy given
    # https://mypy.readthedocs.io/en/stable/common_issues.html#variables-vs-type-aliases

    super(ASTText, self).__init__(source_text, filename)

    self._tree = tree
    if self._tree is not None:
      annotate_fstring_nodes(self._tree)

    self._asttokens = None  # type: Optional[ASTTokens]

  @property
  def tree(self):
    # type: () -> Module
    if self._tree is None:
      self._tree = ast.parse(self._text, self._filename)
      annotate_fstring_nodes(self._tree)
    return self._tree

  @property
  def asttokens(self):
    # type: () -> ASTTokens
    if self._asttokens is None:
      self._asttokens = ASTTokens(
        self._text,
        tree=self.tree,
        filename=self._filename,
      )
    return self._asttokens

  def _get_text_positions_tokenless(self, node, padded):
    # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]
    """
    Version of ``get_text_positions()`` that doesn't use tokens.
    """
    if sys.version_info[:2] < (3, 8):  # pragma: no cover
      # This is just for mypy
      raise AssertionError("This method should only be called internally after checking supports_tokenless()")

    if is_module(node):
      # Modules don't have position info, so just return the range of the whole text.
      # The token-using method does something different, but its behavior seems weird and inconsistent.
      # For example, in a file with only comments, it only returns the first line.
      # It's hard to imagine a case when this matters.
      return (1, 0), self._line_numbers.offset_to_line(len(self._text))

    if getattr(node, 'lineno', None) is None:
      return (1, 0), (1, 0)

    assert node  # tell mypy that node is not None, which we allowed up to here for compatibility

    decorators = getattr(node, 'decorator_list', [])
    if not decorators:
      # Astroid uses node.decorators.nodes instead of node.decorator_list.
      decorators_node = getattr(node, 'decorators', None)
      decorators = getattr(decorators_node, 'nodes', [])
    if decorators:
      # Function/Class definition nodes are marked by AST as starting at def/class,
      # not the first decorator. This doesn't match the token-using behavior,
      # or inspect.getsource(), and just seems weird.
      start_node = decorators[0]
    else:
      start_node = node

    start_lineno = start_node.lineno
    end_node = last_stmt(node)

    # Include leading indentation for multiline statements.
    # This doesn't mean simple statements that happen to be on multiple lines,
    # but compound statements where inner indentation matters.
    # So we don't just compare node.lineno and node.end_lineno,
    # we check for a contained statement starting on a different line.
    if padded and (
      start_lineno != end_node.lineno
      or (
        # Astroid docstrings aren't treated as separate statements.
        # So to handle function/class definitions with a docstring but no other body,
        # we just check that the node is a statement with a docstring
        # and spanning multiple lines in the simple, literal sense.
        start_lineno != node.end_lineno
        and getattr(node, "doc_node", None)
        and is_stmt(node)
      )
    ):
      start_col_offset = 0
    else:
      start_col_offset = self._line_numbers.from_utf8_col(start_lineno, start_node.col_offset)

    start = (start_lineno, start_col_offset)

    # To match the token-using behaviour, we exclude trailing semicolons and comments.
    # This means that for blocks containing multiple statements, we have to use the last one
    # instead of the actual node for end_lineno and end_col_offset.
    end_lineno = cast(int, end_node.end_lineno)
    end_col_offset = cast(int, end_node.end_col_offset)
    end_col_offset = self._line_numbers.from_utf8_col(end_lineno, end_col_offset)
    end = (end_lineno, end_col_offset)

    return start, end

  def get_text_positions(self, node, padded):
    # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]
    """
    Returns two ``(lineno, col_offset)`` tuples for the start and end of the given node.
    If the positions can't be determined, or the nodes don't correspond to any particular text,
    returns ``(1, 0)`` for both.

    ``padded`` corresponds to the ``padded`` argument to ``ast.get_source_segment()``.
    This means that if ``padded`` is True, the start position will be adjusted to include
    leading whitespace if ``node`` is a multiline statement.
    """
    if getattr(node, "_broken_positions", None):
      # This node was marked in util.annotate_fstring_nodes as having untrustworthy lineno/col_offset.
      return (1, 0), (1, 0)

    if supports_tokenless(node):
      return self._get_text_positions_tokenless(node, padded)

    return self.asttokens.get_text_positions(node, padded)


# Node types that _get_text_positions_tokenless doesn't support. Only relevant for Python 3.8+.
_unsupported_tokenless_types = ()  # type: Tuple[str, ...]
if sys.version_info[:2] >= (3, 8):
  # no lineno
  _unsupported_tokenless_types += ("arguments", "Arguments", "withitem")
  if sys.version_info[:2] == (3, 8):
    # _get_text_positions_tokenless works incorrectly for these types due to bugs in Python 3.8.
    _unsupported_tokenless_types += ("arg", "Starred")
    # no lineno in 3.8
    _unsupported_tokenless_types += ("Slice", "ExtSlice", "Index", "keyword")


def supports_tokenless(node=None):
  # type: (Any) -> bool
  """
  Returns True if the Python version and the node (if given) are supported by
  the ``get_text*`` methods of ``ASTText`` without falling back to ``ASTTokens``.
  See ``ASTText`` for why this matters.

  The following cases are not supported:

  - Python 3.7 and earlier
  - PyPy
  - ``ast.arguments`` / ``astroid.Arguments``
  - ``ast.withitem``
  - ``astroid.Comprehension``
  - ``astroid.AssignName`` inside ``astroid.Arguments`` or ``astroid.ExceptHandler``
  - The following nodes in Python 3.8 only:
    - ``ast.arg``
    - ``ast.Starred``
    - ``ast.Slice``
    - ``ast.ExtSlice``
    - ``ast.Index``
    - ``ast.keyword``
  """
  return (
    type(node).__name__ not in _unsupported_tokenless_types
    and not (
      # astroid nodes
      not isinstance(node, ast.AST) and node is not None and (
        (
          type(node).__name__ == "AssignName"
          and type(node.parent).__name__ in ("Arguments", "ExceptHandler")
        )
      )
    )
    and sys.version_info[:2] >= (3, 8)
    and 'pypy' not in sys.version.lower()
  )
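
# Illustrative: callers can check support before relying on the fast path.
#
#   import asttokens
#   atext = asttokens.ASTText("def f(a): pass")
#   args = atext.tree.body[0].args           # ast.arguments has no lineno
#   supports_tokenless(args)                 # -> False; get_text* falls back to ASTTokens
#   supports_tokenless(atext.tree.body[0])   # -> True on CPython 3.8+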