1# Copyright 2016 Grist Labs, Inc.
2#
3# Licensed under the Apache License, Version 2.0 (the "License");
4# you may not use this file except in compliance with the License.
5# You may obtain a copy of the License at
6#
7# http://www.apache.org/licenses/LICENSE-2.0
8#
9# Unless required by applicable law or agreed to in writing, software
10# distributed under the License is distributed on an "AS IS" BASIS,
11# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12# See the License for the specific language governing permissions and
13# limitations under the License.
14
15import abc
16import ast
17import bisect
18import sys
19import token
20from ast import Module
21from typing import Iterable, Iterator, List, Optional, Tuple, Any, cast, TYPE_CHECKING
22
23from .line_numbers import LineNumbers
24from .util import (
25 Token, match_token, is_non_coding_token, patched_generate_tokens, last_stmt,
26 annotate_fstring_nodes, generate_tokens, is_module, is_stmt
27)
28
29if TYPE_CHECKING: # pragma: no cover
30 from .util import AstNode, TokenInfo
31
32
class ASTTextBase(metaclass=abc.ABCMeta):
  """
  Common base for ``ASTTokens`` and ``ASTText``: stores the source text and its
  line-number index, and builds ``get_text_range()`` / ``get_text()`` on top of
  the abstract ``get_text_positions()`` that subclasses implement.
  """

  def __init__(self, source_text: str, filename: str) -> None:
    self._filename = filename

    # Decode source after parsing to let Python 2 handle coding declarations.
    # (If the encoding was not utf-8 compatible, then even if it parses correctly,
    # we'll fail with a unicode error here.)
    text = str(source_text)

    self._text = text
    self._line_numbers = LineNumbers(text)

  @abc.abstractmethod
  def get_text_positions(self, node, padded):
    # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]
    """
    Returns a pair of ``(lineno, col_offset)`` tuples marking where the given
    node starts and ends. When positions cannot be determined, or the node has
    no corresponding text (e.g. `Load`), both tuples are ``(1, 0)``.

    ``padded`` corresponds to the ``padded`` argument to ``ast.get_source_segment()``:
    when True, the start position of a multiline statement is moved back to
    include its leading whitespace.
    """
    raise NotImplementedError  # pragma: no cover

  def get_text_range(self, node, padded=True):
    # type: (AstNode, bool) -> Tuple[int, int]
    """
    Maps ``node`` to ``(startpos, endpos)`` character offsets into the source
    text. Nodes with no corresponding text (like `Load`) map to ``(0, 0)``.

    See ``get_text_positions()`` for the meaning of ``padded``.
    """
    begin, finish = self.get_text_positions(node, padded)
    to_offset = self._line_numbers.line_to_offset
    return to_offset(*begin), to_offset(*finish)

  def get_text(self, node, padded=True):
    # type: (AstNode, bool) -> str
    """
    Returns the source text for ``node``, or ``''`` for nodes (like `Load`)
    that don't correspond to any particular text.

    See ``get_text_positions()`` for the meaning of ``padded``.
    """
    startpos, endpos = self.get_text_range(node, padded)
    return self._text[startpos:endpos]
83
84
class ASTTokens(ASTTextBase):
  """
  ASTTokens maintains the text of Python code in several forms: as a string, as
  line numbers, and as tokens, and is used to mark and access token and
  position information.

  ``source_text`` must be a unicode or UTF8-encoded string. If you pass in UTF8
  bytes, remember that all offsets you'll get are to the unicode text, which is
  available as the ``.text`` property.

  If ``parse`` is set, the ``source_text`` will be parsed with ``ast.parse()``,
  and the resulting tree marked with token info and made available as the
  ``.tree`` property.

  If ``tree`` is given, it will be marked and made available as the ``.tree``
  property. In addition to the trees produced by the ``ast`` module, ASTTokens
  will also mark trees produced using ``astroid`` library
  <https://www.astroid.org>.

  If only ``source_text`` is given, you may use ``.mark_tokens(tree)`` to mark
  the nodes of an AST tree created separately.
  """

  def __init__(self, source_text, parse=False, tree=None, filename='<unknown>', tokens=None):
    # type: (Any, bool, Optional[Module], str, Optional[Iterable[TokenInfo]]) -> None
    super(ASTTokens, self).__init__(source_text, filename)

    if parse:
      self._tree = ast.parse(source_text, filename)
    else:
      self._tree = tree

    # Tokenize the code, converting to our own Token representation.
    if tokens is None:
      tokens = generate_tokens(self._text)
    self._tokens = list(self._translate_tokens(tokens))

    # Start offsets of every token, enabling fast position-to-token lookups.
    self._token_offsets = [tok.startpos for tok in self._tokens]

    if self._tree:
      self.mark_tokens(self._tree)

  def mark_tokens(self, root_node):
    # type: (Module) -> None
    """
    Visits every node of the AST or Astroid tree produced from source_text,
    attaching ``.first_token`` and ``.last_token`` attributes with token and
    position information. This happens automatically in the constructor when
    ``parse`` or ``tree`` is given, but may be called manually on a separate
    AST or Astroid tree.
    """
    # MarkTokens does the real work; imported here to avoid import loops.
    from .mark_tokens import MarkTokens
    MarkTokens(self).visit_tree(root_node)

  def _translate_tokens(self, original_tokens):
    # type: (Iterable[TokenInfo]) -> Iterator[Token]
    """
    Converts standard-library tokens into our own ``Token`` representation.
    """
    to_offset = self._line_numbers.line_to_offset
    for index, (tok_type, tok_str, start, end, line) in enumerate(
        patched_generate_tokens(original_tokens)):
      yield Token(tok_type, tok_str, start, end, line, index,
                  to_offset(start[0], start[1]),
                  to_offset(end[0], end[1]))

  @property
  def text(self):
    # type: () -> str
    """The source code passed into the constructor."""
    return self._text

  @property
  def tokens(self):
    # type: () -> List[Token]
    """The list of tokens corresponding to the source code from the constructor."""
    return self._tokens

  @property
  def tree(self):
    # type: () -> Optional[Module]
    """The root of the AST tree passed into the constructor or parsed from the source code."""
    return self._tree

  @property
  def filename(self):
    # type: () -> str
    """The filename that was parsed."""
    return self._filename

  def get_token_from_offset(self, offset):
    # type: (int) -> Token
    """
    Returns the token containing the given character offset (0-based position
    in source text), or the preceding token if the position is between tokens.
    """
    idx = bisect.bisect(self._token_offsets, offset) - 1
    return self._tokens[idx]

  def get_token(self, lineno, col_offset):
    # type: (int, int) -> Token
    """
    Returns the token containing the given (lineno, col_offset) position, or
    the preceding token if the position is between tokens.
    """
    # TODO: add test for multibyte unicode. We need to translate offsets from ast module (which
    # are in utf8) to offsets into the unicode text. tokenize module seems to use unicode offsets
    # but isn't explicit.
    return self.get_token_from_offset(self._line_numbers.line_to_offset(lineno, col_offset))

  def get_token_from_utf8(self, lineno, col_offset):
    # type: (int, int) -> Token
    """
    Same as get_token(), but interprets col_offset as a UTF8 offset, which is what `ast` uses.
    """
    unicode_col = self._line_numbers.from_utf8_col(lineno, col_offset)
    return self.get_token(lineno, unicode_col)

  def next_token(self, tok, include_extra=False):
    # type: (Token, bool) -> Token
    """
    Returns the token following the given one. With include_extra=True,
    non-coding tokens from the tokenize module (such as NL and COMMENT) are
    included; otherwise they are skipped over.
    """
    pos = tok.index + 1
    while not include_extra and is_non_coding_token(self._tokens[pos].type):
      pos += 1
    return self._tokens[pos]

  def prev_token(self, tok, include_extra=False):
    # type: (Token, bool) -> Token
    """
    Returns the token preceding the given one. With include_extra=True,
    non-coding tokens from the tokenize module (such as NL and COMMENT) are
    included; otherwise they are skipped over.
    """
    pos = tok.index - 1
    while not include_extra and is_non_coding_token(self._tokens[pos].type):
      pos -= 1
    return self._tokens[pos]

  def find_token(self, start_token, tok_type, tok_str=None, reverse=False):
    # type: (Token, int, Optional[str], bool) -> Token
    """
    Scans from start_token for the first token matching tok_type and, if given,
    the token string. Scans backwards when reverse is True. Returns the
    ENDMARKER token if no match is found (check via `token.ISEOF(t.type)`).
    """
    current = start_token
    step = self.prev_token if reverse else self.next_token
    while not token.ISEOF(current.type) and not match_token(current, tok_type, tok_str):
      current = step(current, include_extra=True)
    return current

  def token_range(self,
                  first_token,  # type: Token
                  last_token,  # type: Token
                  include_extra=False,  # type: bool
                  ):
    # type: (...) -> Iterator[Token]
    """
    Yields every token from first_token through and including last_token, in
    order. With include_extra=True, non-coding tokens such as tokenize.NL and
    .COMMENT are yielded too.
    """
    for tok in self._tokens[first_token.index:last_token.index + 1]:
      if include_extra or not is_non_coding_token(tok.type):
        yield tok

  def get_tokens(self, node, include_extra=False):
    # type: (AstNode, bool) -> Iterator[Token]
    """
    Yields the tokens making up the given node. With include_extra=True,
    non-coding tokens such as tokenize.NL and .COMMENT are yielded too.
    """
    return self.token_range(node.first_token, node.last_token, include_extra=include_extra)

  def get_text_positions(self, node, padded):
    # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]
    """
    Returns a pair of ``(lineno, col_offset)`` tuples marking where the given
    node starts and ends. When positions cannot be determined, or the node has
    no corresponding text, both tuples are ``(1, 0)``.

    ``padded`` corresponds to the ``padded`` argument to ``ast.get_source_segment()``:
    when True, the start position of a multiline statement is moved back to
    include its leading whitespace.
    """
    if not hasattr(node, 'first_token'):
      return (1, 0), (1, 0)

    begin = node.first_token.start
    finish = node.last_token.end
    if padded:
      is_multiline = any(match_token(t, token.NEWLINE) for t in self.get_tokens(node))
      if is_multiline:
        # Zero the column so leading indentation is included.
        begin = (begin[0], 0)

    return begin, finish
275
276
class ASTText(ASTTextBase):
  """
  Provides the same ``get_text*`` methods as ``ASTTokens``, but derives text
  positions from the AST itself rather than from tokens, making setup cheaper
  and therefore faster than ``ASTTokens``.

  It also (sometimes) supports nodes inside f-strings, which ``ASTTokens``
  doesn't.

  Some node types and/or Python versions are not supported. For those, the
  ``get_text*`` methods fall back to ``ASTTokens``, paying its usual setup cost
  the first time. To avoid that, check ``supports_tokenless(node)`` before
  calling any ``get_text*`` method.
  """
  def __init__(self, source_text, tree=None, filename='<unknown>'):
    # type: (Any, Optional[Module], str) -> None
    super(ASTText, self).__init__(source_text, filename)

    self._tree = tree
    if self._tree is not None:
      annotate_fstring_nodes(self._tree)

    # Fallback ASTTokens instance, built lazily on first use.
    self._asttokens = None  # type: Optional[ASTTokens]

  @property
  def tree(self):
    # type: () -> Module
    """The AST tree, parsed from the source on first access if not supplied."""
    if self._tree is None:
      self._tree = ast.parse(self._text, self._filename)
      annotate_fstring_nodes(self._tree)
    return self._tree

  @property
  def asttokens(self):
    # type: () -> ASTTokens
    """Lazily-constructed ``ASTTokens`` used as a fallback for unsupported nodes."""
    if self._asttokens is None:
      self._asttokens = ASTTokens(
        self._text,
        tree=self.tree,
        filename=self._filename,
      )
    return self._asttokens

  def _get_text_positions_tokenless(self, node, padded):
    # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]
    """
    Computes ``get_text_positions()`` from AST position attributes alone,
    without tokenizing.
    """
    if is_module(node):
      # Modules carry no position info, so cover the range of the whole text.
      # (The token-using method does something different, but its behavior seems
      # weird and inconsistent - e.g. for a file of only comments it returns
      # just the first line - and it's hard to imagine a case where it matters.)
      return (1, 0), self._line_numbers.offset_to_line(len(self._text))

    if getattr(node, 'lineno', None) is None:
      return (1, 0), (1, 0)

    assert node  # tell mypy that node is not None, which we allowed up to here for compatibility

    decorators = getattr(node, 'decorator_list', [])
    if not decorators:
      # Astroid stores decorators under node.decorators.nodes rather than
      # node.decorator_list.
      decorators = getattr(getattr(node, 'decorators', None), 'nodes', [])

    # AST marks function/class definitions as starting at def/class, not at the
    # first decorator. That doesn't match the token-using behavior or
    # inspect.getsource(), and just seems weird - so start from the first
    # decorator when there is one.
    start_node = decorators[0] if decorators else node

    start_lineno = start_node.lineno
    end_node = last_stmt(node)

    # "Multiline statement" here means a compound statement whose inner
    # indentation matters, not merely a simple statement that spans lines.
    # So instead of comparing node.lineno and node.end_lineno, we look for a
    # contained statement starting on a different line.
    multiline = (
      start_lineno != end_node.lineno
      or (
        # Astroid doesn't treat docstrings as separate statements, so a
        # function/class definition whose body is only a docstring needs a
        # simple, literal multi-line check on a statement node with a doc_node.
        start_lineno != node.end_lineno
        and getattr(node, "doc_node", None)
        and is_stmt(node)
      )
    )
    if padded and multiline:
      # Include leading indentation for multiline statements.
      start_col_offset = 0
    else:
      start_col_offset = self._line_numbers.from_utf8_col(start_lineno, start_node.col_offset)

    start = (start_lineno, start_col_offset)

    # To match the token-using behaviour, trailing semicolons and comments are
    # excluded; hence end positions come from the last contained statement
    # rather than from the node itself.
    end_lineno = cast(int, end_node.end_lineno)
    end_col_offset = self._line_numbers.from_utf8_col(
      end_lineno, cast(int, end_node.end_col_offset))
    end = (end_lineno, end_col_offset)

    return start, end

  def get_text_positions(self, node, padded):
    # type: (AstNode, bool) -> Tuple[Tuple[int, int], Tuple[int, int]]
    """
    Returns a pair of ``(lineno, col_offset)`` tuples marking where the given
    node starts and ends. When positions cannot be determined, or the node has
    no corresponding text, both tuples are ``(1, 0)``.

    ``padded`` corresponds to the ``padded`` argument to ``ast.get_source_segment()``:
    when True, the start position of a multiline statement is moved back to
    include its leading whitespace.
    """
    if getattr(node, "_broken_positions", None):
      # util.annotate_fstring_nodes flagged this node's lineno/col_offset as
      # untrustworthy.
      return (1, 0), (1, 0)

    if supports_tokenless(node):
      return self._get_text_positions_tokenless(node, padded)

    return self.asttokens.get_text_positions(node, padded)
404
405
# Node types that _get_text_positions_tokenless doesn't support.
# These initial values are missing lineno.
_unsupported_tokenless_types = ("arguments", "Arguments", "withitem")  # type: Tuple[str, ...]
if sys.version_info[:2] == (3, 8):
  # _get_text_positions_tokenless works incorrectly for these types due to bugs in Python 3.8.
  _unsupported_tokenless_types += ("arg", "Starred")
  # no lineno in 3.8
  _unsupported_tokenless_types += ("Slice", "ExtSlice", "Index", "keyword")


def supports_tokenless(node=None):
  # type: (Any) -> bool
  """
  Returns True if the Python version and the node (if given) are supported by
  the ``get_text*`` methods of ``ASTText`` without falling back to ``ASTTokens``.
  See ``ASTText`` for why this matters.

  The following cases are not supported:

  - PyPy
  - ``ast.arguments`` / ``astroid.Arguments``
  - ``ast.withitem``
  - ``astroid.Comprehension``
  - ``astroid.AssignName`` inside ``astroid.Arguments`` or ``astroid.ExceptHandler``
  - The following nodes in Python 3.8 only:
    - ``ast.arg``
    - ``ast.Starred``
    - ``ast.Slice``
    - ``ast.ExtSlice``
    - ``ast.Index``
    - ``ast.keyword``
  """
  return (
    type(node).__name__ not in _unsupported_tokenless_types
    and not (
      # astroid nodes (not ast.AST instances), which the docstring's astroid
      # cases above refer to.
      not isinstance(node, ast.AST) and node is not None and (
        # Fix: astroid.Comprehension was documented as unsupported but was not
        # actually checked, letting it wrongly take the tokenless path.
        type(node).__name__ == "Comprehension"
        or (
          type(node).__name__ == "AssignName"
          and type(node.parent).__name__ in ("Arguments", "ExceptHandler")
        )
      )
    )
    and 'pypy' not in sys.version.lower()
  )