Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.9/dist-packages/IPython/core/inputtransformer2.py: 23%
397 statements
« prev ^ index » next coverage.py v7.3.1, created at 2023-09-25 06:05 +0000
« prev ^ index » next coverage.py v7.3.1, created at 2023-09-25 06:05 +0000
1"""Input transformer machinery to support IPython special syntax.
3This includes the machinery to recognise and transform ``%magic`` commands,
4``!system`` commands, ``help?`` querying, prompt stripping, and so forth.
6Added: IPython 7.0. Replaces inputsplitter and inputtransformer which were
7deprecated in 7.0.
8"""
10# Copyright (c) IPython Development Team.
11# Distributed under the terms of the Modified BSD License.
13import ast
14from codeop import CommandCompiler, Compile
15import re
16import sys
17import tokenize
18from typing import List, Tuple, Optional, Any
19import warnings
21from IPython.utils import tokenutil
# Matches a run of leading spaces/tabs at the start of a line.
_indent_re = re.compile(r'^[ \t]+')
def leading_empty_lines(lines):
    """Drop blank lines from the start of a cell.

    Leading lines that are empty or contain only whitespace are removed;
    everything from the first substantive line onwards is kept.  The
    input is returned unchanged when it is empty or entirely blank.
    """
    if not lines:
        return lines
    first_code = next(
        (i for i, line in enumerate(lines) if line and not line.isspace()),
        None,
    )
    if first_code is None:
        return lines
    return lines[first_code:]
def leading_indent(lines):
    """Remove the first line's leading indentation from the whole cell.

    If the first line starts with spaces or tabs, that exact whitespace
    prefix is stripped from every following line that begins with it.
    """
    if not lines:
        return lines
    match = re.match(r'^[ \t]+', lines[0])
    if match is None:
        return lines
    prefix = match.group(0)
    width = len(prefix)
    return [line[width:] if line.startswith(prefix) else line for line in lines]
class PromptStripper:
    """Remove matching input prompts from a block of input lines.

    Parameters
    ----------
    prompt_re : regular expression
        Matches any input prompt, including continuation prompts
        (e.g. ``...``).
    initial_re : regular expression, optional
        Matches only the first-line prompt, not continuation.  Defaults
        to *prompt_re*.  Mainly for plain Python prompts (``>>>``),
        where the ``...`` continuation prompt is itself valid Python
        and must not be stripped from arbitrary lines.

    Notes
    -----
    Only *initial_re* is tested against the first line.  If a prompt is
    found on either of the first two lines, prompts are stripped from
    the entire block.
    """

    def __init__(self, prompt_re, initial_re=None):
        self.prompt_re = prompt_re
        self.initial_re = initial_re if initial_re is not None else prompt_re

    def _strip(self, lines):
        # Strip at most one prompt occurrence per line.
        return [self.prompt_re.sub('', line, count=1) for line in lines]

    def __call__(self, lines):
        if not lines:
            return lines
        first_prompted = bool(self.initial_re.match(lines[0]))
        second_prompted = len(lines) > 1 and bool(self.prompt_re.match(lines[1]))
        if first_prompted or second_prompted:
            return self._strip(lines)
        return lines
# Strips classic interpreter prompts: '>>> ' on the first line, '>>> ' or
# '... ' on later lines (but '...' alone on a first line is left, since it
# is valid Python).
classic_prompt = PromptStripper(
    prompt_re=re.compile(r'^(>>>|\.\.\.)( |$)'),
    initial_re=re.compile(r'^>>>( |$)')
)

# Strips IPython's own prompts, e.g. 'In [3]: ' (optionally preceded by a
# vi-mode marker) and the '...:' continuation prompt.
ipython_prompt = PromptStripper(
    re.compile(
        r"""
        ^( # Match from the beginning of a line, either:

        # 1. First-line prompt:
        ((\[nav\]|\[ins\])?\ )? # Vi editing mode prompt, if it's there
        In\ # The 'In' of the prompt, with a space
        \[\d+\]: # Command index, as displayed in the prompt
        \ # With a mandatory trailing space

        | # ... or ...

        # 2. The three dots of the multiline prompt
        \s* # All leading whitespace characters
        \.{3,}: # The three (or more) dots
        \ ? # With an optional trailing space
        )
        """,
        re.VERBOSE,
    )
)
def cell_magic(lines):
    """Convert a ``%%name`` cell into a ``run_cell_magic(...)`` call.

    Returns *lines* untouched when the cell does not start with ``%%``,
    or when it is a help query (``%%name?``), which is handled by the
    help-end transformer instead.
    """
    if not lines or not lines[0].startswith('%%'):
        return lines
    if re.match(r'%%\w+\?', lines[0]):
        # This case will be handled by help_end
        return lines
    header = lines[0][2:].rstrip()
    name, _, first_line = header.partition(' ')
    body = ''.join(lines[1:])
    call = 'get_ipython().run_cell_magic(%r, %r, %r)\n' % (name, first_line, body)
    return [call]
133def _find_assign_op(token_line) -> Optional[int]:
134 """Get the index of the first assignment in the line ('=' not inside brackets)
136 Note: We don't try to support multiple special assignment (a = b = %foo)
137 """
138 paren_level = 0
139 for i, ti in enumerate(token_line):
140 s = ti.string
141 if s == '=' and paren_level == 0:
142 return i
143 if s in {'(','[','{'}:
144 paren_level += 1
145 elif s in {')', ']', '}'}:
146 if paren_level > 0:
147 paren_level -= 1
148 return None
def find_end_of_continued_line(lines, start_line: int):
    """Return the last line of a backslash-continued line.

    Uses 0-indexed line numbers; *start_line* is the first piece of the
    logical line.
    """
    last = start_line
    while lines[last].endswith('\\\n'):
        last += 1
        if last >= len(lines):
            break
    return last
def assemble_continued_line(lines, start: Tuple[int, int], end_line: int):
    r"""Join the pieces of a backslash-continued line into one string.

    Continued lines end in ``\``; the block also includes the line after
    the last ``\``.  Every trailing backslash (and its newline) is
    dropped and the pieces are joined with single spaces.

    Uses 0-indexed line numbers.  *start* is a (lineno, colno) pair at
    which the logical line begins; *end_line* is the last physical line.
    Used so ``%magic`` and ``!system`` commands can span several lines.
    """
    first_row, first_col = start
    pieces = [lines[first_row][first_col:]]
    pieces.extend(lines[first_row + 1:end_line + 1])
    assembled = []
    for piece in pieces[:-1]:
        assembled.append(piece.rstrip()[:-1])  # drop the trailing backslash
    assembled.append(pieces[-1].rstrip())      # last piece: just drop the newline
    return ' '.join(assembled)
class TokenTransformBase:
    """Base class for transformations which examine tokens.

    Special syntax must not be transformed inside strings or comments,
    which is hard to guarantee with regexes alone.  Instead the code is
    tokenised as Python and the special syntax is recognised in tokens.

    IPython's special syntax is not valid Python, so tokenising may go
    wrong after the special syntax starts.  These classes therefore find
    and transform *one* instance of special syntax at a time; tokens are
    regenerated after each transformation to find the next one.

    Subclasses implement the ``find`` classmethod and the ``transform``
    method.  The ``priority`` attribute breaks ties when several
    transformers match at the same position: lower numbers win, which is
    how ``%magic?`` becomes a help call rather than a magic call.
    """

    # Lower number == higher priority for matches at the same location.
    priority = 10

    def sortby(self):
        """Sort key: earliest match first, then priority as tiebreak."""
        return self.start_line, self.start_col, self.priority

    def __init__(self, start):
        # *start* is a 1-indexed (line, col) pair as tokenize reports it;
        # the line is stored 0-indexed to match list indexing.
        self.start_line = start[0] - 1
        self.start_col = start[1]

    @classmethod
    def find(cls, tokens_by_line):
        """Find one instance of special syntax in the provided tokens.

        *tokens_by_line* is a list of lists of tokenize.TokenInfo
        objects, grouped into logical lines for convenience.

        Returns an instance of the class pointing at the match, or None
        if nothing was found.
        """
        raise NotImplementedError

    def transform(self, lines: List[str]):
        """Transform one instance of special syntax found by ``find()``.

        Takes and returns a list of strings representing physical lines.
        """
        raise NotImplementedError
class MagicAssign(TokenTransformBase):
    """Transformer for assignments from magics (a = %foo)"""

    @classmethod
    def find(cls, tokens_by_line):
        """Find the first magic assignment (a = %foo) in the cell."""
        for line in tokens_by_line:
            assign_ix = _find_assign_op(line)
            if assign_ix is None or len(line) < assign_ix + 2:
                continue
            if (line[assign_ix + 1].string == '%'
                    and line[assign_ix + 2].type == tokenize.NAME):
                return cls(line[assign_ix + 1].start)

    def transform(self, lines: List[str]):
        """Transform a magic assignment found by the ``find()`` classmethod."""
        row, col = self.start_line, self.start_col
        lhs = lines[row][:col]
        end_line = find_end_of_continued_line(lines, row)
        rhs = assemble_continued_line(lines, (row, col), end_line)
        assert rhs.startswith('%'), rhs
        magic_name, _, args = rhs[1:].partition(' ')

        call = "get_ipython().run_line_magic({!r}, {!r})".format(magic_name, args)
        new_line = lhs + call + '\n'
        return lines[:row] + [new_line] + lines[end_line + 1:]
class SystemAssign(TokenTransformBase):
    """Transformer for assignments from system commands (a = !foo)"""

    @classmethod
    def find_pre_312(cls, tokens_by_line):
        # On Python < 3.12 a bare '!' tokenizes as ERRORTOKEN.
        for line in tokens_by_line:
            assign_ix = _find_assign_op(line)
            if (assign_ix is None
                    or line[assign_ix].line.strip().startswith('=')
                    or len(line) < assign_ix + 2
                    or line[assign_ix + 1].type != tokenize.ERRORTOKEN):
                continue
            ix = assign_ix + 1
            # Scan forward through ERRORTOKENs looking for the '!'.
            while ix < len(line) and line[ix].type == tokenize.ERRORTOKEN:
                if line[ix].string == '!':
                    return cls(line[ix].start)
                elif not line[ix].string.isspace():
                    break
                ix += 1

    @classmethod
    def find_post_312(cls, tokens_by_line):
        # On Python >= 3.12 '!' tokenizes as a regular OP token.
        for line in tokens_by_line:
            assign_ix = _find_assign_op(line)
            if assign_ix is None:
                continue
            if line[assign_ix].line.strip().startswith("="):
                continue
            if len(line) < assign_ix + 2:
                continue
            candidate = line[assign_ix + 1]
            if candidate.type == tokenize.OP and candidate.string == "!":
                return cls(candidate.start)

    @classmethod
    def find(cls, tokens_by_line):
        """Find the first system assignment (a = !foo) in the cell."""
        finder = (cls.find_pre_312 if sys.version_info < (3, 12)
                  else cls.find_post_312)
        return finder(tokens_by_line)

    def transform(self, lines: List[str]):
        """Transform a system assignment found by the ``find()`` classmethod."""
        row, col = self.start_line, self.start_col
        lhs = lines[row][:col]
        end_line = find_end_of_continued_line(lines, row)
        rhs = assemble_continued_line(lines, (row, col), end_line)
        assert rhs.startswith('!'), rhs
        cmd = rhs[1:]

        call = "get_ipython().getoutput({!r})".format(cmd)
        new_line = lhs + call + '\n'
        return lines[:row] + [new_line] + lines[end_line + 1:]
# The escape sequences that define the syntax transformations IPython will
# apply to user input. These can NOT be just changed here: many regular
# expressions and other parts of the code may use their hardcoded values, and
# for all intents and purposes they constitute the 'IPython syntax', so they
# should be considered fixed.

ESC_SHELL = '!'   # Send line to underlying system shell
ESC_SH_CAP = '!!' # Send line to system shell and capture output
ESC_HELP = '?'    # Find information about object
ESC_HELP2 = '??'  # Find extra-detailed information about object
ESC_MAGIC = '%'   # Call magic function
ESC_MAGIC2 = '%%' # Call cell-magic function
ESC_QUOTE = ','   # Split args on whitespace, quote each as string and call
ESC_QUOTE2 = ';'  # Quote all args as a single string, call
ESC_PAREN = '/'   # Call first argument with rest of line as arguments

# Single- and double-character escapes recognised at the start of a line.
ESCAPE_SINGLES = {'!', '?', '%', ',', ';', '/'}
ESCAPE_DOUBLES = {'!!', '??'}  # %% (cell magic) is handled separately
348def _make_help_call(target, esc):
349 """Prepares a pinfo(2)/psearch call from a target name and the escape
350 (i.e. ? or ??)"""
351 method = 'pinfo2' if esc == '??' \
352 else 'psearch' if '*' in target \
353 else 'pinfo'
354 arg = " ".join([method, target])
355 #Prepare arguments for get_ipython().run_line_magic(magic_name, magic_args)
356 t_magic_name, _, t_magic_arg_s = arg.partition(' ')
357 t_magic_name = t_magic_name.lstrip(ESC_MAGIC)
358 return "get_ipython().run_line_magic(%r, %r)" % (t_magic_name, t_magic_arg_s)
def _tr_help(content):
    """Translate lines escaped with: ?

    A naked help line fires the intro help screen (shell.show_usage()).
    """
    if content:
        return _make_help_call(content, '?')
    return 'get_ipython().show_usage()'
def _tr_help2(content):
    """Translate lines escaped with: ??

    A naked help line fires the intro help screen (shell.show_usage()).
    """
    if content:
        return _make_help_call(content, '??')
    return 'get_ipython().show_usage()'
381def _tr_magic(content):
382 "Translate lines escaped with a percent sign: %"
383 name, _, args = content.partition(' ')
384 return 'get_ipython().run_line_magic(%r, %r)' % (name, args)
386def _tr_quote(content):
387 "Translate lines escaped with a comma: ,"
388 name, _, args = content.partition(' ')
389 return '%s("%s")' % (name, '", "'.join(args.split()) )
391def _tr_quote2(content):
392 "Translate lines escaped with a semicolon: ;"
393 name, _, args = content.partition(' ')
394 return '%s("%s")' % (name, args)
396def _tr_paren(content):
397 "Translate lines escaped with a slash: /"
398 name, _, args = content.partition(' ')
399 return '%s(%s)' % (name, ", ".join(args.split()))
# Dispatch table: maps each escape character to the callable that turns the
# escaped content into the equivalent Python source.
tr = { ESC_SHELL : 'get_ipython().system({!r})'.format,
       ESC_SH_CAP : 'get_ipython().getoutput({!r})'.format,
       ESC_HELP : _tr_help,
       ESC_HELP2 : _tr_help2,
       ESC_MAGIC : _tr_magic,
       ESC_QUOTE : _tr_quote,
       ESC_QUOTE2 : _tr_quote2,
       ESC_PAREN : _tr_paren }
class EscapedCommand(TokenTransformBase):
    """Transformer for escaped commands like %foo, !foo, or /foo"""

    @classmethod
    def find(cls, tokens_by_line):
        """Find the first escaped command (%foo, !foo, etc.) in the cell."""
        for line in tokens_by_line:
            if not line:
                continue
            ix, ll = 0, len(line)
            # Skip over any leading INDENT/DEDENT tokens.
            while ix < ll and line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
                ix += 1
            if ix >= ll:
                continue
            if line[ix].string in ESCAPE_SINGLES:
                return cls(line[ix].start)

    def transform(self, lines):
        """Transform an escaped line found by the ``find()`` classmethod."""
        row, col = self.start_line, self.start_col

        indent = lines[row][:col]
        end_line = find_end_of_continued_line(lines, row)
        line = assemble_continued_line(lines, (row, col), end_line)

        # Doubled escapes (!!, ??) take precedence over the single forms.
        if len(line) > 1 and line[:2] in ESCAPE_DOUBLES:
            escape, content = line[:2], line[2:]
        else:
            escape, content = line[:1], line[1:]

        call = tr[escape](content) if escape in tr else ''

        new_line = indent + call + '\n'
        return lines[:row] + [new_line] + lines[end_line + 1:]
# Matches a (possibly dotted / indexed) name with an optional %/%% prefix,
# followed by a trailing ? or ?? — the "help at end of line" syntax.
_help_end_re = re.compile(
    r"""(%{0,2}
    (?!\d)[\w*]+ # Variable name
    (\.(?!\d)[\w*]+|\[-?[0-9]+\])* # .etc.etc or [0], we only support literal integers.
    )
    (\?\??)$ # ? or ??
    """,
    re.VERBOSE,
)
class HelpEnd(TokenTransformBase):
    """Transformer for help syntax: obj? and obj??"""
    # This needs to be higher priority (lower number) than EscapedCommand so
    # that inspecting magics (%foo?) works.
    priority = 5

    def __init__(self, start, q_locn):
        # *start* points at the target expression, *q_locn* at the '?';
        # both are 1-indexed (line, col) pairs from tokenize.
        super().__init__(start)
        self.q_line = q_locn[0] - 1  # Shift from 1-indexed to 0-indexed
        self.q_col = q_locn[1]

    @classmethod
    def find(cls, tokens_by_line):
        """Find the first help command (foo?) in the cell.
        """
        for line in tokens_by_line:
            # Last token is NEWLINE; look at last but one
            if len(line) > 2 and line[-2].string == '?':
                # Find the first token that's not INDENT/DEDENT
                ix = 0
                while line[ix].type in {tokenize.INDENT, tokenize.DEDENT}:
                    ix += 1
                return cls(line[ix].start, line[-2].start)

    def transform(self, lines):
        """Transform a help command found by the ``find()`` classmethod.
        """
        piece = "".join(lines[self.start_line : self.q_line + 1])
        indent, content = piece[: self.start_col], piece[self.start_col :]
        lines_before = lines[: self.start_line]
        lines_after = lines[self.q_line + 1 :]

        m = _help_end_re.search(content)
        if not m:
            # Content ending in '?' that doesn't look like obj? / obj??
            # cannot be transformed; signal it as a syntax error.
            # (The previous `assert m is not None` here was unreachable:
            # the raise above already covers that condition.)
            raise SyntaxError(content)
        target = m.group(1)
        esc = m.group(3)

        call = _make_help_call(target, esc)
        new_line = indent + call + '\n'

        return lines_before + [new_line] + lines_after
def make_tokens_by_line(lines: List[str]):
    """Tokenize a series of lines and group tokens by logical line.

    The tokens for a multiline Python string or expression are grouped
    as one line.  All lines except the last should keep their line
    ending ('\\n', '\\r\\n') for this to work properly; use
    `.splitlines(keepends=True)` when passing a block of text here.
    """
    # NL tokens appear inside multiline expressions, but also after blank
    # lines or comments (intentional; see https://bugs.python.org/issue17061).
    # We group the former together but split on the latter, so we track
    # bracket depth much like tokenize does internally.

    # reexported from token on 3.7+
    NEWLINE, NL = tokenize.NEWLINE, tokenize.NL  # type: ignore
    tokens_by_line: List[List[Any]] = [[]]
    if len(lines) > 1 and not lines[0].endswith(("\n", "\r", "\r\n", "\x0b", "\x0c")):
        warnings.warn(
            "`make_tokens_by_line` received a list of lines which do not have lineending markers ('\\n', '\\r', '\\r\\n', '\\x0b', '\\x0c'), behavior will be unspecified",
            stacklevel=2,
        )
    parenlev = 0
    try:
        for token in tokenutil.generate_tokens_catch_errors(
            iter(lines).__next__, extra_errors_to_catch=["expected EOF"]
        ):
            tokens_by_line[-1].append(token)
            if token.type == NEWLINE or (token.type == NL and parenlev <= 0):
                tokens_by_line.append([])
            elif token.string in {'(', '[', '{'}:
                parenlev += 1
            elif token.string in {')', ']', '}'}:
                parenlev = max(parenlev - 1, 0)
    except tokenize.TokenError:
        # Input ended in a multiline string or expression. That's OK for us.
        pass

    # Drop the trailing empty group left behind by the final NEWLINE/NL.
    if not tokens_by_line[-1]:
        tokens_by_line.pop()

    return tokens_by_line
def has_sunken_brackets(tokens: List[tokenize.TokenInfo]):
    """Check whether bracket depth in *tokens* ever drops below zero,
    i.e. a closing bracket appears before its opener."""
    depth = 0
    for token in tokens:
        text = token.string
        if text in {"(", "[", "{"}:
            depth += 1
        elif text in {")", "]", "}"}:
            depth -= 1
            if depth < 0:
                return True
    return False
def show_linewise_tokens(s: str):
    """For investigation and debugging"""
    warnings.warn(
        "show_linewise_tokens is deprecated since IPython 8.6",
        DeprecationWarning,
        stacklevel=2,
    )
    if not s.endswith("\n"):
        s += "\n"
    for line in make_tokens_by_line(s.splitlines(keepends=True)):
        print("Line -------")
        for tokinfo in line:
            print(" ", tokinfo)
# Arbitrary limit to prevent getting stuck in infinite loops
# (do_token_transforms raises RuntimeError once this many passes are made).
TRANSFORM_LOOP_LIMIT = 500
class TransformerManager:
    """Applies various transformations to a cell or code block.

    The key methods for external use are ``transform_cell()``
    and ``check_complete()``.
    """
    def __init__(self):
        # Transforms run first, on raw pasted input (prompt stripping etc.).
        self.cleanup_transforms = [
            leading_empty_lines,
            leading_indent,
            classic_prompt,
            ipython_prompt,
        ]
        # Whole-cell, line-based transforms (cell magics).
        self.line_transforms = [
            cell_magic,
        ]
        # Token-based transformer classes, applied repeatedly by
        # do_token_transforms until no more matches are found.
        self.token_transformers = [
            MagicAssign,
            SystemAssign,
            EscapedCommand,
            HelpEnd,
        ]

    def do_one_token_transform(self, lines):
        """Find and run the transform earliest in the code.

        Returns (changed, lines).

        This method is called repeatedly until changed is False, indicating
        that all available transformations are complete.

        The tokens following IPython special syntax might not be valid, so
        the transformed code is retokenised every time to identify the next
        piece of special syntax. Hopefully long code cells are mostly valid
        Python, not using lots of IPython special syntax, so this shouldn't be
        a performance issue.
        """
        tokens_by_line = make_tokens_by_line(lines)
        candidates = []
        for transformer_cls in self.token_transformers:
            transformer = transformer_cls.find(tokens_by_line)
            if transformer:
                candidates.append(transformer)

        if not candidates:
            # Nothing to transform
            return False, lines
        # Earliest match wins; priority breaks ties at the same position.
        ordered_transformers = sorted(candidates, key=TokenTransformBase.sortby)
        for transformer in ordered_transformers:
            try:
                return True, transformer.transform(lines)
            except SyntaxError:
                # A transformer that fails on this input is skipped; the
                # next candidate gets a chance.
                pass
        return False, lines

    def do_token_transforms(self, lines):
        # Apply token transforms until a fixed point, bounded by
        # TRANSFORM_LOOP_LIMIT to avoid infinite loops.
        for _ in range(TRANSFORM_LOOP_LIMIT):
            changed, lines = self.do_one_token_transform(lines)
            if not changed:
                return lines

        raise RuntimeError("Input transformation still changing after "
                           "%d iterations. Aborting." % TRANSFORM_LOOP_LIMIT)

    def transform_cell(self, cell: str) -> str:
        """Transforms a cell of input code"""
        if not cell.endswith('\n'):
            cell += '\n'  # Ensure the cell has a trailing newline
        lines = cell.splitlines(keepends=True)
        # Cleanup (prompt stripping) first, then line transforms, then
        # the repeated token transforms.
        for transform in self.cleanup_transforms + self.line_transforms:
            lines = transform(lines)

        lines = self.do_token_transforms(lines)
        return ''.join(lines)

    def check_complete(self, cell: str):
        """Return whether a block of code is ready to execute, or should be continued

        Parameters
        ----------
        cell : string
            Python input code, which can be multiline.

        Returns
        -------
        status : str
            One of 'complete', 'incomplete', or 'invalid' if source is not a
            prefix of valid code.
        indent_spaces : int or None
            The number of spaces by which to indent the next line of code. If
            status is not 'incomplete', this is None.
        """
        # Remember if the lines ends in a new line.
        # (Trailing spaces/tabs after the newline are ignored.)
        ends_with_newline = False
        for character in reversed(cell):
            if character == '\n':
                ends_with_newline = True
                break
            elif character.strip():
                break
            else:
                continue

        if not ends_with_newline:
            # Append an newline for consistent tokenization
            # See https://bugs.python.org/issue33899
            cell += '\n'

        lines = cell.splitlines(keepends=True)

        if not lines:
            return 'complete', None

        # An explicit backslash continuation on the last substantive line
        # always means more input is expected.
        for line in reversed(lines):
            if not line.strip():
                continue
            elif line.strip("\n").endswith("\\"):
                return "incomplete", find_last_indent(lines)
            else:
                break

        try:
            for transform in self.cleanup_transforms:
                if not getattr(transform, 'has_side_effects', False):
                    lines = transform(lines)
        except SyntaxError:
            return 'invalid', None

        if lines[0].startswith('%%'):
            # Special case for cell magics - completion marked by blank line
            if lines[-1].strip():
                return 'incomplete', find_last_indent(lines)
            else:
                return 'complete', None

        try:
            for transform in self.line_transforms:
                if not getattr(transform, 'has_side_effects', False):
                    lines = transform(lines)
            lines = self.do_token_transforms(lines)
        except SyntaxError:
            return 'invalid', None

        tokens_by_line = make_tokens_by_line(lines)

        # Bail if we got one line and there are more closing parentheses than
        # the opening ones
        if (
            len(lines) == 1
            and tokens_by_line
            and has_sunken_brackets(tokens_by_line[0])
        ):
            return "invalid", None

        if not tokens_by_line:
            return 'incomplete', find_last_indent(lines)

        if (
            tokens_by_line[-1][-1].type != tokenize.ENDMARKER
            and tokens_by_line[-1][-1].type != tokenize.ERRORTOKEN
        ):
            # We're in a multiline string or expression
            return 'incomplete', find_last_indent(lines)

        newline_types = {tokenize.NEWLINE, tokenize.COMMENT, tokenize.ENDMARKER}  # type: ignore

        # Pop the last line which only contains DEDENTs and ENDMARKER
        last_token_line = None
        if {t.type for t in tokens_by_line[-1]} in [
            {tokenize.DEDENT, tokenize.ENDMARKER},
            {tokenize.ENDMARKER}
        ] and len(tokens_by_line) > 1:
            last_token_line = tokens_by_line.pop()

        # Strip trailing NEWLINE/COMMENT/ENDMARKER tokens off the last line.
        while tokens_by_line[-1] and tokens_by_line[-1][-1].type in newline_types:
            tokens_by_line[-1].pop()

        if not tokens_by_line[-1]:
            return 'incomplete', find_last_indent(lines)

        if tokens_by_line[-1][-1].string == ':':
            # The last line starts a block (e.g. 'if foo:')
            ix = 0
            while tokens_by_line[-1][ix].type in {tokenize.INDENT, tokenize.DEDENT}:
                ix += 1

            # Suggest one extra indent level (4 spaces) inside the new block.
            indent = tokens_by_line[-1][ix].start[1]
            return 'incomplete', indent + 4

        if tokens_by_line[-1][0].line.endswith('\\'):
            return 'incomplete', None

        # At this point, our checks think the code is complete (or invalid).
        # We'll use codeop.compile_command to check this with the real parser
        try:
            with warnings.catch_warnings():
                warnings.simplefilter('error', SyntaxWarning)
                res = compile_command(''.join(lines), symbol='exec')
        except (SyntaxError, OverflowError, ValueError, TypeError,
                MemoryError, SyntaxWarning):
            return 'invalid', None
        else:
            if res is None:
                return 'incomplete', find_last_indent(lines)

        if last_token_line and last_token_line[0].type == tokenize.DEDENT:
            if ends_with_newline:
                return 'complete', None
            return 'incomplete', find_last_indent(lines)

        # If there's a blank line at the end, assume we're ready to execute
        # NOTE(review): this branch returns the same value as the fallthrough
        # below, so it is effectively redundant — kept for clarity.
        if not lines[-1].strip():
            return 'complete', None

        return 'complete', None
def find_last_indent(lines):
    """Return the indent width of the last line, counting a tab as 4."""
    match = re.match(r'^[ \t]+', lines[-1])
    if match is None:
        return 0
    return len(match.group(0).replace('\t', '    '))
class MaybeAsyncCompile(Compile):
    """codeop.Compile variant that ORs *extra_flags* into the compiler
    flags (used below to allow top-level ``await``)."""

    def __init__(self, extra_flags=0):
        super().__init__()
        self.flags = self.flags | extra_flags
class MaybeAsyncCommandCompiler(CommandCompiler):
    """codeop.CommandCompiler whose compiler accepts extra compile flags."""
    def __init__(self, extra_flags=0):
        # Replace the default Compile instance with one carrying extra_flags.
        self.compiler = MaybeAsyncCompile(extra_flags=extra_flags)
# Allow 'await' at the top level when checking completeness, matching
# IPython's support for top-level await in interactive input.
_extra_flags = ast.PyCF_ALLOW_TOP_LEVEL_AWAIT

compile_command = MaybeAsyncCommandCompiler(extra_flags=_extra_flags)