Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.9/dist-packages/IPython/utils/tokenutil.py: 25%

79 statements  

coverage.py v7.3.1, created at 2023-09-25 06:05 +0000

1"""Token-related utilities""" 

2 

3# Copyright (c) IPython Development Team. 

4# Distributed under the terms of the Modified BSD License. 

5 

6from collections import namedtuple 

7from io import StringIO 

8from keyword import iskeyword 

9 

10import tokenize 

11 

12 

13Token = namedtuple('Token', ['token', 'text', 'start', 'end', 'line']) 

14 
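
Token mirrors tokenize.TokenInfo but renames the type and string fields to token and text, so a TokenInfo 5-tuple unpacks straight into it. A quick illustrative sketch (not part of the original module):

    import tokenize
    from io import StringIO

    info = next(tokenize.generate_tokens(StringIO("pi = 3.14").readline))
    tok = Token(*info)  # positional unpacking: type -> token, string -> text
    assert tok.token == info.type and tok.text == info.string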

def generate_tokens(readline):
    """wrap generate_tokens to catch EOF errors"""
    try:
        for token in tokenize.generate_tokens(readline):
            yield token
    except tokenize.TokenError:
        # catch EOF error
        return
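
A minimal usage sketch (not from the original file): an unterminated '(' makes the underlying tokenizer raise tokenize.TokenError at end of input, which this wrapper swallows so iteration simply stops after the last complete token.

    from io import StringIO

    for tok in generate_tokens(StringIO("x = (1,").readline):
        print(tok.type, repr(tok.string))
    # no exception: the EOF TokenError is caught and the generator returns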

def generate_tokens_catch_errors(readline, extra_errors_to_catch=None):
    """Like generate_tokens, but convert recognised tokenizer errors into a
    trailing ERRORTOKEN instead of raising."""
    default_errors_to_catch = [
        "unterminated string literal",
        "invalid non-printable character",
        "after line continuation character",
    ]
    assert extra_errors_to_catch is None or isinstance(extra_errors_to_catch, list)
    errors_to_catch = default_errors_to_catch + (extra_errors_to_catch or [])

    tokens = []
    try:
        for token in tokenize.generate_tokens(readline):
            tokens.append(token)
            yield token
    except tokenize.TokenError as exc:
        if any(error in exc.args[0] for error in errors_to_catch):
            if tokens:
                start = tokens[-1].start[0], tokens[-1].end[0]
                end = start
                line = tokens[-1].line
            else:
                start = end = (1, 0)
                line = ""
            yield tokenize.TokenInfo(tokenize.ERRORTOKEN, "", start, end, line)
        else:
            # not an error we recognise (e.g. plain EOF): re-raise
            raise
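
A hedged sketch of the extra_errors_to_catch hook. The message below is what Python 3.9's tokenizer (the interpreter this report was generated under) reports for an unterminated '('; other versions word it differently. A matching error is converted into a trailing ERRORTOKEN instead of propagating:

    from io import StringIO
    import tokenize

    toks = list(generate_tokens_catch_errors(
        StringIO("x = (1,").readline,
        extra_errors_to_catch=["EOF in multi-line statement"],
    ))
    assert toks[-1].type == tokenize.ERRORTOKEN  # synthetic error token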

def line_at_cursor(cell, cursor_pos=0):
    """Return the line in a cell at a given cursor position

    Used for calling line-based APIs that don't support multi-line input, yet.

    Parameters
    ----------
    cell : str
        multiline block of text
    cursor_pos : integer
        the cursor position

    Returns
    -------
    (line, offset): (string, integer)
        The line with the current cursor, and the character offset of the start of the line.
    """
    offset = 0
    lines = cell.splitlines(True)
    for line in lines:
        next_offset = offset + len(line)
        if not line.endswith('\n'):
            # If the last line doesn't have a trailing newline, treat it as if
            # it does so that the cursor at the end of the line still counts
            # as being on that line.
            next_offset += 1
        if next_offset > cursor_pos:
            break
        offset = next_offset
    else:
        line = ""
    return (line, offset)
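
For example (illustrative, not from the original file): a cursor on the second line returns that line plus the offset at which it starts, and the trailing-newline special case keeps a cursor at the very end of the cell on the last line.

    line, offset = line_at_cursor("first\nsecond\nthird", cursor_pos=8)
    assert (line, offset) == ("second\n", 6)
    line, offset = line_at_cursor("first\nsecond\nthird", cursor_pos=18)
    assert (line, offset) == ("third", 13)  # cursor at end of cell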

def token_at_cursor(cell, cursor_pos=0):
    """Get the token at a given cursor

    Used for introspection.

    Function calls are prioritized, so the token for the callable will be returned
    if the cursor is anywhere inside the call.

    Parameters
    ----------
    cell : unicode
        A block of Python code
    cursor_pos : int
        The location of the cursor in the block where the token should be found
    """
    names = []
    tokens = []
    call_names = []

    offsets = {1: 0}  # lines start at 1
    for tup in generate_tokens(StringIO(cell).readline):

        tok = Token(*tup)

        # token, text, start, end, line = tup
        start_line, start_col = tok.start
        end_line, end_col = tok.end
        if end_line + 1 not in offsets:
            # keep track of offsets for each line
            lines = tok.line.splitlines(True)
            for lineno, line in enumerate(lines, start_line + 1):
                if lineno not in offsets:
                    offsets[lineno] = offsets[lineno - 1] + len(line)

        offset = offsets[start_line]
        # allow '|foo' to find 'foo' at the beginning of a line
        boundary = cursor_pos + 1 if start_col == 0 else cursor_pos
        if offset + start_col >= boundary:
            # current token starts after the cursor,
            # don't consume it
            break

        if tok.token == tokenize.NAME and not iskeyword(tok.text):
            if names and tokens and tokens[-1].token == tokenize.OP and tokens[-1].text == '.':
                names[-1] = "%s.%s" % (names[-1], tok.text)
            else:
                names.append(tok.text)
        elif tok.token == tokenize.OP:
            if tok.text == '=' and names:
                # don't inspect the lhs of an assignment
                names.pop(-1)
            if tok.text == '(' and names:
                # if we are inside a function call, inspect the function
                call_names.append(names[-1])
            elif tok.text == ')' and call_names:
                call_names.pop(-1)

        tokens.append(tok)

        if offsets[end_line] + end_col > cursor_pos:
            # we found the cursor, stop reading
            break

    if call_names:
        return call_names[-1]
    elif names:
        return names[-1]
    else:
        return ''
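
Two illustrative calls (not part of the original module) showing the precedence rules: a dotted name is assembled across '.' tokens, but an enclosing call wins, so the callable's name is returned even when the cursor sits on an argument.

    assert token_at_cursor("a = obj.attr", cursor_pos=9) == "obj.attr"
    assert token_at_cursor("foo(bar.baz, 1)", cursor_pos=8) == "foo"  # call wins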