Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/lark/parsers/xearley.py: 85%

73 statements  

coverage.py v7.3.1, created at 2023-09-25 06:30 +0000

"""This module implements an experimental Earley parser with a dynamic lexer

The core Earley algorithm used here is based on Elizabeth Scott's implementation, here:
    https://www.sciencedirect.com/science/article/pii/S1571066108001497

That is probably the best reference for understanding the algorithm here.

The Earley parser outputs an SPPF-tree as per that document. The SPPF tree format
is better documented here:
    http://www.bramvandersanden.com/post/2014/06/shared-packed-parse-forest/

Instead of running a lexer beforehand, or using a costly char-by-char method, this parser
uses regular expressions by necessity, achieving high performance while maintaining all of
Earley's power in parsing any CFG.
"""
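# A minimal usage sketch (not part of the original module), assuming the standard
# Lark front end: this parser is normally reached through lark.Lark rather than
# constructed directly, e.g.
#
#     from lark import Lark
#     parser = Lark('start: "a"+', parser='earley', lexer='dynamic')
#     tree = parser.parse("aaa")
#
# The front end builds lexer_conf, parser_conf and term_matcher before invoking
# the Parser constructor defined below.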

from collections import defaultdict

from ..tree import Tree
from ..exceptions import UnexpectedCharacters
from ..lexer import Token
from ..grammar import Terminal
from .earley import Parser as BaseParser
from .earley_forest import SymbolNode, TokenNode

class Parser(BaseParser):
    def __init__(self, lexer_conf, parser_conf, term_matcher, resolve_ambiguity=True, complete_lex=False, debug=False, tree_class=Tree):
        BaseParser.__init__(self, lexer_conf, parser_conf, term_matcher, resolve_ambiguity, debug, tree_class)
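        # Dynamic-lexer specifics: terminals that may be skipped anywhere in the
        # input (handled by scan() below), and whether every shorter prefix of a
        # regexp match should also be offered to later parse steps (complete_lex).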

        self.ignore = [Terminal(t) for t in lexer_conf.ignore]
        self.complete_lex = complete_lex


    def _parse(self, stream, columns, to_scan, start_symbol=None):

        def scan(i, to_scan):
            """The core Earley Scanner.

            This is a custom implementation of the scanner that uses the
            Lark lexer to match tokens. The scan list is built by the
            Earley predictor, based on the previously completed tokens.
            This ensures that at each phase of the parse we have a custom
            lexer context, allowing for more complex ambiguities."""

            node_cache = {}
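            # Per-step cache of SPPF symbol nodes, keyed by (symbol, start, end),
            # so that multiple matches over the same span share a single node.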


            # 1) Loop the expectations and ask the lexer to match.
            # Since regexps are forward-looking on the input stream, and we only
            # want to process tokens when we hit the point in the stream at which
            # they complete, we push all tokens into a buffer (delayed_matches), to
            # be held possibly for a later parse step when we reach the point in the
            # input stream at which they complete.
            for item in set(to_scan):
                m = match(item.expect, stream, i)
                if m:
                    t = Token(item.expect.name, m.group(0), i, text_line, text_column)
                    delayed_matches[m.end()].append( (item, i, t) )

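                    # complete_lex additionally registers every shorter prefix of the
                    # match, so later parse steps can still pick a shorter token if
                    # the longest match turns out not to fit the grammar.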

                    if self.complete_lex:
                        s = m.group(0)
                        for j in range(1, len(s)):
                            m = match(item.expect, s[:-j])
                            if m:
                                t = Token(item.expect.name, m.group(0), i, text_line, text_column)
                                delayed_matches[i+m.end()].append( (item, i, t) )

                    # XXX The following 3 lines were commented out for causing a bug. See issue #768
                    # # Remove any items that successfully matched in this pass from the to_scan buffer.
                    # # This ensures we don't carry over tokens that already matched, if we're ignoring below.
                    # to_scan.remove(item)


            # 3) Process any ignores. This is typically used for e.g. whitespace.
            # We carry over any unmatched items from the to_scan buffer to be matched again after
            # the ignore. This should allow us to use ignored symbols in non-terminals to implement
            # e.g. mandatory spacing.
            for x in self.ignore:
                m = match(x, stream, i)
                if m:
                    # Carry over any items still in the scan buffer, to past the end of the ignored items.
                    delayed_matches[m.end()].extend([(item, i, None) for item in to_scan])

                    # If we're ignoring up to the end of the file, carry over the start symbol if it already completed.
                    delayed_matches[m.end()].extend([(item, i, None) for item in columns[i] if item.is_complete and item.s == start_symbol])

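            # Open the Earley set for position i+1: a fresh column, a fresh transitive
            # cache, and a fresh buffer of items whose next expected symbol is a terminal.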

            next_to_scan = set()
            next_set = set()
            columns.append(next_set)
            transitives.append({})

            ## 4) Process Tokens from delayed_matches.
            # This is the core of the Earley scanner. Create an SPPF node for each Token,
            # and create the symbol node in the SPPF tree. Advance the item that completed,
            # and add the resulting new item to either the Earley set (for processing by the
            # completer/predictor) or the to_scan buffer for the next parse step.
            for item, start, token in delayed_matches[i+1]:
                if token is not None:
                    token.end_line = text_line
                    token.end_column = text_column + 1
                    token.end_pos = i + 1

                    new_item = item.advance()
                    label = (new_item.s, new_item.start, i)
                    token_node = TokenNode(token, terminals[token.type])
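                    # Share one SPPF symbol node per (symbol, start, end) span, so
                    # ambiguous derivations of the same span are packed together.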

                    new_item.node = node_cache[label] if label in node_cache else node_cache.setdefault(label, SymbolNode(*label))
                    new_item.node.add_family(new_item.s, item.rule, new_item.start, item.node, token_node)
                else:
                    new_item = item

                if new_item.expect in self.TERMINALS:
                    # add (B ::= Aai+1.B, h, y) to Q'
                    next_to_scan.add(new_item)
                else:
                    # add (B ::= Aai+1.B, h, y) to Ei+1
                    next_set.add(new_item)

            del delayed_matches[i+1]    # No longer needed, so unburden memory


            if not next_set and not delayed_matches and not next_to_scan:
                considered_rules = list(sorted(to_scan, key=lambda key: key.rule.origin.name))
                raise UnexpectedCharacters(stream, i, text_line, text_column, {item.expect.name for item in to_scan},
                                           set(to_scan), state=frozenset(i.s for i in to_scan),
                                           considered_rules=considered_rules)

            return next_to_scan

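        # Buffer of pending matches, keyed by the stream offset at which each match
        # ends; scan() drains the entry for the current position on every step.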

        delayed_matches = defaultdict(list)
        match = self.term_matcher
        terminals = self.lexer_conf.terminals_by_name

        # Cache for nodes & tokens created in a particular parse step.
        transitives = [{}]

        text_line = 1
        text_column = 1

        ## The main Earley loop.
        # Run the Prediction/Completion cycle for any Items in the current Earley set.
        # Completions will be added to the SPPF tree, and predictions will be recursively
        # processed down to terminals/empty nodes to be added to the scanner for the next
        # step.
        i = 0
        for token in stream:
            self.predict_and_complete(i, to_scan, columns, transitives)

            to_scan = scan(i, to_scan)

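            # Maintain 1-based line/column positions over the raw character stream,
            # used for token locations and error reporting.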

            if token == '\n':
                text_line += 1
                text_column = 1
            else:
                text_column += 1
            i += 1

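        # One final prediction/completion pass over the last column; there is no
        # further input left to scan.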

        self.predict_and_complete(i, to_scan, columns, transitives)

        ## Column is now the final column in the parse.
        assert i == len(columns)-1
        return to_scan