# -----------------------------------------------------------------------------
# ply: lex.py
#
# Copyright (C) 2001-2022
# David M. Beazley (Dabeaz LLC)
# All rights reserved.
#
# Latest version: https://github.com/dabeaz/ply
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright notice,
#   this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
#   this list of conditions and the following disclaimer in the documentation
#   and/or other materials provided with the distribution.
# * Neither the name of David Beazley or Dabeaz LLC may be used to
#   endorse or promote products derived from this software without
#   specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
# -----------------------------------------------------------------------------

import re
import sys
import types
import copy
import os
import inspect

# This tuple contains acceptable string types
StringTypes = (str, bytes)

# This regular expression is used to match valid token names
_is_identifier = re.compile(r'^[a-zA-Z0-9_]+$')

# Exception raised when an invalid token is encountered and no default
# error handler is defined.
class LexError(Exception):
    def __init__(self, message, s):
        self.args = (message,)
        self.text = s

# Token class. This class is used to represent the tokens produced.
class LexToken(object):
    def __repr__(self):
        return f'LexToken({self.type},{self.value!r},{self.lineno},{self.lexpos})'
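
# Illustrative sketch (not part of the module): token rule functions receive
# and return LexToken instances. A hypothetical NUMBER rule might use the
# attributes like this:
#
#     def t_NUMBER(t):
#         r'\d+'
#         t.value = int(t.value)   # .value holds the matched text
#         return t                 # .type, .lineno, .lexpos are set by token()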

# This object is a stand-in for a logging object created by the
# logging module.

class PlyLogger(object):
    def __init__(self, f):
        self.f = f

    def critical(self, msg, *args, **kwargs):
        self.f.write((msg % args) + '\n')

    def warning(self, msg, *args, **kwargs):
        self.f.write('WARNING: ' + (msg % args) + '\n')

    def error(self, msg, *args, **kwargs):
        self.f.write('ERROR: ' + (msg % args) + '\n')

    info = critical
    debug = critical

# -----------------------------------------------------------------------------
# === Lexing Engine ===
#
# The following Lexer class implements the lexer runtime. There are only
# a few public methods and attributes:
#
#    input()  -  Store a new string in the lexer
#    token()  -  Get the next token
#    clone()  -  Clone the lexer
#
#    lineno   -  Current line number
#    lexpos   -  Current position in the input string
# -----------------------------------------------------------------------------
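
# Illustrative sketch (not part of the module): after building a lexer with
# lex() (defined later in this file), the runtime API above is used like so:
#
#     lexer = lex()               # build from the caller's t_ rules
#     lexer.input('x = 3')
#     while True:
#         tok = lexer.token()     # returns None when the input is exhausted
#         if not tok:
#             break
#         print(tok)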

class Lexer:
    def __init__(self):
        self.lexre = None             # Master regular expression. This is a list of
                                      # tuples (re, findex) where re is a compiled
                                      # regular expression and findex is a list
                                      # mapping regex group numbers to rules
        self.lexretext = None         # Current regular expression strings
        self.lexstatere = {}          # Dictionary mapping lexer states to master regexs
        self.lexstateretext = {}      # Dictionary mapping lexer states to regex strings
        self.lexstaterenames = {}     # Dictionary mapping lexer states to symbol names
        self.lexstate = 'INITIAL'     # Current lexer state
        self.lexstatestack = []       # Stack of lexer states
        self.lexstateinfo = None      # State information
        self.lexstateignore = {}      # Dictionary of ignored characters for each state
        self.lexstateerrorf = {}      # Dictionary of error functions for each state
        self.lexstateeoff = {}        # Dictionary of eof functions for each state
        self.lexreflags = 0           # Optional re compile flags
        self.lexdata = None           # Actual input data (as a string)
        self.lexpos = 0               # Current position in input text
        self.lexlen = 0               # Length of the input text
        self.lexerrorf = None         # Error rule (if any)
        self.lexeoff = None           # EOF rule (if any)
        self.lextokens = None         # List of valid tokens
        self.lexignore = ''           # Ignored characters
        self.lexliterals = ''         # Literal characters that can be passed through
        self.lexmodule = None         # Module
        self.lineno = 1               # Current line number

    def clone(self, object=None):
        c = copy.copy(self)

        # If the object parameter has been supplied, it means we are attaching the
        # lexer to a new object. In this case, we have to rebind all methods in
        # the lexstatere and lexstateerrorf tables.

        if object:
            newtab = {}
            for key, ritem in self.lexstatere.items():
                newre = []
                for cre, findex in ritem:
                    newfindex = []
                    for f in findex:
                        if not f or not f[0]:
                            newfindex.append(f)
                            continue
                        newfindex.append((getattr(object, f[0].__name__), f[1]))
                    newre.append((cre, newfindex))
                newtab[key] = newre
            c.lexstatere = newtab
            c.lexstateerrorf = {}
            for key, ef in self.lexstateerrorf.items():
                c.lexstateerrorf[key] = getattr(object, ef.__name__)
            c.lexmodule = object
        return c
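
    # Illustrative sketch (not part of the class): clone() supports scanning
    # several inputs with the same rule set. The compiled master regexs are
    # shared, but each clone keeps its own position after input():
    #
    #     lexer.input('first input')
    #     other = lexer.clone()
    #     other.input('second input')   # does not disturb 'lexer'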

    # ------------------------------------------------------------
    # input() - Push a new string into the lexer
    # ------------------------------------------------------------
    def input(self, s):
        self.lexdata = s
        self.lexpos = 0
        self.lexlen = len(s)

    # ------------------------------------------------------------
    # begin() - Changes the lexing state
    # ------------------------------------------------------------
    def begin(self, state):
        if state not in self.lexstatere:
            raise ValueError(f'Undefined state {state!r}')
        self.lexre = self.lexstatere[state]
        self.lexretext = self.lexstateretext[state]
        self.lexignore = self.lexstateignore.get(state, '')
        self.lexerrorf = self.lexstateerrorf.get(state, None)
        self.lexeoff = self.lexstateeoff.get(state, None)
        self.lexstate = state

    # ------------------------------------------------------------
    # push_state() - Changes the lexing state and saves old on stack
    # ------------------------------------------------------------
    def push_state(self, state):
        self.lexstatestack.append(self.lexstate)
        self.begin(state)

    # ------------------------------------------------------------
    # pop_state() - Restores the previous state
    # ------------------------------------------------------------
    def pop_state(self):
        self.begin(self.lexstatestack.pop())

    # ------------------------------------------------------------
    # current_state() - Returns the current lexing state
    # ------------------------------------------------------------
    def current_state(self):
        return self.lexstate

    # ------------------------------------------------------------
    # skip() - Skip ahead n characters
    # ------------------------------------------------------------
    def skip(self, n):
        self.lexpos += n
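
    # Illustrative sketch (not part of the class): token rules can drive the
    # state methods above through t.lexer. The 'ccomment' state and rule names
    # below are hypothetical examples:
    #
    #     def t_ccomment(t):
    #         r'/\*'
    #         t.lexer.push_state('ccomment')   # enter the comment state
    #
    #     def t_ccomment_end(t):
    #         r'\*/'
    #         t.lexer.pop_state()              # return to the previous state
    #
    #     def t_error(t):
    #         t.lexer.skip(1)                  # discard one offending character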

    # ------------------------------------------------------------
    # token() - Return the next token from the Lexer
    #
    # Note: This function has been carefully implemented to be as fast
    # as possible. Don't make changes unless you really know what
    # you are doing.
    # ------------------------------------------------------------
    def token(self):
        # Make local copies of frequently referenced attributes
        lexpos = self.lexpos
        lexlen = self.lexlen
        lexignore = self.lexignore
        lexdata = self.lexdata

        while lexpos < lexlen:
            # Short-circuit handling for whitespace, tabs, and other ignored characters
            if lexdata[lexpos] in lexignore:
                lexpos += 1
                continue

            # Look for a regular expression match
            for lexre, lexindexfunc in self.lexre:
                m = lexre.match(lexdata, lexpos)
                if not m:
                    continue

                # Create a token for return
                tok = LexToken()
                tok.value = m.group()
                tok.lineno = self.lineno
                tok.lexpos = lexpos

                i = m.lastindex
                func, tok.type = lexindexfunc[i]

                if not func:
                    # If no token type was set, it's an ignored token
                    if tok.type:
                        self.lexpos = m.end()
                        return tok
                    else:
                        lexpos = m.end()
                        break

                lexpos = m.end()

                # If the token is processed by a function, call it

                tok.lexer = self      # Set additional attributes useful in token rules
                self.lexmatch = m
                self.lexpos = lexpos
                newtok = func(tok)
                del tok.lexer
                del self.lexmatch

                # Every rule function must return a token. If it returns
                # nothing, we simply move on to the next token.
                if not newtok:
                    lexpos = self.lexpos         # This is here in case user has updated lexpos.
                    lexignore = self.lexignore   # This is here in case there was a state change
                    break
                return newtok
            else:
                # No match, see if in literals
                if lexdata[lexpos] in self.lexliterals:
                    tok = LexToken()
                    tok.value = lexdata[lexpos]
                    tok.lineno = self.lineno
                    tok.type = tok.value
                    tok.lexpos = lexpos
                    self.lexpos = lexpos + 1
                    return tok

                # No match. Call t_error() if defined.
                if self.lexerrorf:
                    tok = LexToken()
                    tok.value = self.lexdata[lexpos:]
                    tok.lineno = self.lineno
                    tok.type = 'error'
                    tok.lexer = self
                    tok.lexpos = lexpos
                    self.lexpos = lexpos
                    newtok = self.lexerrorf(tok)
                    if lexpos == self.lexpos:
                        # Error method didn't change text position at all. This is an error.
                        raise LexError(f"Scanning error. Illegal character {lexdata[lexpos]!r}",
                                       lexdata[lexpos:])
                    lexpos = self.lexpos
                    if not newtok:
                        continue
                    return newtok

                self.lexpos = lexpos
                raise LexError(f"Illegal character {lexdata[lexpos]!r} at index {lexpos}",
                               lexdata[lexpos:])

        if self.lexeoff:
            tok = LexToken()
            tok.type = 'eof'
            tok.value = ''
            tok.lineno = self.lineno
            tok.lexpos = lexpos
            tok.lexer = self
            self.lexpos = lexpos
            newtok = self.lexeoff(tok)
            return newtok

        self.lexpos = lexpos + 1
        if self.lexdata is None:
            raise RuntimeError('No input string given with input()')
        return None

    # Iterator interface
    def __iter__(self):
        return self

    def __next__(self):
        t = self.token()
        if t is None:
            raise StopIteration
        return t
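
    # Illustrative sketch (not part of the class): the iterator interface lets
    # a lexer be consumed directly in a for-loop, equivalent to calling token()
    # until it returns None:
    #
    #     lexer.input('some input')
    #     for tok in lexer:
    #         print(tok)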

# -----------------------------------------------------------------------------
# === Lex Builder ===
#
# The functions and classes below are used to collect lexing information
# and build a Lexer object from it.
# -----------------------------------------------------------------------------

# -----------------------------------------------------------------------------
# _get_regex(func)
#
# Returns the regular expression assigned to a function either as a doc string
# or as a .regex attribute attached by the @TOKEN decorator.
# -----------------------------------------------------------------------------
def _get_regex(func):
    return getattr(func, 'regex', func.__doc__)

# -----------------------------------------------------------------------------
# get_caller_module_dict()
#
# This function returns a dictionary containing all of the symbols defined
# within a caller a given number of levels up the call stack. This is used to
# get the environment associated with the lex() call if none was provided.
# -----------------------------------------------------------------------------
def get_caller_module_dict(levels):
    f = sys._getframe(levels)
    return { **f.f_globals, **f.f_locals }

# -----------------------------------------------------------------------------
# _form_master_re()
#
# This function takes a list of all of the regex components and attempts to
# form the master regular expression. Given limitations in the Python re
# module, it may be necessary to break the master regex into separate expressions.
# -----------------------------------------------------------------------------
def _form_master_re(relist, reflags, ldict, toknames):
    if not relist:
        return [], [], []
    regex = '|'.join(relist)
    try:
        lexre = re.compile(regex, reflags)

        # Build the index to function map for the matching engine
        lexindexfunc = [None] * (max(lexre.groupindex.values()) + 1)
        lexindexnames = lexindexfunc[:]

        for f, i in lexre.groupindex.items():
            handle = ldict.get(f, None)
            if type(handle) in (types.FunctionType, types.MethodType):
                lexindexfunc[i] = (handle, toknames[f])
                lexindexnames[i] = f
            elif handle is not None:
                lexindexnames[i] = f
                if f.find('ignore_') > 0:
                    lexindexfunc[i] = (None, None)
                else:
                    lexindexfunc[i] = (None, toknames[f])
        return [(lexre, lexindexfunc)], [regex], [lexindexnames]
    except Exception:
        m = (len(relist) // 2) + 1
        llist, lre, lnames = _form_master_re(relist[:m], reflags, ldict, toknames)
        rlist, rre, rnames = _form_master_re(relist[m:], reflags, ldict, toknames)
        return (llist+rlist), (lre+rre), (lnames+rnames)
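
# Illustrative sketch (not part of the module): each element of relist is a
# named-group pattern such as r'(?P<t_NUMBER>\d+)'. On success a single
# compiled alternation is returned; if re.compile() fails (for example, older
# Python versions capped the number of named groups in one pattern), the list
# is halved and each half is compiled recursively:
#
#     relist = [r'(?P<t_NUMBER>\d+)', r'(?P<t_ID>[A-Za-z_]\w*)']
#     lexre, retext, renames = _form_master_re(relist, 0, ldict, toknames)
#     # lexre is a list of (compiled regex, group-index-to-rule map) tuples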

# -----------------------------------------------------------------------------
# def _statetoken(s, names)
#
# Given a declaration name s of the form "t_" and a dictionary whose keys are
# state names, this function returns a tuple (states, tokenname) where states
# is a tuple of state names and tokenname is the name of the token. For example,
# calling this with s = "t_foo_bar_SPAM" might return (('foo','bar'),'SPAM')
# -----------------------------------------------------------------------------
def _statetoken(s, names):
    parts = s.split('_')
    for i, part in enumerate(parts[1:], 1):
        if part not in names and part != 'ANY':
            break

    if i > 1:
        states = tuple(parts[1:i])
    else:
        states = ('INITIAL',)

    if 'ANY' in states:
        states = tuple(names)

    tokenname = '_'.join(parts[i:])
    return (states, tokenname)
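
# Illustrative sketch (not part of the module): with the INITIAL state plus a
# hypothetical 'ccomment' state, _statetoken() splits symbols as follows:
#
#     names = {'INITIAL': 'inclusive', 'ccomment': 'exclusive'}
#     _statetoken('t_NUMBER', names)        # -> (('INITIAL',), 'NUMBER')
#     _statetoken('t_ccomment_end', names)  # -> (('ccomment',), 'end')
#     _statetoken('t_ANY_error', names)     # -> (('INITIAL', 'ccomment'), 'error')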

# -----------------------------------------------------------------------------
# LexerReflect()
#
# This class represents information needed to build a lexer as extracted from a
# user's input file.
# -----------------------------------------------------------------------------
class LexerReflect(object):
    def __init__(self, ldict, log=None, reflags=0):
        self.ldict = ldict
        self.error_func = None
        self.tokens = []
        self.reflags = reflags
        self.stateinfo = {'INITIAL': 'inclusive'}
        self.modules = set()
        self.error = False
        self.log = PlyLogger(sys.stderr) if log is None else log

    # Get all of the basic information
    def get_all(self):
        self.get_tokens()
        self.get_literals()
        self.get_states()
        self.get_rules()

    # Validate all of the information
    def validate_all(self):
        self.validate_tokens()
        self.validate_literals()
        self.validate_rules()
        return self.error

    # Get the tokens map
    def get_tokens(self):
        tokens = self.ldict.get('tokens', None)
        if not tokens:
            self.log.error('No token list is defined')
            self.error = True
            return

        if not isinstance(tokens, (list, tuple)):
            self.log.error('tokens must be a list or tuple')
            self.error = True
            return

        if not tokens:
            self.log.error('tokens is empty')
            self.error = True
            return

        self.tokens = tokens

    # Validate the tokens
    def validate_tokens(self):
        terminals = {}
        for n in self.tokens:
            if not _is_identifier.match(n):
                self.log.error(f"Bad token name {n!r}")
                self.error = True
            if n in terminals:
                self.log.warning(f"Token {n!r} multiply defined")
            terminals[n] = 1

    # Get the literals specifier
    def get_literals(self):
        self.literals = self.ldict.get('literals', '')
        if not self.literals:
            self.literals = ''

    # Validate literals
    def validate_literals(self):
        try:
            for c in self.literals:
                if not isinstance(c, StringTypes) or len(c) > 1:
                    self.log.error(f'Invalid literal {c!r}. Must be a single character')
                    self.error = True

        except TypeError:
            self.log.error('Invalid literals specification. literals must be a sequence of characters')
            self.error = True
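
    # Illustrative sketch (not part of the class): a valid literals
    # specification is a string or sequence of single characters, each of
    # which is returned as a token whose type is the character itself:
    #
    #     literals = '+-*/()'            # as a string
    #     literals = ['+', '-', '*']     # or as a list of one-character strings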

    def get_states(self):
        self.states = self.ldict.get('states', None)
        # Build statemap
        if self.states:
            if not isinstance(self.states, (tuple, list)):
                self.log.error('states must be defined as a tuple or list')
                self.error = True
            else:
                for s in self.states:
                    if not isinstance(s, tuple) or len(s) != 2:
                        self.log.error("Invalid state specifier %r. Must be a tuple (statename,'exclusive|inclusive')", s)
                        self.error = True
                        continue
                    name, statetype = s
                    if not isinstance(name, StringTypes):
                        self.log.error('State name %r must be a string', name)
                        self.error = True
                        continue
                    if not (statetype == 'inclusive' or statetype == 'exclusive'):
                        self.log.error("State type for state %r must be 'inclusive' or 'exclusive'", name)
                        self.error = True
                        continue
                    if name in self.stateinfo:
                        self.log.error("State %r already defined", name)
                        self.error = True
                        continue
                    self.stateinfo[name] = statetype
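
    # Illustrative sketch (not part of the class): a well-formed states
    # declaration is a sequence of (name, type) tuples, for example:
    #
    #     states = (
    #         ('ccomment', 'exclusive'),   # hypothetical C-comment state
    #         ('verbose',  'inclusive'),
    #     )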

    # Get all of the symbols with a t_ prefix and sort them into various
    # categories (functions, strings, error functions, and ignore characters)

    def get_rules(self):
        tsymbols = [f for f in self.ldict if f[:2] == 't_']

        # Now build up a list of functions and a list of strings
        self.toknames = {}        # Mapping of symbols to token names
        self.funcsym = {}         # Symbols defined as functions
        self.strsym = {}          # Symbols defined as strings
        self.ignore = {}          # Ignore strings by state
        self.errorf = {}          # Error functions by state
        self.eoff = {}            # EOF functions by state

        for s in self.stateinfo:
            self.funcsym[s] = []
            self.strsym[s] = []

        if len(tsymbols) == 0:
            self.log.error('No rules of the form t_rulename are defined')
            self.error = True
            return

        for f in tsymbols:
            t = self.ldict[f]
            states, tokname = _statetoken(f, self.stateinfo)
            self.toknames[f] = tokname

            if hasattr(t, '__call__'):
                if tokname == 'error':
                    for s in states:
                        self.errorf[s] = t
                elif tokname == 'eof':
                    for s in states:
                        self.eoff[s] = t
                elif tokname == 'ignore':
                    line = t.__code__.co_firstlineno
                    file = t.__code__.co_filename
                    self.log.error("%s:%d: Rule %r must be defined as a string", file, line, t.__name__)
                    self.error = True
                else:
                    for s in states:
                        self.funcsym[s].append((f, t))
            elif isinstance(t, StringTypes):
                if tokname == 'ignore':
                    for s in states:
                        self.ignore[s] = t
                        if '\\' in t:
                            self.log.warning("%s contains a literal backslash '\\'", f)

                elif tokname == 'error':
                    self.log.error("Rule %r must be defined as a function", f)
                    self.error = True
                else:
                    for s in states:
                        self.strsym[s].append((f, t))
            else:
                self.log.error('%s not defined as a function or string', f)
                self.error = True

        # Sort the functions by line number
        for f in self.funcsym.values():
            f.sort(key=lambda x: x[1].__code__.co_firstlineno)

        # Sort the strings by regular expression length
        for s in self.strsym.values():
            s.sort(key=lambda x: len(x[1]), reverse=True)

    # Validate all of the t_rules collected
    def validate_rules(self):
        for state in self.stateinfo:
            # Validate all rules defined by functions

            for fname, f in self.funcsym[state]:
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                tokname = self.toknames[fname]
                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule %r has too many arguments", file, line, f.__name__)
                    self.error = True
                    continue

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule %r requires an argument", file, line, f.__name__)
                    self.error = True
                    continue

                if not _get_regex(f):
                    self.log.error("%s:%d: No regular expression defined for rule %r", file, line, f.__name__)
                    self.error = True
                    continue

                try:
                    c = re.compile('(?P<%s>%s)' % (fname, _get_regex(f)), self.reflags)
                    if c.match(''):
                        self.log.error("%s:%d: Regular expression for rule %r matches empty string", file, line, f.__name__)
                        self.error = True
                except re.error as e:
                    self.log.error("%s:%d: Invalid regular expression for rule '%s'. %s", file, line, f.__name__, e)
                    if '#' in _get_regex(f):
                        self.log.error("%s:%d. Make sure '#' in rule %r is escaped with '\\#'", file, line, f.__name__)
                    self.error = True

            # Validate all rules defined by strings
            for name, r in self.strsym[state]:
                tokname = self.toknames[name]
                if tokname == 'error':
                    self.log.error("Rule %r must be defined as a function", name)
                    self.error = True
                    continue

                if tokname not in self.tokens and tokname.find('ignore_') < 0:
                    self.log.error("Rule %r defined for an unspecified token %s", name, tokname)
                    self.error = True
                    continue

                try:
                    c = re.compile('(?P<%s>%s)' % (name, r), self.reflags)
                    if c.match(''):
                        self.log.error("Regular expression for rule %r matches empty string", name)
                        self.error = True
                except re.error as e:
                    self.log.error("Invalid regular expression for rule %r. %s", name, e)
                    if '#' in r:
                        self.log.error("Make sure '#' in rule %r is escaped with '\\#'", name)
                    self.error = True

            if not self.funcsym[state] and not self.strsym[state]:
                self.log.error("No rules defined for state %r", state)
                self.error = True

            # Validate the error function
            efunc = self.errorf.get(state, None)
            if efunc:
                f = efunc
                line = f.__code__.co_firstlineno
                file = f.__code__.co_filename
                module = inspect.getmodule(f)
                self.modules.add(module)

                if isinstance(f, types.MethodType):
                    reqargs = 2
                else:
                    reqargs = 1
                nargs = f.__code__.co_argcount
                if nargs > reqargs:
                    self.log.error("%s:%d: Rule %r has too many arguments", file, line, f.__name__)
                    self.error = True

                if nargs < reqargs:
                    self.log.error("%s:%d: Rule %r requires an argument", file, line, f.__name__)
                    self.error = True

        for module in self.modules:
            self.validate_module(module)

    # -----------------------------------------------------------------------------
    # validate_module()
    #
    # This checks to see if there are duplicated t_rulename() functions or strings
    # in the lexer input file. This is done using a simple regular expression
    # match on each line in the source code of the given module.
    # -----------------------------------------------------------------------------

    def validate_module(self, module):
        try:
            lines, linen = inspect.getsourcelines(module)
        except IOError:
            return

        fre = re.compile(r'\s*def\s+(t_[a-zA-Z_0-9]*)\(')
        sre = re.compile(r'\s*(t_[a-zA-Z_0-9]*)\s*=')

        counthash = {}
        linen += 1
        for line in lines:
            m = fre.match(line)
            if not m:
                m = sre.match(line)
            if m:
                name = m.group(1)
                prev = counthash.get(name)
                if not prev:
                    counthash[name] = linen
                else:
                    filename = inspect.getsourcefile(module)
                    self.log.error('%s:%d: Rule %s redefined. Previously defined on line %d', filename, linen, name, prev)
                    self.error = True
            linen += 1

# -----------------------------------------------------------------------------
# lex(module)
#
# Build all of the regular expression rules from definitions in the supplied module
# -----------------------------------------------------------------------------
def lex(*, module=None, object=None, debug=False,
        reflags=int(re.VERBOSE), debuglog=None, errorlog=None):

    global lexer

    ldict = None
    stateinfo = {'INITIAL': 'inclusive'}
    lexobj = Lexer()
    global token, input

    if errorlog is None:
        errorlog = PlyLogger(sys.stderr)

    if debug:
        if debuglog is None:
            debuglog = PlyLogger(sys.stderr)

    # If an instance object was supplied, extract the rules from it instead of a module
    if object:
        module = object

    # Get the module dictionary used for the lexer
    if module:
        _items = [(k, getattr(module, k)) for k in dir(module)]
        ldict = dict(_items)
        # If no __file__ attribute is available, try to obtain it from the __module__ instead
        if '__file__' not in ldict:
            ldict['__file__'] = sys.modules[ldict['__module__']].__file__
    else:
        ldict = get_caller_module_dict(2)

    # Collect lexer information from the dictionary
    linfo = LexerReflect(ldict, log=errorlog, reflags=reflags)
    linfo.get_all()
    if linfo.validate_all():
        raise SyntaxError("Can't build lexer")

    # Dump some basic debugging information
    if debug:
        debuglog.info('lex: tokens   = %r', linfo.tokens)
        debuglog.info('lex: literals = %r', linfo.literals)
        debuglog.info('lex: states   = %r', linfo.stateinfo)

    # Build a dictionary of valid token names
    lexobj.lextokens = set()
    for n in linfo.tokens:
        lexobj.lextokens.add(n)

    # Get literals specification
    if isinstance(linfo.literals, (list, tuple)):
        lexobj.lexliterals = type(linfo.literals[0])().join(linfo.literals)
    else:
        lexobj.lexliterals = linfo.literals

    lexobj.lextokens_all = lexobj.lextokens | set(lexobj.lexliterals)

    # Get the stateinfo dictionary
    stateinfo = linfo.stateinfo

    regexs = {}
    # Build the master regular expressions
    for state in stateinfo:
        regex_list = []

        # Add rules defined by functions first
        for fname, f in linfo.funcsym[state]:
            regex_list.append('(?P<%s>%s)' % (fname, _get_regex(f)))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", fname, _get_regex(f), state)

        # Now add all of the simple rules
        for name, r in linfo.strsym[state]:
            regex_list.append('(?P<%s>%s)' % (name, r))
            if debug:
                debuglog.info("lex: Adding rule %s -> '%s' (state '%s')", name, r, state)

        regexs[state] = regex_list

    # Build the master regular expressions

    if debug:
        debuglog.info('lex: ==== MASTER REGEXS FOLLOW ====')

    for state in regexs:
        lexre, re_text, re_names = _form_master_re(regexs[state], reflags, ldict, linfo.toknames)
        lexobj.lexstatere[state] = lexre
        lexobj.lexstateretext[state] = re_text
        lexobj.lexstaterenames[state] = re_names
        if debug:
            for i, text in enumerate(re_text):
                debuglog.info("lex: state '%s' : regex[%d] = '%s'", state, i, text)

    # For inclusive states, we need to add the regular expressions from the INITIAL state
    for state, stype in stateinfo.items():
        if state != 'INITIAL' and stype == 'inclusive':
            lexobj.lexstatere[state].extend(lexobj.lexstatere['INITIAL'])
            lexobj.lexstateretext[state].extend(lexobj.lexstateretext['INITIAL'])
            lexobj.lexstaterenames[state].extend(lexobj.lexstaterenames['INITIAL'])

    lexobj.lexstateinfo = stateinfo
    lexobj.lexre = lexobj.lexstatere['INITIAL']
    lexobj.lexretext = lexobj.lexstateretext['INITIAL']
    lexobj.lexreflags = reflags

    # Set up ignore variables
    lexobj.lexstateignore = linfo.ignore
    lexobj.lexignore = lexobj.lexstateignore.get('INITIAL', '')

    # Set up error functions
    lexobj.lexstateerrorf = linfo.errorf
    lexobj.lexerrorf = linfo.errorf.get('INITIAL', None)
    if not lexobj.lexerrorf:
        errorlog.warning('No t_error rule is defined')

    # Set up eof functions
    lexobj.lexstateeoff = linfo.eoff
    lexobj.lexeoff = linfo.eoff.get('INITIAL', None)

    # Check state information for ignore and error rules
    for s, stype in stateinfo.items():
        if stype == 'exclusive':
            if s not in linfo.errorf:
                errorlog.warning("No error rule is defined for exclusive state %r", s)
            if s not in linfo.ignore and lexobj.lexignore:
                errorlog.warning("No ignore rule is defined for exclusive state %r", s)
        elif stype == 'inclusive':
            if s not in linfo.errorf:
                linfo.errorf[s] = linfo.errorf.get('INITIAL', None)
            if s not in linfo.ignore:
                linfo.ignore[s] = linfo.ignore.get('INITIAL', '')

    # Create global versions of the token() and input() functions
    token = lexobj.token
    input = lexobj.input
    lexer = lexobj

    return lexobj
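
# Illustrative sketch (not part of the module): a minimal caller of lex(),
# with assumed example tokens and rules:
#
#     import ply.lex as lex
#
#     tokens = ('NUMBER', 'PLUS')
#     t_PLUS = r'\+'
#     t_ignore = ' \t'
#
#     def t_NUMBER(t):
#         r'\d+'
#         t.value = int(t.value)
#         return t
#
#     def t_error(t):
#         t.lexer.skip(1)
#
#     lexer = lex.lex()   # builds a Lexer from the caller's namespace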

# -----------------------------------------------------------------------------
# runmain()
#
# This runs the lexer as a main program
# -----------------------------------------------------------------------------

def runmain(lexer=None, data=None):
    if not data:
        try:
            filename = sys.argv[1]
            with open(filename) as f:
                data = f.read()
        except IndexError:
            sys.stdout.write('Reading from standard input (type EOF to end):\n')
            data = sys.stdin.read()

    if lexer:
        _input = lexer.input
    else:
        _input = input
    _input(data)
    if lexer:
        _token = lexer.token
    else:
        _token = token

    while True:
        tok = _token()
        if not tok:
            break
        sys.stdout.write(f'({tok.type},{tok.value!r},{tok.lineno},{tok.lexpos})\n')

# -----------------------------------------------------------------------------
# @TOKEN(regex)
#
# This decorator attaches a regular expression to a rule function, for use
# when the pattern cannot conveniently be supplied as a docstring (for
# example, when it is computed or shared between rules).
# -----------------------------------------------------------------------------

def TOKEN(r):
    def set_regex(f):
        if hasattr(r, '__call__'):
            f.regex = _get_regex(r)
        else:
            f.regex = r
        return f
    return set_regex
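
# Illustrative sketch (not part of the module): @TOKEN attaches a computed
# pattern to a rule function, which _get_regex() then prefers over the
# docstring. The 'identifier' pattern below is an assumed example:
#
#     identifier = r'[A-Za-z_][A-Za-z0-9_]*'
#
#     @TOKEN(identifier)
#     def t_ID(t):
#         return t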