1"""
2 pygments.lexer
3 ~~~~~~~~~~~~~~
5 Base lexer classes.
7 :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
8 :license: BSD, see LICENSE for details.
9"""
11import re
12import sys
13import time
15from pygments.filter import apply_filters, Filter
16from pygments.filters import get_filter_by_name
17from pygments.token import Error, Text, Other, Whitespace, _TokenType
18from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
19 make_analysator, Future, guess_decode
20from pygments.regexopt import regex_opt
22__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
23 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
24 'default', 'words', 'line_re']
26line_re = re.compile('.*?\n')
28_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
29 (b'\xff\xfe\0\0', 'utf-32'),
30 (b'\0\0\xfe\xff', 'utf-32be'),
31 (b'\xff\xfe', 'utf-16'),
32 (b'\xfe\xff', 'utf-16be')]
34_default_analyse = staticmethod(lambda x: 0.0)
37class LexerMeta(type):
38 """
39 This metaclass automagically converts ``analyse_text`` methods into
40 static methods which always return float values.
41 """
43 def __new__(mcs, name, bases, d):
44 if 'analyse_text' in d:
45 d['analyse_text'] = make_analysator(d['analyse_text'])
46 return type.__new__(mcs, name, bases, d)
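

# --- Illustrative sketch (editor's addition, not part of the original module).
# Shows the effect of LexerMeta: an ``analyse_text`` defined on a class using
# this metaclass is wrapped so it behaves like a static method and its return
# value is coerced to a float in [0.0, 1.0]. The demo class is hypothetical.
def _demo_lexer_meta():
    class _Demo(metaclass=LexerMeta):
        def analyse_text(text):
            # any truthy/falsy value is converted to a float score
            return text.startswith('#!') and 0.8

    assert _Demo.analyse_text('#!/bin/sh\n') == 0.8
    assert _Demo.analyse_text('plain text') == 0.0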


class Lexer(metaclass=LexerMeta):
    """
    Lexer for a specific language.

    See also :doc:`lexerdevelopment`, a high-level guide to writing
    lexers.

    Lexer classes have attributes used for choosing the most appropriate
    lexer based on various criteria.

    .. autoattribute:: name
       :no-value:
    .. autoattribute:: aliases
       :no-value:
    .. autoattribute:: filenames
       :no-value:
    .. autoattribute:: alias_filenames
    .. autoattribute:: mimetypes
       :no-value:
    .. autoattribute:: priority

    Lexers included in Pygments should have an additional attribute:

    .. autoattribute:: url
       :no-value:

    Lexers included in Pygments may have additional attributes:

    .. autoattribute:: _example
       :no-value:

    You can pass options to the constructor. The basic options recognized
    by all lexers and processed by the base `Lexer` class are:

    ``stripnl``
        Strip leading and trailing newlines from the input (default: True).
    ``stripall``
        Strip all leading and trailing whitespace from the input
        (default: False).
    ``ensurenl``
        Make sure that the input ends with a newline (default: True). This
        is required for some lexers that consume input linewise.

        .. versionadded:: 1.3

    ``tabsize``
        If given and greater than 0, expand tabs in the input (default: 0).
    ``encoding``
        If given, must be an encoding name. This encoding will be used to
        convert the input string to Unicode, if it is not already a Unicode
        string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
        Latin1 detection). Can also be ``'chardet'`` to use the chardet
        library, if it is installed.
    ``inencoding``
        Overrides the ``encoding`` if given.
    """

    #: Full name of the lexer, in human-readable form
    name = None

    #: A list of short, unique identifiers that can be used to look
    #: up the lexer from a list, e.g., using `get_lexer_by_name()`.
    aliases = []

    #: A list of `fnmatch` patterns that match filenames which contain
    #: content for this lexer. The patterns in this list should be unique among
    #: all lexers.
    filenames = []

    #: A list of `fnmatch` patterns that match filenames which may or may not
    #: contain content for this lexer. This list is used by the
    #: :func:`.guess_lexer_for_filename()` function, to determine which lexers
    #: are then included in guessing the correct one. That means that
    #: e.g. every lexer for HTML and a template language should include
    #: ``\*.html`` in this list.
    alias_filenames = []

    #: A list of MIME types for content that can be lexed with this lexer.
    mimetypes = []

    #: Priority, should multiple lexers match and no content is provided
    priority = 0

    #: URL of the language specification/definition. Used in the Pygments
    #: documentation.
    url = None

    #: Example file name. Relative to the ``tests/examplefiles`` directory.
    #: This is used by the documentation generator to show an example.
    _example = None

    def __init__(self, **options):
        """
        This constructor takes arbitrary options as keyword arguments.
        Every subclass must first process its own options and then call
        the `Lexer` constructor, since it processes the basic
        options like `stripnl`.

        An example looks like this:

        .. sourcecode:: python

           def __init__(self, **options):
               self.compress = options.get('compress', '')
               Lexer.__init__(self, **options)

        As these options must all be specifiable as strings (due to the
        command line usage), there are various utility functions
        available to help with that, see `Utilities`_.
        """
        self.options = options
        self.stripnl = get_bool_opt(options, 'stripnl', True)
        self.stripall = get_bool_opt(options, 'stripall', False)
        self.ensurenl = get_bool_opt(options, 'ensurenl', True)
        self.tabsize = get_int_opt(options, 'tabsize', 0)
        self.encoding = options.get('encoding', 'guess')
        self.encoding = options.get('inencoding') or self.encoding
        self.filters = []
        for filter_ in get_list_opt(options, 'filters', ()):
            self.add_filter(filter_)

    def __repr__(self):
        if self.options:
            return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
                                                     self.options)
        else:
            return '<pygments.lexers.%s>' % self.__class__.__name__

    def add_filter(self, filter_, **options):
        """
        Add a new stream filter to this lexer.
        """
        if not isinstance(filter_, Filter):
            filter_ = get_filter_by_name(filter_, **options)
        self.filters.append(filter_)
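
    # Illustrative note (editor's addition): filters can be passed either as
    # Filter instances or by name; names are resolved through
    # ``get_filter_by_name`` and the remaining keyword arguments become the
    # filter's options, e.g. with the stock 'keywordcase' filter::
    #
    #     lexer.add_filter('keywordcase', case='upper')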

    def analyse_text(text):
        """
        A static method which is called for lexer guessing.

        It should analyse the text and return a float in the range
        from ``0.0`` to ``1.0``. If it returns ``0.0``, the lexer
        will not be selected as the most probable one, if it returns
        ``1.0``, it will be selected immediately. This is used by
        `guess_lexer`.

        The `LexerMeta` metaclass automatically wraps this function so
        that it works like a static method (no ``self`` or ``cls``
        parameter) and the return value is automatically converted to
        `float`. If the return value is an object that is boolean `False`,
        it's the same as if the return value was ``0.0``.
        """

    def get_tokens(self, text, unfiltered=False):
        """
        This method is the basic interface of a lexer. It is called by
        the `highlight()` function. It must process the text and return an
        iterable of ``(tokentype, value)`` pairs from `text`.

        Normally, you don't need to override this method. The default
        implementation processes the options recognized by all lexers
        (`stripnl`, `stripall` and so on), and then yields all tokens
        from `get_tokens_unprocessed()`, with the ``index`` dropped.

        If `unfiltered` is set to `True`, the filtering mechanism is
        bypassed even if filters are defined.
        """
        if not isinstance(text, str):
            if self.encoding == 'guess':
                text, _ = guess_decode(text)
            elif self.encoding == 'chardet':
                try:
                    import chardet
                except ImportError as e:
                    raise ImportError('To enable chardet encoding guessing, '
                                      'please install the chardet library '
                                      'from http://chardet.feedparser.org/') from e
                # check for BOM first
                decoded = None
                for bom, encoding in _encoding_map:
                    if text.startswith(bom):
                        decoded = text[len(bom):].decode(encoding, 'replace')
                        break
                # no BOM found, so use chardet
                if decoded is None:
                    enc = chardet.detect(text[:1024])  # Guess using first 1KB
                    decoded = text.decode(enc.get('encoding') or 'utf-8',
                                          'replace')
                text = decoded
            else:
                text = text.decode(self.encoding)
                if text.startswith('\ufeff'):
                    text = text[len('\ufeff'):]
        else:
            if text.startswith('\ufeff'):
                text = text[len('\ufeff'):]

        # text now *is* a unicode string
        text = text.replace('\r\n', '\n')
        text = text.replace('\r', '\n')
        if self.stripall:
            text = text.strip()
        elif self.stripnl:
            text = text.strip('\n')
        if self.tabsize > 0:
            text = text.expandtabs(self.tabsize)
        if self.ensurenl and not text.endswith('\n'):
            text += '\n'

        def streamer():
            for _, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        return stream

    def get_tokens_unprocessed(self, text):
        """
        This method should process the text and return an iterable of
        ``(index, tokentype, value)`` tuples where ``index`` is the starting
        position of the token within the input text.

        It must be overridden by subclasses. It is recommended to
        implement it as a generator to maximize effectiveness.
        """
        raise NotImplementedError
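

# --- Illustrative sketch (editor's addition, not part of the original module).
# Minimal use of the ``get_tokens`` interface, assuming the stock PythonLexer
# from pygments.lexers is available.
def _demo_get_tokens():
    from pygments.lexers import PythonLexer

    lexer = PythonLexer(stripall=True)
    for tokentype, value in lexer.get_tokens('x = 1\n'):
        print(tokentype, repr(value))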


class DelegatingLexer(Lexer):
    """
    This lexer takes two lexers as arguments. A root lexer and
    a language lexer. First everything is scanned using the language
    lexer, afterwards all ``Other`` tokens are lexed using the root
    lexer.

    The lexers from the ``template`` lexer package use this base lexer.
    """

    def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
        self.root_lexer = _root_lexer(**options)
        self.language_lexer = _language_lexer(**options)
        self.needle = _needle
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        buffered = ''
        insertions = []
        lng_buffer = []
        for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
            if t is self.needle:
                if lng_buffer:
                    insertions.append((len(buffered), lng_buffer))
                    lng_buffer = []
                buffered += v
            else:
                lng_buffer.append((i, t, v))
        if lng_buffer:
            insertions.append((len(buffered), lng_buffer))
        return do_insertions(insertions,
                             self.root_lexer.get_tokens_unprocessed(buffered))
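

# --- Illustrative sketch (editor's addition, not part of the original module).
# How template lexers typically build on DelegatingLexer: the language lexer
# (here DjangoLexer) emits ``Other`` for everything that is not template
# syntax, and the root lexer (here HtmlLexer) re-lexes that buffered text.
# Pygments' real HtmlDjangoLexer follows this pattern; the subclass below is
# a hypothetical equivalent.
def _demo_delegating_lexer():
    from pygments.lexers import DjangoLexer, HtmlLexer

    class _HtmlTemplateLexer(DelegatingLexer):
        def __init__(self, **options):
            super().__init__(HtmlLexer, DjangoLexer, **options)

    return list(_HtmlTemplateLexer().get_tokens('<b>{{ name }}</b>\n'))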


# ------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#


class include(str):  # pylint: disable=invalid-name
    """
    Indicates that a state should include rules from another state.
    """
    pass


class _inherit:
    """
    Indicates that a state should inherit from its superclass.
    """
    def __repr__(self):
        return 'inherit'

inherit = _inherit()  # pylint: disable=invalid-name


class combined(tuple):  # pylint: disable=invalid-name
    """
    Indicates a state combined from multiple states.
    """

    def __new__(cls, *args):
        return tuple.__new__(cls, args)

    def __init__(self, *args):
        # tuple.__init__ doesn't do anything
        pass


class _PseudoMatch:
    """
    A pseudo match object constructed from a string.
    """

    def __init__(self, start, text):
        self._text = text
        self._start = start

    def start(self, arg=None):
        return self._start

    def end(self, arg=None):
        return self._start + len(self._text)

    def group(self, arg=None):
        if arg:
            raise IndexError('No such group')
        return self._text

    def groups(self):
        return (self._text,)

    def groupdict(self):
        return {}


def bygroups(*args):
    """
    Callback that yields multiple actions for each group in the match.
    """
    def callback(lexer, match, ctx=None):
        for i, action in enumerate(args):
            if action is None:
                continue
            elif type(action) is _TokenType:
                data = match.group(i + 1)
                if data:
                    yield match.start(i + 1), action, data
            else:
                data = match.group(i + 1)
                if data is not None:
                    if ctx:
                        ctx.pos = match.start(i + 1)
                    for item in action(lexer,
                                       _PseudoMatch(match.start(i + 1), data), ctx):
                        if item:
                            yield item
        if ctx:
            ctx.pos = match.end()
    return callback
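

# --- Illustrative sketch (editor's addition, not part of the original module).
# A typical ``bygroups`` rule: each regex group gets its own token type. The
# rule below is hypothetical and would normally live in a lexer's ``tokens``
# dictionary.
def _demo_bygroups_rule():
    from pygments.token import Keyword, Name, Whitespace

    # three groups -> three token types, yielded at the groups' positions
    return (r'(def)(\s+)([a-zA-Z_]\w*)',
            bygroups(Keyword, Whitespace, Name.Function))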


class _This:
    """
    Special singleton used for indicating the caller class.
    Used by ``using``.
    """

this = _This()


def using(_other, **kwargs):
    """
    Callback that processes the match with a different lexer.

    The keyword arguments are forwarded to the lexer, except `state` which
    is handled separately.

    `state` specifies the state that the new lexer will start in, and can
    be an enumerable such as ('root', 'inline', 'string') or a simple
    string which is assumed to be on top of the root state.

    Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
    """
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)

    if _other is this:
        def callback(lexer, match, ctx=None):
            # if keyword arguments are given the callback
            # function has to create a new lexer instance
            if kwargs:
                # XXX: cache that somehow
                kwargs.update(lexer.options)
                lx = lexer.__class__(**kwargs)
            else:
                lx = lexer
            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    else:
        def callback(lexer, match, ctx=None):
            # XXX: cache that somehow
            kwargs.update(lexer.options)
            lx = _other(**kwargs)

            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    return callback
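

# --- Illustrative sketch (editor's addition, not part of the original module).
# A hypothetical rule set delegating matched text to another lexer; with
# ``using(this)`` the current lexer would be reused instead, optionally
# starting in a different state via the ``state`` keyword.
def _demo_using_rules():
    from pygments.lexers import JavascriptLexer
    from pygments.token import Comment

    return [
        (r'//.*$', Comment.Single),
        # everything else on the line is re-lexed as JavaScript
        (r'.+\n', using(JavascriptLexer)),
    ]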


class default:
    """
    Indicates a state or state action (e.g. #pop) to apply.
    For example default('#pop') is equivalent to ('', Token, '#pop')
    Note that state tuples may be used as well.

    .. versionadded:: 2.0
    """
    def __init__(self, state):
        self.state = state
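

# --- Illustrative sketch (editor's addition, not part of the original module).
# ``default`` is useful as a fall-through rule: if nothing else in the state
# matches, leave the state without consuming any input. The rules below are
# hypothetical.
def _demo_default_rules():
    return [
        (r'\s+', Whitespace),
        default('#pop'),  # same as ('', Token, '#pop'), but clearer
    ]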


class words(Future):
    """
    Indicates a list of literal words that is transformed into an optimized
    regex that matches any of the words.

    .. versionadded:: 2.0
    """
    def __init__(self, words, prefix='', suffix=''):
        self.words = words
        self.prefix = prefix
        self.suffix = suffix

    def get(self):
        return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
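

# --- Illustrative sketch (editor's addition, not part of the original module).
# ``words`` defers regex construction until lexer compile time, when ``get()``
# turns the word list into one optimized alternation via ``regex_opt``. The
# rule below is hypothetical.
def _demo_words_rule():
    from pygments.token import Keyword

    return (words(('if', 'elif', 'else'), prefix=r'\b', suffix=r'\b'), Keyword)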


class RegexLexerMeta(LexerMeta):
    """
    Metaclass for RegexLexer, creates the self._tokens attribute from
    self.tokens on the first instantiation.
    """

    def _process_regex(cls, regex, rflags, state):
        """Preprocess the regular expression component of a token definition."""
        if isinstance(regex, Future):
            regex = regex.get()
        return re.compile(regex, rflags).match

    def _process_token(cls, token):
        """Preprocess the token component of a token definition."""
        assert type(token) is _TokenType or callable(token), \
            'token type must be simple type or callable, not %r' % (token,)
        return token

    def _process_new_state(cls, new_state, unprocessed, processed):
        """Preprocess the state transition action of a token definition."""
        if isinstance(new_state, str):
            # an existing state
            if new_state == '#pop':
                return -1
            elif new_state in unprocessed:
                return (new_state,)
            elif new_state == '#push':
                return new_state
            elif new_state[:5] == '#pop:':
                return -int(new_state[5:])
            else:
                assert False, 'unknown new state %r' % new_state
        elif isinstance(new_state, combined):
            # combine a new state from existing ones
            tmp_state = '_tmp_%d' % cls._tmpname
            cls._tmpname += 1
            itokens = []
            for istate in new_state:
                assert istate != new_state, 'circular state ref %r' % istate
                itokens.extend(cls._process_state(unprocessed,
                                                  processed, istate))
            processed[tmp_state] = itokens
            return (tmp_state,)
        elif isinstance(new_state, tuple):
            # push more than one state
            for istate in new_state:
                assert (istate in unprocessed or
                        istate in ('#pop', '#push')), \
                    'unknown new state ' + istate
            return new_state
        else:
            assert False, 'unknown new state def %r' % new_state

    def _process_state(cls, unprocessed, processed, state):
        """Preprocess a single state definition."""
        assert type(state) is str, "wrong state name %r" % state
        assert state[0] != '#', "invalid state name %r" % state
        if state in processed:
            return processed[state]
        tokens = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # it's a state reference
                assert tdef != state, "circular state reference %r" % state
                tokens.extend(cls._process_state(unprocessed, processed,
                                                 str(tdef)))
                continue
            if isinstance(tdef, _inherit):
                # should be processed already, but may not in the case of:
                # 1. the state has no counterpart in any parent
                # 2. the state includes more than one 'inherit'
                continue
            if isinstance(tdef, default):
                new_state = cls._process_new_state(tdef.state, unprocessed, processed)
                tokens.append((re.compile('').match, None, new_state))
                continue

            assert type(tdef) is tuple, "wrong rule def %r" % tdef

            try:
                rex = cls._process_regex(tdef[0], rflags, state)
            except Exception as err:
                raise ValueError("uncompilable regex %r in state %r of %r: %s" %
                                 (tdef[0], state, cls, err)) from err

            token = cls._process_token(tdef[1])

            if len(tdef) == 2:
                new_state = None
            else:
                new_state = cls._process_new_state(tdef[2],
                                                   unprocessed, processed)

            tokens.append((rex, token, new_state))
        return tokens

    def process_tokendef(cls, name, tokendefs=None):
        """Preprocess a dictionary of token definitions."""
        processed = cls._all_tokens[name] = {}
        tokendefs = tokendefs or cls.tokens[name]
        for state in list(tokendefs):
            cls._process_state(tokendefs, processed, state)
        return processed

    def get_tokendefs(cls):
        """
        Merge tokens from superclasses in MRO order, returning a single tokendef
        dictionary.

        Any state that is not defined by a subclass will be inherited
        automatically. States that *are* defined by subclasses will, by
        default, override that state in the superclass. If a subclass wishes to
        inherit definitions from a superclass, it can use the special value
        "inherit", which will cause the superclass' state definition to be
        included at that point in the state.
        """
        tokens = {}
        inheritable = {}
        for c in cls.__mro__:
            toks = c.__dict__.get('tokens', {})

            for state, items in toks.items():
                curitems = tokens.get(state)
                if curitems is None:
                    # N.b. because this is assigned by reference, sufficiently
                    # deep hierarchies are processed incrementally (e.g. for
                    # A(B), B(C), C(RegexLexer), B will be premodified so X(B)
                    # will not see any inherits in B).
                    tokens[state] = items
                    try:
                        inherit_ndx = items.index(inherit)
                    except ValueError:
                        continue
                    inheritable[state] = inherit_ndx
                    continue

                inherit_ndx = inheritable.pop(state, None)
                if inherit_ndx is None:
                    continue

                # Replace the "inherit" value with the items
                curitems[inherit_ndx:inherit_ndx+1] = items
                try:
                    # N.b. this is the index in items (that is, the superclass
                    # copy), so offset required when storing below.
                    new_inh_ndx = items.index(inherit)
                except ValueError:
                    pass
                else:
                    inheritable[state] = inherit_ndx + new_inh_ndx

        return tokens

    def __call__(cls, *args, **kwds):
        """Instantiate cls after preprocessing its token definitions."""
        if '_tokens' not in cls.__dict__:
            cls._all_tokens = {}
            cls._tmpname = 0
            if hasattr(cls, 'token_variants') and cls.token_variants:
                # don't process yet
                pass
            else:
                cls._tokens = cls.process_tokendef('', cls.get_tokendefs())

        return type.__call__(cls, *args, **kwds)


class RegexLexer(Lexer, metaclass=RegexLexerMeta):
    """
    Base for simple stateful regular expression-based lexers.
    Simplifies the lexing process so that you need only
    provide a list of states and regular expressions.
    """

    #: Flags for compiling the regular expressions.
    #: Defaults to MULTILINE.
    flags = re.MULTILINE

    #: At all times there is a stack of states. Initially, the stack contains
    #: a single state 'root'. The top of the stack is called "the current state".
    #:
    #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
    #:
    #: ``new_state`` can be omitted to signify no state transition.
    #: If ``new_state`` is a string, it is pushed on the stack. This ensures
    #: the new current state is ``new_state``.
    #: If ``new_state`` is a tuple of strings, all of those strings are pushed
    #: on the stack and the current state will be the last element of the list.
    #: ``new_state`` can also be ``combined('state1', 'state2', ...)``
    #: to signify a new, anonymous state combined from the rules of two
    #: or more existing ones.
    #: Furthermore, it can be '#pop' to signify going back one step in
    #: the state stack, or '#push' to push the current state on the stack
    #: again. Note that if you push while in a combined state, the combined
    #: state itself is pushed, and not only the state in which the rule is
    #: defined.
    #:
    #: The tuple can also be replaced with ``include('state')``, in which
    #: case the rules from the state named by the string are included in the
    #: current one.
    tokens = {}

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield pos, action, m.group()
                        else:
                            yield from action(self, m)
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(statestack) > 1:
                                        statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop, but keep at least one state on the stack
                            # (random code leading to unexpected pops should
                            # not allow exceptions)
                            if abs(new_state) >= len(statestack):
                                del statestack[1:]
                            else:
                                del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # We are here only if all state tokens have been considered
                # and there was not a match on any of them.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Whitespace, '\n'
                        pos += 1
                        continue
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    break
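

# --- Illustrative sketch (editor's addition, not part of the original module).
# A complete, minimal RegexLexer subclass demonstrating the state stack: the
# quote rule pushes 'string', and the closing quote pops back to 'root'.
def _demo_regex_lexer():
    from pygments.token import Comment, Keyword, Name, String

    class _MiniLexer(RegexLexer):
        name = 'Mini demo'
        tokens = {
            'root': [
                (r'#.*$', Comment.Single),
                (words(('let', 'in'), suffix=r'\b'), Keyword),
                (r'"', String, 'string'),  # push the 'string' state
                (r'[a-zA-Z_]\w*', Name),
                (r'\s+', Whitespace),
            ],
            'string': [
                (r'[^"]+', String),
                (r'"', String, '#pop'),    # pop back to 'root'
            ],
        }

    return list(_MiniLexer().get_tokens('let x  # hi\n"s"\n'))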


class LexerContext:
    """
    A helper object that holds lexer position data.
    """

    def __init__(self, text, pos, stack=None, end=None):
        self.text = text
        self.pos = pos
        self.end = end or len(text)  # end=0 not supported ;-)
        self.stack = stack or ['root']

    def __repr__(self):
        return 'LexerContext(%r, %r, %r)' % (
            self.text, self.pos, self.stack)


class ExtendedRegexLexer(RegexLexer):
    """
    A RegexLexer that uses a context object to store its state.
    """

    def get_tokens_unprocessed(self, text=None, context=None):
        """
        Split ``text`` into (tokentype, text) pairs.
        If ``context`` is given, use this lexer context instead.
        """
        tokendefs = self._tokens
        if not context:
            ctx = LexerContext(text, 0)
            statetokens = tokendefs['root']
        else:
            ctx = context
            statetokens = tokendefs[ctx.stack[-1]]
            text = ctx.text
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield ctx.pos, action, m.group()
                            ctx.pos = m.end()
                        else:
                            yield from action(self, m, ctx)
                            if not new_state:
                                # altered the state stack?
                                statetokens = tokendefs[ctx.stack[-1]]
                                # CAUTION: callback must set ctx.pos!
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(ctx.stack) > 1:
                                        ctx.stack.pop()
                                elif state == '#push':
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # see RegexLexer for why this check is made
                            if abs(new_state) >= len(ctx.stack):
                                del ctx.stack[1:]
                            else:
                                del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                try:
                    if ctx.pos >= ctx.end:
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to "root"
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        yield ctx.pos, Text, '\n'
                        ctx.pos += 1
                        continue
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break
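

# --- Illustrative sketch (editor's addition, not part of the original module).
# In an ExtendedRegexLexer, rule callbacks receive the LexerContext and are
# responsible for advancing ``ctx.pos`` themselves. A hypothetical callback:
def _demo_erl_callback(lexer, match, ctx):
    yield match.start(), Text, match.group()
    ctx.pos = match.end()  # the callback, not the framework, moves the position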


def do_insertions(insertions, tokens):
    """
    Helper for lexers which must combine the results of several
    sublexers.

    ``insertions`` is a list of ``(index, itokens)`` pairs.
    Each ``itokens`` iterable should be inserted at position
    ``index`` into the token stream given by the ``tokens``
    argument.

    The result is a combined token stream.

    TODO: clean up the code here.
    """
    insertions = iter(insertions)
    try:
        index, itokens = next(insertions)
    except StopIteration:
        # no insertions
        yield from tokens
        return

    realpos = None
    insleft = True

    # iterate over the token stream where we want to insert
    # the tokens from the insertion list.
    for i, t, v in tokens:
        # first iteration. store the position of first item
        if realpos is None:
            realpos = i
        oldi = 0
        while insleft and i + len(v) >= index:
            tmpval = v[oldi:index - i]
            if tmpval:
                yield realpos, t, tmpval
                realpos += len(tmpval)
            for it_index, it_token, it_value in itokens:
                yield realpos, it_token, it_value
                realpos += len(it_value)
            oldi = index - i
            try:
                index, itokens = next(insertions)
            except StopIteration:
                insleft = False
                break  # not strictly necessary
        if oldi < len(v):
            yield realpos, t, v[oldi:]
            realpos += len(v) - oldi

    # leftover tokens
    while insleft:
        # no normal tokens, set realpos to zero
        realpos = realpos or 0
        for p, t, v in itokens:
            yield realpos, t, v
            realpos += len(v)
        try:
            index, itokens = next(insertions)
        except StopIteration:
            insleft = False
            break  # not strictly necessary
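

# --- Illustrative sketch (editor's addition, not part of the original module).
# A tiny worked example of ``do_insertions`` with hand-built streams: the
# prompt tokens are spliced in at position 0, and the code tokens are shifted
# behind them.
def _demo_do_insertions():
    from pygments.token import Generic

    code_tokens = iter([(0, Text, 'print(1)\n')])
    prompts = [(0, [(0, Generic.Prompt, '>>> ')])]
    # -> [(0, Generic.Prompt, '>>> '), (4, Token.Text, 'print(1)\n')]
    return list(do_insertions(prompts, code_tokens))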


class ProfilingRegexLexerMeta(RegexLexerMeta):
    """Metaclass for ProfilingRegexLexer, collects regex timing info."""

    def _process_regex(cls, regex, rflags, state):
        if isinstance(regex, words):
            rex = regex_opt(regex.words, prefix=regex.prefix,
                            suffix=regex.suffix)
        else:
            rex = regex
        compiled = re.compile(rex, rflags)

        def match_func(text, pos, endpos=sys.maxsize):
            info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
            t0 = time.time()
            res = compiled.match(text, pos, endpos)
            t1 = time.time()
            info[0] += 1
            info[1] += t1 - t0
            return res
        return match_func


class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
    """Drop-in replacement for RegexLexer that does profiling of its regexes."""

    _prof_data = []
    _prof_sort_index = 4  # defaults to time per call

    def get_tokens_unprocessed(self, text, stack=('root',)):
        # this needs to be a stack, since using(this) will produce nested calls
        self.__class__._prof_data.append({})
        yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
        rawdata = self.__class__._prof_data.pop()
        data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
                        n, 1000 * t, 1000 * t / n)
                       for ((s, r), (n, t)) in rawdata.items()),
                      key=lambda x: x[self._prof_sort_index],
                      reverse=True)
        sum_total = sum(x[3] for x in data)

        print()
        print('Profiling result for %s lexing %d chars in %.3f ms' %
              (self.__class__.__name__, len(text), sum_total))
        print('=' * 110)
        print('%-20s %-64s ncalls  tottime  percall' % ('state', 'regex'))
        print('-' * 110)
        for d in data:
            print('%-20s %-65s %5d %8.4f %8.4f' % d)
        print('=' * 110)
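

# --- Illustrative sketch (editor's addition, not part of the original module).
# ProfilingRegexLexer is used by mixing it in front of an existing RegexLexer
# subclass, so that its metaclass wraps every compiled regex with the timing
# ``match_func`` above. The combination below is hypothetical.
def _demo_profiling_lexer():
    from pygments.lexers import PythonLexer

    class _ProfilingPythonLexer(ProfilingRegexLexer, PythonLexer):
        pass

    # consuming the stream prints the profiling table afterwards
    list(_ProfilingPythonLexer().get_tokens('x = 1\n'))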