Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/pygments/lexer.py: 21%
472 statements
1"""
2 pygments.lexer
3 ~~~~~~~~~~~~~~
5 Base lexer classes.
7 :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
8 :license: BSD, see LICENSE for details.
9"""
11import re
12import sys
13import time
15from pygments.filter import apply_filters, Filter
16from pygments.filters import get_filter_by_name
17from pygments.token import Error, Text, Other, Whitespace, _TokenType
18from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
19 make_analysator, Future, guess_decode
20from pygments.regexopt import regex_opt
22__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
23 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
24 'default', 'words', 'line_re']
26line_re = re.compile('.*?\n')
28_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
29 (b'\xff\xfe\0\0', 'utf-32'),
30 (b'\0\0\xfe\xff', 'utf-32be'),
31 (b'\xff\xfe', 'utf-16'),
32 (b'\xfe\xff', 'utf-16be')]
34_default_analyse = staticmethod(lambda x: 0.0)
37class LexerMeta(type):
38 """
39 This metaclass automagically converts ``analyse_text`` methods into
40 static methods which always return float values.
41 """
43 def __new__(mcs, name, bases, d):
44 if 'analyse_text' in d:
45 d['analyse_text'] = make_analysator(d['analyse_text'])
46 return type.__new__(mcs, name, bases, d)
49class Lexer(metaclass=LexerMeta):
50 """
51 Lexer for a specific language.
53 See also :doc:`lexerdevelopment`, a high-level guide to writing
54 lexers.
56 Lexer classes have attributes used for choosing the most appropriate
57 lexer based on various criteria.
59 .. autoattribute:: name
60 :no-value:
61 .. autoattribute:: aliases
62 :no-value:
63 .. autoattribute:: filenames
64 :no-value:
65 .. autoattribute:: alias_filenames
66 .. autoattribute:: mimetypes
67 :no-value:
68 .. autoattribute:: priority
70 Lexers included in Pygments should have an additional attribute:
72 .. autoattribute:: url
73 :no-value:
75 Lexers included in Pygments may have additional attributes:
77 .. autoattribute:: _example
78 :no-value:
80 You can pass options to the constructor. The basic options recognized
81 by all lexers and processed by the base `Lexer` class are:
83 ``stripnl``
84 Strip leading and trailing newlines from the input (default: True).
85 ``stripall``
86 Strip all leading and trailing whitespace from the input
87 (default: False).
88 ``ensurenl``
89 Make sure that the input ends with a newline (default: True). This
90 is required for some lexers that consume input linewise.
92 .. versionadded:: 1.3
94 ``tabsize``
95 If given and greater than 0, expand tabs in the input (default: 0).
    ``encoding``
        If given, must be an encoding name. This encoding will be used to
        convert the input string to Unicode, if it is not already a Unicode
        string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
        Latin1 detection; can also be ``'chardet'`` to use the chardet
        library, if it is installed).
    ``inencoding``
        Overrides the ``encoding`` if given.
    """

    #: Full name of the lexer, in human-readable form
    name = None

    #: A list of short, unique identifiers that can be used to look
    #: up the lexer from a list, e.g., using `get_lexer_by_name()`.
    aliases = []

    #: A list of `fnmatch` patterns that match filenames which contain
    #: content for this lexer. The patterns in this list should be unique among
    #: all lexers.
    filenames = []

    #: A list of `fnmatch` patterns that match filenames which may or may not
    #: contain content for this lexer. This list is used by the
    #: :func:`.guess_lexer_for_filename()` function, to determine which lexers
    #: are then included in guessing the correct one. That means that
    #: e.g. every lexer for HTML and a template language should include
    #: ``\*.html`` in this list.
    alias_filenames = []

    #: A list of MIME types for content that can be lexed with this lexer.
    mimetypes = []

    #: Priority, should multiple lexers match and no content is provided
    priority = 0

    #: URL of the language specification/definition. Used in the Pygments
    #: documentation.
    url = None

    #: Example file name. Relative to the ``tests/examplefiles`` directory.
    #: This is used by the documentation generator to show an example.
    _example = None

    def __init__(self, **options):
        """
        This constructor takes arbitrary options as keyword arguments.
        Every subclass must first process its own options and then call
        the `Lexer` constructor, since it processes the basic
        options like `stripnl`.

        An example looks like this:

        .. sourcecode:: python

           def __init__(self, **options):
               self.compress = options.get('compress', '')
               Lexer.__init__(self, **options)

        As these options must all be specifiable as strings (due to the
        command line usage), there are various utility functions
        available to help with that, see `Utilities`_.
        """
        self.options = options
        self.stripnl = get_bool_opt(options, 'stripnl', True)
        self.stripall = get_bool_opt(options, 'stripall', False)
        self.ensurenl = get_bool_opt(options, 'ensurenl', True)
        self.tabsize = get_int_opt(options, 'tabsize', 0)
        self.encoding = options.get('encoding', 'guess')
        self.encoding = options.get('inencoding') or self.encoding
        self.filters = []
        for filter_ in get_list_opt(options, 'filters', ()):
            self.add_filter(filter_)

    def __repr__(self):
        if self.options:
            return '<pygments.lexers.%s with %r>' % (self.__class__.__name__,
                                                     self.options)
        else:
            return '<pygments.lexers.%s>' % self.__class__.__name__

    def add_filter(self, filter_, **options):
        """
        Add a new stream filter to this lexer.
        """
        if not isinstance(filter_, Filter):
            filter_ = get_filter_by_name(filter_, **options)
        self.filters.append(filter_)
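
    # A minimal usage sketch (illustrative, not part of the original module):
    # filters can be attached either by name or as `Filter` instances, e.g.
    #
    #     lexer.add_filter('keywordcase', case='upper')
    #
    # which is equivalent to passing a ``filters`` list as a constructor
    # option.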

    def analyse_text(text):
        """
        A static method which is called for lexer guessing.

        It should analyse the text and return a float in the range
        from ``0.0`` to ``1.0``. If it returns ``0.0``, the lexer
        will not be selected as the most probable one, if it returns
        ``1.0``, it will be selected immediately. This is used by
        `guess_lexer`.

        The `LexerMeta` metaclass automatically wraps this function so
        that it works like a static method (no ``self`` or ``cls``
        parameter) and the return value is automatically converted to
        `float`. If the return value is an object that is boolean `False`,
        it's the same as if the return value was ``0.0``.
        """

    def _preprocess_lexer_input(self, text):
        """Apply preprocessing such as decoding the input, removing BOM and normalizing newlines."""

        if not isinstance(text, str):
            if self.encoding == 'guess':
                text, _ = guess_decode(text)
            elif self.encoding == 'chardet':
                try:
                    import chardet
                except ImportError as e:
                    raise ImportError('To enable chardet encoding guessing, '
                                      'please install the chardet library '
                                      'from http://chardet.feedparser.org/') from e
                # check for BOM first
                decoded = None
                for bom, encoding in _encoding_map:
                    if text.startswith(bom):
                        decoded = text[len(bom):].decode(encoding, 'replace')
                        break
                # no BOM found, so use chardet
                if decoded is None:
                    enc = chardet.detect(text[:1024])  # Guess using first 1KB
                    decoded = text.decode(enc.get('encoding') or 'utf-8',
                                          'replace')
                text = decoded
            else:
                text = text.decode(self.encoding)
                if text.startswith('\ufeff'):
                    text = text[len('\ufeff'):]
        else:
            if text.startswith('\ufeff'):
                text = text[len('\ufeff'):]

        # text now *is* a unicode string
        text = text.replace('\r\n', '\n')
        text = text.replace('\r', '\n')
        if self.stripall:
            text = text.strip()
        elif self.stripnl:
            text = text.strip('\n')
        if self.tabsize > 0:
            text = text.expandtabs(self.tabsize)
        if self.ensurenl and not text.endswith('\n'):
            text += '\n'

        return text

    def get_tokens(self, text, unfiltered=False):
        """
        This method is the basic interface of a lexer. It is called by
        the `highlight()` function. It must process the text and return an
        iterable of ``(tokentype, value)`` pairs from `text`.

        Normally, you don't need to override this method. The default
        implementation processes the options recognized by all lexers
        (`stripnl`, `stripall` and so on), and then yields all tokens
        from `get_tokens_unprocessed()`, with the ``index`` dropped.

        If `unfiltered` is set to `True`, the filtering mechanism is
        bypassed even if filters are defined.
        """
        text = self._preprocess_lexer_input(text)

        def streamer():
            for _, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        return stream

    def get_tokens_unprocessed(self, text):
        """
        This method should process the text and return an iterable of
        ``(index, tokentype, value)`` tuples where ``index`` is the starting
        position of the token within the input text.

        It must be overridden by subclasses. It is recommended to
        implement it as a generator to maximize effectiveness.
        """
        raise NotImplementedError
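
    # Illustration of the two token APIs (``SomeLexer`` is a hypothetical
    # concrete subclass, shown only to contrast the yielded shapes):
    #
    #     lx = SomeLexer()
    #     next(lx.get_tokens_unprocessed(code))   # (index, tokentype, value)
    #     next(iter(lx.get_tokens(code)))         # (tokentype, value)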


class DelegatingLexer(Lexer):
    """
    This lexer takes two lexers as arguments: a root lexer and a language
    lexer. First everything is scanned using the language lexer, afterwards
    all ``Other`` tokens are lexed using the root lexer.

    The lexers from the ``template`` lexer package use this base lexer.
    """

    def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
        self.root_lexer = _root_lexer(**options)
        self.language_lexer = _language_lexer(**options)
        self.needle = _needle
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        buffered = ''
        insertions = []
        lng_buffer = []
        for i, t, v in self.language_lexer.get_tokens_unprocessed(text):
            if t is self.needle:
                if lng_buffer:
                    insertions.append((len(buffered), lng_buffer))
                    lng_buffer = []
                buffered += v
            else:
                lng_buffer.append((i, t, v))
        if lng_buffer:
            insertions.append((len(buffered), lng_buffer))
        return do_insertions(insertions,
                             self.root_lexer.get_tokens_unprocessed(buffered))
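
    # A minimal sketch of a delegating template lexer (names are illustrative;
    # the real template lexers in ``pygments.lexers.templates`` follow this
    # pattern):
    #
    #     class MyHtmlTemplateLexer(DelegatingLexer):
    #         def __init__(self, **options):
    #             super().__init__(HtmlLexer, MyTemplateLexer, **options)
    #
    # ``MyTemplateLexer`` would emit ``Other`` for everything it does not
    # understand, and those stretches are then re-lexed with ``HtmlLexer``.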


# ------------------------------------------------------------------------------
# RegexLexer and ExtendedRegexLexer
#


class include(str):  # pylint: disable=invalid-name
    """
    Indicates that a state should include rules from another state.
    """
    pass


class _inherit:
    """
    Indicates that a state should inherit from its superclass.
    """
    def __repr__(self):
        return 'inherit'

inherit = _inherit()  # pylint: disable=invalid-name


class combined(tuple):  # pylint: disable=invalid-name
    """
    Indicates a state combined from multiple states.
    """

    def __new__(cls, *args):
        return tuple.__new__(cls, args)

    def __init__(self, *args):
        # tuple.__init__ doesn't do anything
        pass


class _PseudoMatch:
    """
    A pseudo match object constructed from a string.
    """

    def __init__(self, start, text):
        self._text = text
        self._start = start

    def start(self, arg=None):
        return self._start

    def end(self, arg=None):
        return self._start + len(self._text)

    def group(self, arg=None):
        if arg:
            raise IndexError('No such group')
        return self._text

    def groups(self):
        return (self._text,)

    def groupdict(self):
        return {}


def bygroups(*args):
    """
    Callback that yields multiple actions for each group in the match.
    """
    def callback(lexer, match, ctx=None):
        for i, action in enumerate(args):
            if action is None:
                continue
            elif type(action) is _TokenType:
                data = match.group(i + 1)
                if data:
                    yield match.start(i + 1), action, data
            else:
                data = match.group(i + 1)
                if data is not None:
                    if ctx:
                        ctx.pos = match.start(i + 1)
                    for item in action(lexer,
                                       _PseudoMatch(match.start(i + 1), data), ctx):
                        if item:
                            yield item
        if ctx:
            ctx.pos = match.end()
    return callback
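
# A minimal sketch of ``bygroups`` inside a ``tokens`` rule (token types are
# real Pygments tokens, the regex is illustrative):
#
#     from pygments.token import Name, Operator, Text
#
#     (r'(\w+)(\s*)(=)', bygroups(Name.Variable, Text, Operator)),
#
# Each regex group is emitted with the corresponding token type; ``None``
# entries skip a group, and a callable entry lexes that group's text itself.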


class _This:
    """
    Special singleton used for indicating the caller class.
    Used by ``using``.
    """


this = _This()


def using(_other, **kwargs):
    """
    Callback that processes the match with a different lexer.

    The keyword arguments are forwarded to the lexer, except `state` which
    is handled separately.

    `state` specifies the state that the new lexer will start in, and can
    be an enumerable such as ('root', 'inline', 'string') or a simple
    string which is assumed to be on top of the root state.

    Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
    """
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)

    if _other is this:
        def callback(lexer, match, ctx=None):
            # if keyword arguments are given the callback
            # function has to create a new lexer instance
            if kwargs:
                # XXX: cache that somehow
                kwargs.update(lexer.options)
                lx = lexer.__class__(**kwargs)
            else:
                lx = lexer
            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    else:
        def callback(lexer, match, ctx=None):
            # XXX: cache that somehow
            kwargs.update(lexer.options)
            lx = _other(**kwargs)

            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    return callback
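
# A minimal sketch of ``using`` inside a rule (the regex and surrounding lexer
# are illustrative; ``JavascriptLexer`` and ``Name.Tag`` are real Pygments
# names):
#
#     from pygments.lexers import JavascriptLexer
#     from pygments.token import Name
#
#     (r'(<script>)(.*?)(</script>)',
#      bygroups(Name.Tag, using(JavascriptLexer), Name.Tag)),
#
# ``using(this)`` instead re-enters the current lexer class, optionally in a
# different start ``state``.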


class default:
    """
    Indicates a state or state action (e.g. #pop) to apply.
    For example, ``default('#pop')`` is equivalent to ``('', Token, '#pop')``.
    Note that state tuples may be used as well.

    .. versionadded:: 2.0
    """
    def __init__(self, state):
        self.state = state
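
# A minimal sketch (rule list is illustrative): ``default`` transitions
# without consuming input, e.g. leaving a state when nothing else matches:
#
#     'attribute': [
#         (r'\s+', Whitespace),
#         (r'\w+', Name.Attribute),
#         default('#pop'),
#     ],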


class words(Future):
    """
    Indicates a list of literal words that is transformed into an optimized
    regex that matches any of the words.

    .. versionadded:: 2.0
    """
    def __init__(self, words, prefix='', suffix=''):
        self.words = words
        self.prefix = prefix
        self.suffix = suffix

    def get(self):
        return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
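
# A minimal usage sketch (keyword list and token type chosen for
# illustration):
#
#     (words(('and', 'or', 'not', 'in'), prefix=r'\b', suffix=r'\b'),
#      Operator.Word),
#
# At class-processing time the word list is compiled into a single optimized
# alternation via ``regex_opt``.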


class RegexLexerMeta(LexerMeta):
    """
    Metaclass for RegexLexer, creates the self._tokens attribute from
    self.tokens on the first instantiation.
    """

    def _process_regex(cls, regex, rflags, state):
        """Preprocess the regular expression component of a token definition."""
        if isinstance(regex, Future):
            regex = regex.get()
        return re.compile(regex, rflags).match

    def _process_token(cls, token):
        """Preprocess the token component of a token definition."""
        assert type(token) is _TokenType or callable(token), \
            'token type must be simple type or callable, not %r' % (token,)
        return token

    def _process_new_state(cls, new_state, unprocessed, processed):
        """Preprocess the state transition action of a token definition."""
        if isinstance(new_state, str):
            # an existing state
            if new_state == '#pop':
                return -1
            elif new_state in unprocessed:
                return (new_state,)
            elif new_state == '#push':
                return new_state
            elif new_state[:5] == '#pop:':
                return -int(new_state[5:])
            else:
                assert False, 'unknown new state %r' % new_state
        elif isinstance(new_state, combined):
            # combine a new state from existing ones
            tmp_state = '_tmp_%d' % cls._tmpname
            cls._tmpname += 1
            itokens = []
            for istate in new_state:
                assert istate != new_state, 'circular state ref %r' % istate
                itokens.extend(cls._process_state(unprocessed,
                                                  processed, istate))
            processed[tmp_state] = itokens
            return (tmp_state,)
        elif isinstance(new_state, tuple):
            # push more than one state
            for istate in new_state:
                assert (istate in unprocessed or
                        istate in ('#pop', '#push')), \
                    'unknown new state ' + istate
            return new_state
        else:
            assert False, 'unknown new state def %r' % new_state

    def _process_state(cls, unprocessed, processed, state):
        """Preprocess a single state definition."""
        assert type(state) is str, "wrong state name %r" % state
        assert state[0] != '#', "invalid state name %r" % state
        if state in processed:
            return processed[state]
        tokens = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # it's a state reference
                assert tdef != state, "circular state reference %r" % state
                tokens.extend(cls._process_state(unprocessed, processed,
                                                 str(tdef)))
                continue
            if isinstance(tdef, _inherit):
                # should be processed already, but may not in the case of:
                # 1. the state has no counterpart in any parent
                # 2. the state includes more than one 'inherit'
                continue
            if isinstance(tdef, default):
                new_state = cls._process_new_state(tdef.state, unprocessed, processed)
                tokens.append((re.compile('').match, None, new_state))
                continue

            assert type(tdef) is tuple, "wrong rule def %r" % tdef

            try:
                rex = cls._process_regex(tdef[0], rflags, state)
            except Exception as err:
                raise ValueError("uncompilable regex %r in state %r of %r: %s" %
                                 (tdef[0], state, cls, err)) from err

            token = cls._process_token(tdef[1])

            if len(tdef) == 2:
                new_state = None
            else:
                new_state = cls._process_new_state(tdef[2],
                                                   unprocessed, processed)

            tokens.append((rex, token, new_state))
        return tokens

    def process_tokendef(cls, name, tokendefs=None):
        """Preprocess a dictionary of token definitions."""
        processed = cls._all_tokens[name] = {}
        tokendefs = tokendefs or cls.tokens[name]
        for state in list(tokendefs):
            cls._process_state(tokendefs, processed, state)
        return processed

    def get_tokendefs(cls):
        """
        Merge tokens from superclasses in MRO order, returning a single tokendef
        dictionary.

        Any state that is not defined by a subclass will be inherited
        automatically. States that *are* defined by subclasses will, by
        default, override that state in the superclass. If a subclass wishes to
        inherit definitions from a superclass, it can use the special value
        "inherit", which will cause the superclass' state definition to be
        included at that point in the state.
        """
        tokens = {}
        inheritable = {}
        for c in cls.__mro__:
            toks = c.__dict__.get('tokens', {})

            for state, items in toks.items():
                curitems = tokens.get(state)
                if curitems is None:
                    # N.b. because this is assigned by reference, sufficiently
                    # deep hierarchies are processed incrementally (e.g. for
                    # A(B), B(C), C(RegexLexer), B will be premodified so X(B)
                    # will not see any inherits in B).
                    tokens[state] = items
                    try:
                        inherit_ndx = items.index(inherit)
                    except ValueError:
                        continue
                    inheritable[state] = inherit_ndx
                    continue

                inherit_ndx = inheritable.pop(state, None)
                if inherit_ndx is None:
                    continue

                # Replace the "inherit" value with the items
                curitems[inherit_ndx:inherit_ndx+1] = items
                try:
                    # N.b. this is the index in items (that is, the superclass
                    # copy), so offset required when storing below.
                    new_inh_ndx = items.index(inherit)
                except ValueError:
                    pass
                else:
                    inheritable[state] = inherit_ndx + new_inh_ndx

        return tokens

    def __call__(cls, *args, **kwds):
        """Instantiate cls after preprocessing its token definitions."""
        if '_tokens' not in cls.__dict__:
            cls._all_tokens = {}
            cls._tmpname = 0
            if hasattr(cls, 'token_variants') and cls.token_variants:
                # don't process yet
                pass
            else:
                cls._tokens = cls.process_tokendef('', cls.get_tokendefs())

        return type.__call__(cls, *args, **kwds)


class RegexLexer(Lexer, metaclass=RegexLexerMeta):
    """
    Base for simple stateful regular expression-based lexers.
    Simplifies the lexing process so that you need only
    provide a list of states and regular expressions.
    """

    #: Flags for compiling the regular expressions.
    #: Defaults to MULTILINE.
    flags = re.MULTILINE

    #: At all times there is a stack of states. Initially, the stack contains
    #: a single state 'root'. The top of the stack is called "the current state".
    #:
    #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
    #:
    #: ``new_state`` can be omitted to signify no state transition.
    #: If ``new_state`` is a string, it is pushed on the stack. This ensures
    #: the new current state is ``new_state``.
    #: If ``new_state`` is a tuple of strings, all of those strings are pushed
    #: on the stack and the current state will be the last element of the tuple.
    #: ``new_state`` can also be ``combined('state1', 'state2', ...)``
    #: to signify a new, anonymous state combined from the rules of two
    #: or more existing ones.
    #: Furthermore, it can be '#pop' to signify going back one step in
    #: the state stack, or '#push' to push the current state on the stack
    #: again. Note that if you push while in a combined state, the combined
    #: state itself is pushed, and not only the state in which the rule is
    #: defined.
    #:
    #: The tuple can also be replaced with ``include('state')``, in which
    #: case the rules from the state named by the string are included in the
    #: current one.
    tokens = {}
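
    # A minimal sketch of a concrete ``tokens`` definition (the regexes are
    # illustrative; the token types are real Pygments tokens):
    #
    #     tokens = {
    #         'root': [
    #             (r'#.*?$', Comment.Single),
    #             (r'"', String, 'string'),
    #             (r'\s+', Whitespace),
    #             (r'.', Text),
    #         ],
    #         'string': [
    #             (r'[^"\\]+', String),
    #             (r'\\.', String.Escape),
    #             (r'"', String, '#pop'),
    #         ],
    #     }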

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield pos, action, m.group()
                        else:
                            yield from action(self, m)
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(statestack) > 1:
                                        statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop, but keep at least one state on the stack
                            # (random code leading to unexpected pops should
                            # not allow exceptions)
                            if abs(new_state) >= len(statestack):
                                del statestack[1:]
                            else:
                                del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # We are here only if all state tokens have been considered
                # and there was not a match on any of them.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Whitespace, '\n'
                        pos += 1
                        continue
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    break


class LexerContext:
    """
    A helper object that holds lexer position data.
    """

    def __init__(self, text, pos, stack=None, end=None):
        self.text = text
        self.pos = pos
        self.end = end or len(text)  # end=0 not supported ;-)
        self.stack = stack or ['root']

    def __repr__(self):
        return 'LexerContext(%r, %r, %r)' % (
            self.text, self.pos, self.stack)


class ExtendedRegexLexer(RegexLexer):
    """
    A RegexLexer that uses a context object to store its state.
    """

    def get_tokens_unprocessed(self, text=None, context=None):
        """
        Split ``text`` into (tokentype, text) pairs.
        If ``context`` is given, use this lexer context instead.
        """
        tokendefs = self._tokens
        if not context:
            ctx = LexerContext(text, 0)
            statetokens = tokendefs['root']
        else:
            ctx = context
            statetokens = tokendefs[ctx.stack[-1]]
            text = ctx.text
        while 1:
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield ctx.pos, action, m.group()
                            ctx.pos = m.end()
                        else:
                            yield from action(self, m, ctx)
                            if not new_state:
                                # altered the state stack?
                                statetokens = tokendefs[ctx.stack[-1]]
                            # CAUTION: callback must set ctx.pos!
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(ctx.stack) > 1:
                                        ctx.stack.pop()
                                elif state == '#push':
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # see RegexLexer for why this check is made
                            if abs(new_state) >= len(ctx.stack):
                                del ctx.stack[1:]
                            else:
                                del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, "wrong state def: %r" % new_state
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                try:
                    if ctx.pos >= ctx.end:
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to "root"
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        yield ctx.pos, Text, '\n'
                        ctx.pos += 1
                        continue
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break
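
    # A minimal sketch of a callback written for this class (hypothetical
    # helper; unlike plain RegexLexer callbacks it receives the LexerContext
    # and must advance ``ctx.pos`` itself):
    #
    #     def process_heredoc(lexer, match, ctx):
    #         yield match.start(), String.Heredoc, match.group()
    #         ctx.pos = match.end()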


def do_insertions(insertions, tokens):
    """
    Helper for lexers which must combine the results of several
    sublexers.

    ``insertions`` is a list of ``(index, itokens)`` pairs.
    Each ``itokens`` iterable should be inserted at position
    ``index`` into the token stream given by the ``tokens``
    argument.

    The result is a combined token stream.

    TODO: clean up the code here.
    """
    insertions = iter(insertions)
    try:
        index, itokens = next(insertions)
    except StopIteration:
        # no insertions
        yield from tokens
        return

    realpos = None
    insleft = True

    # iterate over the token stream where we want to insert
    # the tokens from the insertion list.
    for i, t, v in tokens:
        # first iteration. store the position of first item
        if realpos is None:
            realpos = i
        oldi = 0
        while insleft and i + len(v) >= index:
            tmpval = v[oldi:index - i]
            if tmpval:
                yield realpos, t, tmpval
                realpos += len(tmpval)
            for it_index, it_token, it_value in itokens:
                yield realpos, it_token, it_value
                realpos += len(it_value)
            oldi = index - i
            try:
                index, itokens = next(insertions)
            except StopIteration:
                insleft = False
                break  # not strictly necessary
        if oldi < len(v):
            yield realpos, t, v[oldi:]
            realpos += len(v) - oldi

    # leftover tokens
    while insleft:
        # no normal tokens, set realpos to zero
        realpos = realpos or 0
        for p, t, v in itokens:
            yield realpos, t, v
            realpos += len(v)
        try:
            index, itokens = next(insertions)
        except StopIteration:
            insleft = False
            break  # not strictly necessary
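
# A minimal sketch of combining two token streams (variable names and offsets
# are illustrative): inserting a prompt's tokens at offset 0 of a code stream.
#
#     code_tokens = python_lexer.get_tokens_unprocessed(code)
#     insertions = [(0, [(0, Generic.Prompt, '>>> ')])]
#     combined_stream = do_insertions(insertions, code_tokens)
#
# This is the pattern used by console/session lexers that mix prompt tokens
# with sublexer output.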


class ProfilingRegexLexerMeta(RegexLexerMeta):
    """Metaclass for ProfilingRegexLexer, collects regex timing info."""

    def _process_regex(cls, regex, rflags, state):
        if isinstance(regex, words):
            rex = regex_opt(regex.words, prefix=regex.prefix,
                            suffix=regex.suffix)
        else:
            rex = regex
        compiled = re.compile(rex, rflags)

        def match_func(text, pos, endpos=sys.maxsize):
            info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
            t0 = time.time()
            res = compiled.match(text, pos, endpos)
            t1 = time.time()
            info[0] += 1
            info[1] += t1 - t0
            return res
        return match_func


class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
    """Drop-in replacement for RegexLexer that does profiling of its regexes."""

    _prof_data = []
    _prof_sort_index = 4  # defaults to time per call

    def get_tokens_unprocessed(self, text, stack=('root',)):
        # this needs to be a stack, since using(this) will produce nested calls
        self.__class__._prof_data.append({})
        yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
        rawdata = self.__class__._prof_data.pop()
        data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
                        n, 1000 * t, 1000 * t / n)
                       for ((s, r), (n, t)) in rawdata.items()),
                      key=lambda x: x[self._prof_sort_index],
                      reverse=True)
        sum_total = sum(x[3] for x in data)

        print()
        print('Profiling result for %s lexing %d chars in %.3f ms' %
              (self.__class__.__name__, len(text), sum_total))
        print('=' * 110)
        print('%-20s %-64s ncalls  tottime  percall' % ('state', 'regex'))
        print('-' * 110)
        for d in data:
            print('%-20s %-65s %5d %8.4f %8.4f' % d)
        print('=' * 110)
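
# A minimal usage sketch: to profile an existing lexer's regexes, change its
# base class from RegexLexer to ProfilingRegexLexer (the lexer below is
# illustrative) and lex some input; a timing table is printed afterwards.
#
#     class MyLexer(ProfilingRegexLexer):
#         tokens = {'root': [(r'\w+', Text), (r'\s+', Whitespace)]}
#
#     list(MyLexer().get_tokens('profiling demo text\n'))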