Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/pygments/lexer.py: 43%
Shortcuts on this page
r m x toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
Shortcuts on this page
r m x toggle line displays
j k next/prev highlighted chunk
0 (zero) top of page
1 (one) first highlighted chunk
1"""
2 pygments.lexer
3 ~~~~~~~~~~~~~~
5 Base lexer classes.
7 :copyright: Copyright 2006-present by the Pygments team, see AUTHORS.
8 :license: BSD, see LICENSE for details.
9"""
11import re
12import sys
13import time
15from pygments.filter import apply_filters, Filter
16from pygments.filters import get_filter_by_name
17from pygments.token import Error, Text, Other, Whitespace, _TokenType
18from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \
19 make_analysator, Future, guess_decode
20from pygments.regexopt import regex_opt
# Public API of this module.
__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer',
           'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this',
           'default', 'words', 'line_re']

# Matches a single line including its trailing newline (non-greedy, so each
# match stops at the first "\n").
line_re = re.compile('.*?\n')

# BOM -> encoding table consulted before falling back to chardet when the
# ``encoding='chardet'`` option is used.  Order matters: the four-byte
# UTF-32 BOMs must be tested before the two-byte UTF-16 BOMs that are
# their prefixes.
_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'),
                 (b'\xff\xfe\0\0', 'utf-32'),
                 (b'\0\0\xfe\xff', 'utf-32be'),
                 (b'\xff\xfe', 'utf-16'),
                 (b'\xfe\xff', 'utf-16be')]

# Fallback ``analyse_text`` used when a lexer does not define one:
# never claims a match.
_default_analyse = staticmethod(lambda x: 0.0)
class LexerMeta(type):
    """
    Metaclass that wraps a class-level ``analyse_text`` method into a
    static method whose return value is always coerced to ``float``
    (via :func:`make_analysator`).
    """

    def __new__(mcs, name, bases, namespace):
        if 'analyse_text' in namespace:
            namespace['analyse_text'] = make_analysator(namespace['analyse_text'])
        return super().__new__(mcs, name, bases, namespace)
class Lexer(metaclass=LexerMeta):
    """
    Lexer for a specific language.

    See also :doc:`lexerdevelopment`, a high-level guide to writing
    lexers.

    Lexer classes have attributes used for choosing the most appropriate
    lexer based on various criteria.

    .. autoattribute:: name
       :no-value:
    .. autoattribute:: aliases
       :no-value:
    .. autoattribute:: filenames
       :no-value:
    .. autoattribute:: alias_filenames
    .. autoattribute:: mimetypes
       :no-value:
    .. autoattribute:: priority

    Lexers included in Pygments should have two additional attributes:

    .. autoattribute:: url
       :no-value:
    .. autoattribute:: version_added
       :no-value:

    Lexers included in Pygments may have additional attributes:

    .. autoattribute:: _example
       :no-value:

    You can pass options to the constructor. The basic options recognized
    by all lexers and processed by the base `Lexer` class are:

    ``stripnl``
        Strip leading and trailing newlines from the input (default: True).
    ``stripall``
        Strip all leading and trailing whitespace from the input
        (default: False).
    ``ensurenl``
        Make sure that the input ends with a newline (default: True). This
        is required for some lexers that consume input linewise.

        .. versionadded:: 1.3

    ``tabsize``
        If given and greater than 0, expand tabs in the input (default: 0).
    ``encoding``
        If given, must be an encoding name. This encoding will be used to
        convert the input string to Unicode, if it is not already a Unicode
        string (default: ``'guess'``, which uses a simple UTF-8 / Locale /
        Latin1 detection. Can also be ``'chardet'`` to use the chardet
        library, if it is installed.
    ``inencoding``
        Overrides the ``encoding`` if given.
    """

    #: Full name of the lexer, in human-readable form
    name = None

    #: A list of short, unique identifiers that can be used to look
    #: up the lexer from a list, e.g., using `get_lexer_by_name()`.
    aliases = []

    #: A list of `fnmatch` patterns that match filenames which contain
    #: content for this lexer. The patterns in this list should be unique among
    #: all lexers.
    filenames = []

    #: A list of `fnmatch` patterns that match filenames which may or may not
    #: contain content for this lexer. This list is used by the
    #: :func:`.guess_lexer_for_filename()` function, to determine which lexers
    #: are then included in guessing the correct one. That means that
    #: e.g. every lexer for HTML and a template language should include
    #: ``\*.html`` in this list.
    alias_filenames = []

    #: A list of MIME types for content that can be lexed with this lexer.
    mimetypes = []

    #: Priority, should multiple lexers match and no content is provided
    priority = 0

    #: URL of the language specification/definition. Used in the Pygments
    #: documentation. Set to an empty string to disable.
    url = None

    #: Version of Pygments in which the lexer was added.
    version_added = None

    #: Example file name. Relative to the ``tests/examplefiles`` directory.
    #: This is used by the documentation generator to show an example.
    _example = None

    def __init__(self, **options):
        """
        This constructor takes arbitrary options as keyword arguments.
        Every subclass must first process its own options and then call
        the `Lexer` constructor, since it processes the basic
        options like `stripnl`.

        An example looks like this:

        .. sourcecode:: python

           def __init__(self, **options):
               self.compress = options.get('compress', '')
               Lexer.__init__(self, **options)

        As these options must all be specifiable as strings (due to the
        command line usage), there are various utility functions
        available to help with that, see `Utilities`_.
        """
        self.options = options
        self.stripnl = get_bool_opt(options, 'stripnl', True)
        self.stripall = get_bool_opt(options, 'stripall', False)
        self.ensurenl = get_bool_opt(options, 'ensurenl', True)
        self.tabsize = get_int_opt(options, 'tabsize', 0)
        # 'inencoding', if given, overrides 'encoding'.
        self.encoding = options.get('encoding', 'guess')
        self.encoding = options.get('inencoding') or self.encoding
        self.filters = []
        for filter_ in get_list_opt(options, 'filters', ()):
            self.add_filter(filter_)

    def __repr__(self):
        if self.options:
            return f'<pygments.lexers.{self.__class__.__name__} with {self.options!r}>'
        else:
            return f'<pygments.lexers.{self.__class__.__name__}>'

    def add_filter(self, filter_, **options):
        """
        Add a new stream filter to this lexer.

        ``filter_`` may be a `Filter` instance or a filter name; in the
        latter case it is looked up and instantiated with ``options``.
        """
        if not isinstance(filter_, Filter):
            filter_ = get_filter_by_name(filter_, **options)
        self.filters.append(filter_)

    def analyse_text(text):
        """
        A static method which is called for lexer guessing.

        It should analyse the text and return a float in the range
        from ``0.0`` to ``1.0``. If it returns ``0.0``, the lexer
        will not be selected as the most probable one, if it returns
        ``1.0``, it will be selected immediately. This is used by
        `guess_lexer`.

        The `LexerMeta` metaclass automatically wraps this function so
        that it works like a static method (no ``self`` or ``cls``
        parameter) and the return value is automatically converted to
        `float`. If the return value is an object that is boolean `False`
        it's the same as if the return values was ``0.0``.
        """

    def _preprocess_lexer_input(self, text):
        """Apply preprocessing such as decoding the input, removing BOM and normalizing newlines."""
        if not isinstance(text, str):
            # Byte input: decode according to the 'encoding' option.
            if self.encoding == 'guess':
                text, _ = guess_decode(text)
            elif self.encoding == 'chardet':
                try:
                    import chardet
                except ImportError as e:
                    raise ImportError('To enable chardet encoding guessing, '
                                      'please install the chardet library '
                                      'from http://chardet.feedparser.org/') from e
                # check for BOM first
                decoded = None
                for bom, encoding in _encoding_map:
                    if text.startswith(bom):
                        decoded = text[len(bom):].decode(encoding, 'replace')
                        break
                # no BOM found, so use chardet
                if decoded is None:
                    enc = chardet.detect(text[:1024])  # Guess using first 1KB
                    decoded = text.decode(enc.get('encoding') or 'utf-8',
                                          'replace')
                text = decoded
            else:
                text = text.decode(self.encoding)
                # drop a decoded BOM, if any
                if text.startswith('\ufeff'):
                    text = text[len('\ufeff'):]
        else:
            # Already a str: only strip a leading BOM character.
            if text.startswith('\ufeff'):
                text = text[len('\ufeff'):]

        # text now *is* a unicode string; normalize line endings to "\n".
        text = text.replace('\r\n', '\n')
        text = text.replace('\r', '\n')
        if self.stripall:
            text = text.strip()
        elif self.stripnl:
            text = text.strip('\n')
        if self.tabsize > 0:
            text = text.expandtabs(self.tabsize)
        if self.ensurenl and not text.endswith('\n'):
            text += '\n'

        return text

    def get_tokens(self, text, unfiltered=False):
        """
        This method is the basic interface of a lexer. It is called by
        the `highlight()` function. It must process the text and return an
        iterable of ``(tokentype, value)`` pairs from `text`.

        Normally, you don't need to override this method. The default
        implementation processes the options recognized by all lexers
        (`stripnl`, `stripall` and so on), and then yields all tokens
        from `get_tokens_unprocessed()`, with the ``index`` dropped.

        If `unfiltered` is set to `True`, the filtering mechanism is
        bypassed even if filters are defined.
        """
        text = self._preprocess_lexer_input(text)

        def streamer():
            # Drop the index component of each unprocessed token.
            for _, t, v in self.get_tokens_unprocessed(text):
                yield t, v
        stream = streamer()
        if not unfiltered:
            stream = apply_filters(stream, self.filters, self)
        return stream

    def get_tokens_unprocessed(self, text):
        """
        This method should process the text and return an iterable of
        ``(index, tokentype, value)`` tuples where ``index`` is the starting
        position of the token within the input text.

        It must be overridden by subclasses. It is recommended to
        implement it as a generator to maximize effectiveness.
        """
        raise NotImplementedError
class DelegatingLexer(Lexer):
    """
    This lexer takes two lexer as arguments. A root lexer and
    a language lexer. First everything is scanned using the language
    lexer, afterwards all ``Other`` tokens are lexed using the root
    lexer.

    The lexers from the ``template`` lexer package use this base lexer.
    """

    def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options):
        self.root_lexer = _root_lexer(**options)
        self.language_lexer = _language_lexer(**options)
        self.needle = _needle
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        # Accumulate all needle-token text for the root lexer, remembering
        # at which offset each run of language-lexer tokens must go back in.
        root_text = ''
        insertions = []
        pending = []
        for index, token, value in self.language_lexer.get_tokens_unprocessed(text):
            if token is not self.needle:
                pending.append((index, token, value))
                continue
            if pending:
                insertions.append((len(root_text), pending))
                pending = []
            root_text += value
        if pending:
            insertions.append((len(root_text), pending))
        return do_insertions(
            insertions, self.root_lexer.get_tokens_unprocessed(root_text))
323# ------------------------------------------------------------------------------
324# RegexLexer and ExtendedRegexLexer
325#
class include(str):  # pylint: disable=invalid-name
    """
    Marker string used in a token definition list: pulls in all rules
    from the state it names.
    """
335class _inherit:
336 """
337 Indicates the a state should inherit from its superclass.
338 """
339 def __repr__(self):
340 return 'inherit'
342inherit = _inherit() # pylint: disable=invalid-name
class combined(tuple):  # pylint: disable=invalid-name
    """
    Marker tuple: a new anonymous state combined from several states.
    """

    def __new__(cls, *args):
        # Pack the positional arguments into the tuple itself.
        return tuple.__new__(cls, args)

    def __init__(self, *args):
        # Swallow the arguments; tuple.__init__ ignores them anyway.
        pass
358class _PseudoMatch:
359 """
360 A pseudo match object constructed from a string.
361 """
363 def __init__(self, start, text):
364 self._text = text
365 self._start = start
367 def start(self, arg=None):
368 return self._start
370 def end(self, arg=None):
371 return self._start + len(self._text)
373 def group(self, arg=None):
374 if arg:
375 raise IndexError('No such group')
376 return self._text
378 def groups(self):
379 return (self._text,)
381 def groupdict(self):
382 return {}
def bygroups(*args):
    """
    Callback that yields multiple actions for each group in the match.

    ``args`` supplies one action per regex group: ``None`` skips the
    group, a plain token type emits the group's text with that type, and
    any other callable is invoked recursively on a `_PseudoMatch`
    covering just that group.
    """
    def callback(lexer, match, ctx=None):
        for i, action in enumerate(args):
            if action is None:
                continue
            elif type(action) is _TokenType:
                data = match.group(i + 1)
                # empty/unmatched groups produce no token
                if data:
                    yield match.start(i + 1), action, data
            else:
                data = match.group(i + 1)
                if data is not None:
                    if ctx:
                        ctx.pos = match.start(i + 1)
                    for item in action(lexer,
                                       _PseudoMatch(match.start(i + 1), data), ctx):
                        if item:
                            yield item
        # advance the context past the whole match
        if ctx:
            ctx.pos = match.end()
    return callback
411class _This:
412 """
413 Special singleton used for indicating the caller class.
414 Used by ``using``.
415 """
417this = _This()
def using(_other, **kwargs):
    """
    Callback that processes the match with a different lexer.

    The keyword arguments are forwarded to the lexer, except `state` which
    is handled separately.

    `state` specifies the state that the new lexer will start in, and can
    be an enumerable such as ('root', 'inline', 'string') or a simple
    string which is assumed to be on top of the root state.

    Note: For that to work, `_other` must not be an `ExtendedRegexLexer`.
    """
    # Keyword args passed to get_tokens_unprocessed (currently only 'stack').
    gt_kwargs = {}
    if 'state' in kwargs:
        s = kwargs.pop('state')
        if isinstance(s, (list, tuple)):
            gt_kwargs['stack'] = s
        else:
            gt_kwargs['stack'] = ('root', s)

    if _other is this:
        def callback(lexer, match, ctx=None):
            # if keyword arguments are given the callback
            # function has to create a new lexer instance
            if kwargs:
                # XXX: cache that somehow
                d = dict(lexer.options)
                d.update(kwargs)
                lx = lexer.__class__(**d)
            else:
                lx = lexer
            # re-offset sub-lexer token indices to the position of the match
            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    else:
        def callback(lexer, match, ctx=None):
            # XXX: cache that somehow
            d = dict(lexer.options)
            d.update(kwargs)
            lx = _other(**d)

            # re-offset sub-lexer token indices to the position of the match
            s = match.start()
            for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs):
                yield i + s, t, v
            if ctx:
                ctx.pos = match.end()
    return callback
class default:
    """
    Indicates a state or state action (e.g. #pop) to apply without
    consuming any input.  ``default('#pop')`` is equivalent to
    ``('', Token, '#pop')``; state tuples may be used as well.

    .. versionadded:: 2.0
    """

    def __init__(self, state):
        self.state = state
class words(Future):
    """
    Deferred regex: a list of literal words, turned into one optimized
    alternation pattern when `get` is called.

    .. versionadded:: 2.0
    """

    def __init__(self, words, prefix='', suffix=''):
        self.words = words
        self.prefix = prefix
        self.suffix = suffix

    def get(self):
        # Build the optimized alternation lazily, on demand.
        return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix)
class RegexLexerMeta(LexerMeta):
    """
    Metaclass for RegexLexer, creates the self._tokens attribute from
    self.tokens on the first instantiation.
    """

    def _process_regex(cls, regex, rflags, state):
        """Preprocess the regular expression component of a token definition."""
        # Futures (e.g. ``words``) build their pattern lazily.
        if isinstance(regex, Future):
            regex = regex.get()
        return re.compile(regex, rflags).match

    def _process_token(cls, token):
        """Preprocess the token component of a token definition."""
        assert type(token) is _TokenType or callable(token), \
            f'token type must be simple type or callable, not {token!r}'
        return token

    def _process_new_state(cls, new_state, unprocessed, processed):
        """Preprocess the state transition action of a token definition."""
        if isinstance(new_state, str):
            # an existing state, or a '#pop'/'#push' action
            if new_state == '#pop':
                return -1
            elif new_state in unprocessed:
                return (new_state,)
            elif new_state == '#push':
                return new_state
            elif new_state[:5] == '#pop:':
                # '#pop:n' pops n states; stored as a negative int
                return -int(new_state[5:])
            else:
                assert False, f'unknown new state {new_state!r}'
        elif isinstance(new_state, combined):
            # combine a new state from existing ones
            tmp_state = '_tmp_%d' % cls._tmpname
            cls._tmpname += 1
            itokens = []
            for istate in new_state:
                assert istate != new_state, f'circular state ref {istate!r}'
                itokens.extend(cls._process_state(unprocessed,
                                                  processed, istate))
            processed[tmp_state] = itokens
            return (tmp_state,)
        elif isinstance(new_state, tuple):
            # push more than one state
            for istate in new_state:
                assert (istate in unprocessed or
                        istate in ('#pop', '#push')), \
                    'unknown new state ' + istate
            return new_state
        else:
            assert False, f'unknown new state def {new_state!r}'

    def _process_state(cls, unprocessed, processed, state):
        """Preprocess a single state definition (memoized in ``processed``)."""
        assert isinstance(state, str), f"wrong state name {state!r}"
        assert state[0] != '#', f"invalid state name {state!r}"
        if state in processed:
            return processed[state]
        tokens = processed[state] = []
        rflags = cls.flags
        for tdef in unprocessed[state]:
            if isinstance(tdef, include):
                # it's a state reference
                assert tdef != state, f"circular state reference {state!r}"
                tokens.extend(cls._process_state(unprocessed, processed,
                                                 str(tdef)))
                continue
            if isinstance(tdef, _inherit):
                # should be processed already, but may not in the case of:
                # 1. the state has no counterpart in any parent
                # 2. the state includes more than one 'inherit'
                continue
            if isinstance(tdef, default):
                new_state = cls._process_new_state(tdef.state, unprocessed, processed)
                # empty pattern always matches without consuming input
                tokens.append((re.compile('').match, None, new_state))
                continue

            assert type(tdef) is tuple, f"wrong rule def {tdef!r}"

            try:
                rex = cls._process_regex(tdef[0], rflags, state)
            except Exception as err:
                raise ValueError(f"uncompilable regex {tdef[0]!r} in state {state!r} of {cls!r}: {err}") from err

            token = cls._process_token(tdef[1])

            if len(tdef) == 2:
                new_state = None
            else:
                new_state = cls._process_new_state(tdef[2],
                                                   unprocessed, processed)

            tokens.append((rex, token, new_state))
        return tokens

    def process_tokendef(cls, name, tokendefs=None):
        """Preprocess a dictionary of token definitions."""
        processed = cls._all_tokens[name] = {}
        tokendefs = tokendefs or cls.tokens[name]
        for state in list(tokendefs):
            cls._process_state(tokendefs, processed, state)
        return processed

    def get_tokendefs(cls):
        """
        Merge tokens from superclasses in MRO order, returning a single tokendef
        dictionary.

        Any state that is not defined by a subclass will be inherited
        automatically. States that *are* defined by subclasses will, by
        default, override that state in the superclass. If a subclass wishes to
        inherit definitions from a superclass, it can use the special value
        "inherit", which will cause the superclass' state definition to be
        included at that point in the state.
        """
        tokens = {}
        inheritable = {}
        for c in cls.__mro__:
            toks = c.__dict__.get('tokens', {})

            for state, items in toks.items():
                curitems = tokens.get(state)
                if curitems is None:
                    # N.b. because this is assigned by reference, sufficiently
                    # deep hierarchies are processed incrementally (e.g. for
                    # A(B), B(C), C(RegexLexer), B will be premodified so X(B)
                    # will not see any inherits in B).
                    tokens[state] = items
                    try:
                        inherit_ndx = items.index(inherit)
                    except ValueError:
                        continue
                    inheritable[state] = inherit_ndx
                    continue

                inherit_ndx = inheritable.pop(state, None)
                if inherit_ndx is None:
                    continue

                # Replace the "inherit" value with the items
                curitems[inherit_ndx:inherit_ndx+1] = items
                try:
                    # N.b. this is the index in items (that is, the superclass
                    # copy), so offset required when storing below.
                    new_inh_ndx = items.index(inherit)
                except ValueError:
                    pass
                else:
                    inheritable[state] = inherit_ndx + new_inh_ndx

        return tokens

    def __call__(cls, *args, **kwds):
        """Instantiate cls after preprocessing its token definitions."""
        if '_tokens' not in cls.__dict__:
            cls._all_tokens = {}
            cls._tmpname = 0
            if hasattr(cls, 'token_variants') and cls.token_variants:
                # don't process yet
                pass
            else:
                cls._tokens = cls.process_tokendef('', cls.get_tokendefs())

        return type.__call__(cls, *args, **kwds)
class RegexLexer(Lexer, metaclass=RegexLexerMeta):
    """
    Base for simple stateful regular expression-based lexers.
    Simplifies the lexing process so that you need only
    provide a list of states and regular expressions.
    """

    #: Flags for compiling the regular expressions.
    #: Defaults to MULTILINE.
    flags = re.MULTILINE

    #: At all time there is a stack of states. Initially, the stack contains
    #: a single state 'root'. The top of the stack is called "the current state".
    #:
    #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}``
    #:
    #: ``new_state`` can be omitted to signify no state transition.
    #: If ``new_state`` is a string, it is pushed on the stack. This ensure
    #: the new current state is ``new_state``.
    #: If ``new_state`` is a tuple of strings, all of those strings are pushed
    #: on the stack and the current state will be the last element of the list.
    #: ``new_state`` can also be ``combined('state1', 'state2', ...)``
    #: to signify a new, anonymous state combined from the rules of two
    #: or more existing ones.
    #: Furthermore, it can be '#pop' to signify going back one step in
    #: the state stack, or '#push' to push the current state on the stack
    #: again. Note that if you push while in a combined state, the combined
    #: state itself is pushed, and not only the state in which the rule is
    #: defined.
    #:
    #: The tuple can also be replaced with ``include('state')``, in which
    #: case the rules from the state named by the string are included in the
    #: current one.
    tokens = {}

    def get_tokens_unprocessed(self, text, stack=('root',)):
        """
        Split ``text`` into (tokentype, text) pairs.

        ``stack`` is the initial stack (default: ``['root']``)
        """
        pos = 0
        tokendefs = self._tokens
        statestack = list(stack)
        statetokens = tokendefs[statestack[-1]]
        while 1:
            # Try each rule of the current state in order at ``pos``.
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, pos)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield pos, action, m.group()
                        else:
                            # callback action (e.g. bygroups/using)
                            yield from action(self, m)
                    pos = m.end()
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(statestack) > 1:
                                        statestack.pop()
                                elif state == '#push':
                                    statestack.append(statestack[-1])
                                else:
                                    statestack.append(state)
                        elif isinstance(new_state, int):
                            # pop, but keep at least one state on the stack
                            # (random code leading to unexpected pops should
                            # not allow exceptions)
                            if abs(new_state) >= len(statestack):
                                del statestack[1:]
                            else:
                                del statestack[new_state:]
                        elif new_state == '#push':
                            statestack.append(statestack[-1])
                        else:
                            assert False, f"wrong state def: {new_state!r}"
                        statetokens = tokendefs[statestack[-1]]
                    break
            else:
                # We are here only if all state tokens have been considered
                # and there was not a match on any of them.
                try:
                    if text[pos] == '\n':
                        # at EOL, reset state to "root"
                        statestack = ['root']
                        statetokens = tokendefs['root']
                        yield pos, Whitespace, '\n'
                        pos += 1
                        continue
                    # emit the unmatched character as an Error token
                    yield pos, Error, text[pos]
                    pos += 1
                except IndexError:
                    # end of input reached
                    break
class LexerContext:
    """
    A helper object that holds lexer position data: the text being lexed,
    the current position, the end position and the state stack.
    """

    def __init__(self, text, pos, stack=None, end=None):
        self.text = text
        self.pos = pos
        self.end = end if end else len(text)      # end=0 not supported ;-)
        self.stack = stack if stack else ['root']

    def __repr__(self):
        return f'LexerContext({self.text!r}, {self.pos!r}, {self.stack!r})'
class ExtendedRegexLexer(RegexLexer):
    """
    A RegexLexer that uses a context object to store its state.
    """

    def get_tokens_unprocessed(self, text=None, context=None):
        """
        Split ``text`` into (tokentype, text) pairs.
        If ``context`` is given, use this lexer context instead.
        """
        tokendefs = self._tokens
        if not context:
            ctx = LexerContext(text, 0)
            statetokens = tokendefs['root']
        else:
            ctx = context
            statetokens = tokendefs[ctx.stack[-1]]
            text = ctx.text
        while 1:
            # Try each rule of the current state in order at ``ctx.pos``.
            for rexmatch, action, new_state in statetokens:
                m = rexmatch(text, ctx.pos, ctx.end)
                if m:
                    if action is not None:
                        if type(action) is _TokenType:
                            yield ctx.pos, action, m.group()
                            ctx.pos = m.end()
                        else:
                            yield from action(self, m, ctx)
                            if not new_state:
                                # altered the state stack?
                                statetokens = tokendefs[ctx.stack[-1]]
                                # CAUTION: callback must set ctx.pos!
                    if new_state is not None:
                        # state transition
                        if isinstance(new_state, tuple):
                            for state in new_state:
                                if state == '#pop':
                                    if len(ctx.stack) > 1:
                                        ctx.stack.pop()
                                elif state == '#push':
                                    ctx.stack.append(ctx.stack[-1])
                                else:
                                    ctx.stack.append(state)
                        elif isinstance(new_state, int):
                            # see RegexLexer for why this check is made
                            if abs(new_state) >= len(ctx.stack):
                                del ctx.stack[1:]
                            else:
                                del ctx.stack[new_state:]
                        elif new_state == '#push':
                            ctx.stack.append(ctx.stack[-1])
                        else:
                            assert False, f"wrong state def: {new_state!r}"
                        statetokens = tokendefs[ctx.stack[-1]]
                    break
            else:
                # No rule matched at the current position.
                try:
                    if ctx.pos >= ctx.end:
                        break
                    if text[ctx.pos] == '\n':
                        # at EOL, reset state to "root"
                        ctx.stack = ['root']
                        statetokens = tokendefs['root']
                        yield ctx.pos, Text, '\n'
                        ctx.pos += 1
                        continue
                    # emit the unmatched character as an Error token
                    yield ctx.pos, Error, text[ctx.pos]
                    ctx.pos += 1
                except IndexError:
                    break
def do_insertions(insertions, tokens):
    """
    Helper for lexers which must combine the results of several
    sublexers.

    ``insertions`` is a list of ``(index, itokens)`` pairs.
    Each ``itokens`` iterable should be inserted at position
    ``index`` into the token stream given by the ``tokens``
    argument.

    The result is a combined token stream.
    """
    insertions = iter(insertions)
    try:
        ins_index, ins_tokens = next(insertions)
    except StopIteration:
        # Nothing to insert: pass the main stream through untouched.
        yield from tokens
        return

    out_pos = None   # running output position; seeded from the first token
    pending = True   # True while insertions remain to be emitted

    for pos, token, value in tokens:
        if out_pos is None:
            out_pos = pos
        consumed = 0  # how much of ``value`` has been emitted already
        # Emit every insertion that falls inside (or at the end of) ``value``.
        while pending and pos + len(value) >= ins_index:
            piece = value[consumed:ins_index - pos]
            if piece:
                yield out_pos, token, piece
                out_pos += len(piece)
            for _, ins_token, ins_value in ins_tokens:
                yield out_pos, ins_token, ins_value
                out_pos += len(ins_value)
            consumed = ins_index - pos
            try:
                ins_index, ins_tokens = next(insertions)
            except StopIteration:
                pending = False
                break  # not strictly necessary
        # Emit whatever is left of the current token's text.
        if consumed < len(value):
            yield out_pos, token, value[consumed:]
            out_pos += len(value) - consumed

    # Insertions that fall past the end of the token stream.
    while pending:
        # no normal tokens were seen: start output positions at zero
        out_pos = out_pos or 0
        for _, ins_token, ins_value in ins_tokens:
            yield out_pos, ins_token, ins_value
            out_pos += len(ins_value)
        try:
            ins_index, ins_tokens = next(insertions)
        except StopIteration:
            pending = False
            break  # not strictly necessary
class ProfilingRegexLexerMeta(RegexLexerMeta):
    """Metaclass for ProfilingRegexLexer, collects regex timing info."""

    def _process_regex(cls, regex, rflags, state):
        """Compile *regex* and return a ``match``-compatible wrapper that
        records (call count, cumulative seconds) per ``(state, pattern)``
        into the top entry of ``cls._prof_data``.
        """
        if isinstance(regex, words):
            rex = regex_opt(regex.words, prefix=regex.prefix,
                            suffix=regex.suffix)
        else:
            rex = regex
        compiled = re.compile(rex, rflags)

        def match_func(text, pos, endpos=sys.maxsize):
            info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0])
            # Use perf_counter() instead of time.time(): it is monotonic and
            # high-resolution, so very short match calls are timed reliably
            # and cannot yield negative durations on clock adjustments.
            t0 = time.perf_counter()
            res = compiled.match(text, pos, endpos)
            t1 = time.perf_counter()
            info[0] += 1
            info[1] += t1 - t0
            return res
        return match_func
class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta):
    """Drop-in replacement for RegexLexer that does profiling of its regexes."""

    # stack of per-run profiling dicts; see get_tokens_unprocessed
    _prof_data = []
    _prof_sort_index = 4  # defaults to time per call

    def get_tokens_unprocessed(self, text, stack=('root',)):
        # this needs to be a stack, since using(this) will produce nested calls
        self.__class__._prof_data.append({})
        yield from RegexLexer.get_tokens_unprocessed(self, text, stack)
        rawdata = self.__class__._prof_data.pop()
        # rows: (state, truncated pattern, ncalls, total ms, ms per call)
        data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65],
                        n, 1000 * t, 1000 * t / n)
                       for ((s, r), (n, t)) in rawdata.items()),
                      key=lambda x: x[self._prof_sort_index],
                      reverse=True)
        sum_total = sum(x[3] for x in data)

        print()
        print('Profiling result for %s lexing %d chars in %.3f ms' %
              (self.__class__.__name__, len(text), sum_total))
        print('=' * 110)
        print('%-20s %-64s ncalls tottime percall' % ('state', 'regex'))
        print('-' * 110)
        for d in data:
            print('%-20s %-65s %5d %8.4f %8.4f' % d)
        print('=' * 110)