Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.11/site-packages/pygments/lexer.py: 21%

476 statements  

1""" 

2 pygments.lexer 

3 ~~~~~~~~~~~~~~ 

4 

5 Base lexer classes. 

6 

7 :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS. 

8 :license: BSD, see LICENSE for details. 

9""" 

10 

11import re 

12import sys 

13import time 

14 

15from pygments.filter import apply_filters, Filter 

16from pygments.filters import get_filter_by_name 

17from pygments.token import Error, Text, Other, Whitespace, _TokenType 

18from pygments.util import get_bool_opt, get_int_opt, get_list_opt, \ 

19 make_analysator, Future, guess_decode 

20from pygments.regexopt import regex_opt 

21 

22__all__ = ['Lexer', 'RegexLexer', 'ExtendedRegexLexer', 'DelegatingLexer', 

23 'LexerContext', 'include', 'inherit', 'bygroups', 'using', 'this', 

24 'default', 'words', 'line_re'] 

25 

26line_re = re.compile('.*?\n') 

27 

28_encoding_map = [(b'\xef\xbb\xbf', 'utf-8'), 

29 (b'\xff\xfe\0\0', 'utf-32'), 

30 (b'\0\0\xfe\xff', 'utf-32be'), 

31 (b'\xff\xfe', 'utf-16'), 

32 (b'\xfe\xff', 'utf-16be')] 

33 

34_default_analyse = staticmethod(lambda x: 0.0) 

35 

36 

37class LexerMeta(type): 

38 """ 

39 This metaclass automagically converts ``analyse_text`` methods into 

40 static methods which always return float values. 

41 """ 

42 

43 def __new__(mcs, name, bases, d): 

44 if 'analyse_text' in d: 

45 d['analyse_text'] = make_analysator(d['analyse_text']) 

46 return type.__new__(mcs, name, bases, d) 

47 

48 

49class Lexer(metaclass=LexerMeta): 

50 """ 

51 Lexer for a specific language. 

52 

53 See also :doc:`lexerdevelopment`, a high-level guide to writing 

54 lexers. 

55 

56 Lexer classes have attributes used for choosing the most appropriate 

57 lexer based on various criteria. 

58 

59 .. autoattribute:: name 

60 :no-value: 

61 .. autoattribute:: aliases 

62 :no-value: 

63 .. autoattribute:: filenames 

64 :no-value: 

65 .. autoattribute:: alias_filenames 

66 .. autoattribute:: mimetypes 

67 :no-value: 

68 .. autoattribute:: priority 

69 

70 Lexers included in Pygments should have two additional attributes: 

71 

72 .. autoattribute:: url 

73 :no-value: 

74 .. autoattribute:: version_added 

75 :no-value: 

76 

77 Lexers included in Pygments may have additional attributes: 

78 

79 .. autoattribute:: _example 

80 :no-value: 

81 

82 You can pass options to the constructor. The basic options recognized 

83 by all lexers and processed by the base `Lexer` class are: 

84 

85 ``stripnl`` 

86 Strip leading and trailing newlines from the input (default: True). 

87 ``stripall`` 

88 Strip all leading and trailing whitespace from the input 

89 (default: False). 

90 ``ensurenl`` 

91 Make sure that the input ends with a newline (default: True). This 

92 is required for some lexers that consume input linewise. 

93 

94 .. versionadded:: 1.3 

95 

96 ``tabsize`` 

97 If given and greater than 0, expand tabs in the input (default: 0). 

98 ``encoding`` 

99 If given, must be an encoding name. This encoding will be used to 

100 convert the input string to Unicode, if it is not already a Unicode 

101 string (default: ``'guess'``, which uses a simple UTF-8 / Locale / 

102 Latin1 detection). Can also be ``'chardet'`` to use the chardet 

103 library, if it is installed. 

104 ``inencoding`` 

105 Overrides the ``encoding`` if given. 

106 """ 

107 

108 #: Full name of the lexer, in human-readable form 

109 name = None 

110 

111 #: A list of short, unique identifiers that can be used to look 

112 #: up the lexer from a list, e.g., using `get_lexer_by_name()`. 

113 aliases = [] 

114 

115 #: A list of `fnmatch` patterns that match filenames which contain 

116 #: content for this lexer. The patterns in this list should be unique among 

117 #: all lexers. 

118 filenames = [] 

119 

120 #: A list of `fnmatch` patterns that match filenames which may or may not 

121 #: contain content for this lexer. This list is used by the 

122 #: :func:`.guess_lexer_for_filename()` function, to determine which lexers 

123 #: are then included in guessing the correct one. That means that 

124 #: e.g. every lexer for HTML and a template language should include 

125 #: ``\*.html`` in this list. 

126 alias_filenames = [] 

127 

128 #: A list of MIME types for content that can be lexed with this lexer. 

129 mimetypes = [] 

130 

131 #: Priority, should multiple lexers match and no content is provided 

132 priority = 0 

133 

134 #: URL of the language specification/definition. Used in the Pygments 

135 #: documentation. Set to an empty string to disable. 

136 url = None 

137 

138 #: Version of Pygments in which the lexer was added. 

139 version_added = None 

140 

141 #: Example file name. Relative to the ``tests/examplefiles`` directory. 

142 #: This is used by the documentation generator to show an example. 

143 _example = None 

144 

145 def __init__(self, **options): 

146 """ 

147 This constructor takes arbitrary options as keyword arguments. 

148 Every subclass must first process its own options and then call 

149 the `Lexer` constructor, since it processes the basic 

150 options like `stripnl`. 

151 

152 An example looks like this: 

153 

154 .. sourcecode:: python 

155 

156 def __init__(self, **options): 

157 self.compress = options.get('compress', '') 

158 Lexer.__init__(self, **options) 

159 

160 As these options must all be specifiable as strings (due to the 

161 command line usage), there are various utility functions 

162 available to help with that, see `Utilities`_. 

163 """ 

164 self.options = options 

165 self.stripnl = get_bool_opt(options, 'stripnl', True) 

166 self.stripall = get_bool_opt(options, 'stripall', False) 

167 self.ensurenl = get_bool_opt(options, 'ensurenl', True) 

168 self.tabsize = get_int_opt(options, 'tabsize', 0) 

169 self.encoding = options.get('encoding', 'guess') 

170 self.encoding = options.get('inencoding') or self.encoding 

171 self.filters = [] 

172 for filter_ in get_list_opt(options, 'filters', ()): 

173 self.add_filter(filter_) 

174 

175 def __repr__(self): 

176 if self.options: 

177 return f'<pygments.lexers.{self.__class__.__name__} with {self.options!r}>' 

178 else: 

179 return f'<pygments.lexers.{self.__class__.__name__}>' 

180 

181 def add_filter(self, filter_, **options): 

182 """ 

183 Add a new stream filter to this lexer. 

184 """ 

185 if not isinstance(filter_, Filter): 

186 filter_ = get_filter_by_name(filter_, **options) 

187 self.filters.append(filter_) 
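# Sketch: filters can be attached by name (resolved through
# get_filter_by_name) or as Filter instances; 'codetagify' is one of the
# filters shipped in pygments.filters.
from pygments.lexers import PythonLexer

lexer = PythonLexer()
lexer.add_filter('codetagify')
# equivalently at construction time: PythonLexer(filters=['codetagify'])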

188 

189 def analyse_text(text): 

190 """ 

191 A static method which is called for lexer guessing. 

192 

193 It should analyse the text and return a float in the range 

194 from ``0.0`` to ``1.0``. If it returns ``0.0``, the lexer 

195 will not be selected as the most probable one, if it returns 

196 ``1.0``, it will be selected immediately. This is used by 

197 `guess_lexer`. 

198 

199 The `LexerMeta` metaclass automatically wraps this function so 

200 that it works like a static method (no ``self`` or ``cls`` 

201 parameter) and the return value is automatically converted to 

202 `float`. If the return value is an object that is boolean `False`, 

203 it's the same as if the return value was ``0.0``. 

204 """ 

205 

206 def _preprocess_lexer_input(self, text): 

207 """Apply preprocessing such as decoding the input, removing BOM and normalizing newlines.""" 

208 

209 if not isinstance(text, str): 

210 if self.encoding == 'guess': 

211 text, _ = guess_decode(text) 

212 elif self.encoding == 'chardet': 

213 try: 

214 import chardet 

215 except ImportError as e: 

216 raise ImportError('To enable chardet encoding guessing, ' 

217 'please install the chardet library ' 

218 'from http://chardet.feedparser.org/') from e 

219 # check for BOM first 

220 decoded = None 

221 for bom, encoding in _encoding_map: 

222 if text.startswith(bom): 

223 decoded = text[len(bom):].decode(encoding, 'replace') 

224 break 

225 # no BOM found, so use chardet 

226 if decoded is None: 

227 enc = chardet.detect(text[:1024]) # Guess using first 1KB 

228 decoded = text.decode(enc.get('encoding') or 'utf-8', 

229 'replace') 

230 text = decoded 

231 else: 

232 text = text.decode(self.encoding) 

233 if text.startswith('\ufeff'): 

234 text = text[len('\ufeff'):] 

235 else: 

236 if text.startswith('\ufeff'): 

237 text = text[len('\ufeff'):] 

238 

239 # text now *is* a unicode string 

240 text = text.replace('\r\n', '\n') 

241 text = text.replace('\r', '\n') 

242 if self.stripall: 

243 text = text.strip() 

244 elif self.stripnl: 

245 text = text.strip('\n') 

246 if self.tabsize > 0: 

247 text = text.expandtabs(self.tabsize) 

248 if self.ensurenl and not text.endswith('\n'): 

249 text += '\n' 

250 

251 return text 
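# Sketch: get_tokens() accepts bytes as well as str. With the default
# encoding 'guess' the input is decoded by guess_decode (UTF-8 / locale /
# Latin-1, as described in the class docstring); an explicit encoding may
# also be given.
from pygments.lexers import PythonLexer

list(PythonLexer().get_tokens(b'print("hi")\n'))
list(PythonLexer(encoding='utf-8').get_tokens(b'print("hi")\n'))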

252 

253 def get_tokens(self, text, unfiltered=False): 

254 """ 

255 This method is the basic interface of a lexer. It is called by 

256 the `highlight()` function. It must process the text and return an 

257 iterable of ``(tokentype, value)`` pairs from `text`. 

258 

259 Normally, you don't need to override this method. The default 

260 implementation processes the options recognized by all lexers 

261 (`stripnl`, `stripall` and so on), and then yields all tokens 

262 from `get_tokens_unprocessed()`, with the ``index`` dropped. 

263 

264 If `unfiltered` is set to `True`, the filtering mechanism is 

265 bypassed even if filters are defined. 

266 """ 

267 text = self._preprocess_lexer_input(text) 

268 

269 def streamer(): 

270 for _, t, v in self.get_tokens_unprocessed(text): 

271 yield t, v 

272 stream = streamer() 

273 if not unfiltered: 

274 stream = apply_filters(stream, self.filters, self) 

275 return stream 

276 

277 def get_tokens_unprocessed(self, text): 

278 """ 

279 This method should process the text and return an iterable of 

280 ``(index, tokentype, value)`` tuples where ``index`` is the starting 

281 position of the token within the input text. 

282 

283 It must be overridden by subclasses. It is recommended to 

284 implement it as a generator to maximize effectiveness. 

285 """ 

286 raise NotImplementedError 
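# Sketch contrasting the two entry points: get_tokens() yields
# (tokentype, value) pairs after preprocessing and filtering, while
# get_tokens_unprocessed() additionally carries the start index of each token.
from pygments.lexers import PythonLexer

lexer = PythonLexer()
for tokentype, value in lexer.get_tokens('x = 1\n'):
    pass        # e.g. (Token.Name, 'x'), (Token.Text.Whitespace, ' '), ...
for index, tokentype, value in lexer.get_tokens_unprocessed('x = 1\n'):
    pass        # e.g. (0, Token.Name, 'x'), ...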

287 

288 

289class DelegatingLexer(Lexer): 

290 """ 

291 This lexer takes two lexers as arguments. A root lexer and 

292 a language lexer. First everything is scanned using the language 

293 lexer, afterwards all ``Other`` tokens are lexed using the root 

294 lexer. 

295 

296 The lexers from the ``template`` lexer package use this base lexer. 

297 """ 

298 

299 def __init__(self, _root_lexer, _language_lexer, _needle=Other, **options): 

300 self.root_lexer = _root_lexer(**options) 

301 self.language_lexer = _language_lexer(**options) 

302 self.needle = _needle 

303 Lexer.__init__(self, **options) 

304 

305 def get_tokens_unprocessed(self, text): 

306 buffered = '' 

307 insertions = [] 

308 lng_buffer = [] 

309 for i, t, v in self.language_lexer.get_tokens_unprocessed(text): 

310 if t is self.needle: 

311 if lng_buffer: 

312 insertions.append((len(buffered), lng_buffer)) 

313 lng_buffer = [] 

314 buffered += v 

315 else: 

316 lng_buffer.append((i, t, v)) 

317 if lng_buffer: 

318 insertions.append((len(buffered), lng_buffer)) 

319 return do_insertions(insertions, 

320 self.root_lexer.get_tokens_unprocessed(buffered)) 
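# Sketch of a DelegatingLexer subclass in the style of the template lexers.
# It assumes the PHP lexer emits ``Other`` for everything outside PHP tags,
# which DelegatingLexer then hands to the HTML root lexer; the class name is
# illustrative.
from pygments.lexer import DelegatingLexer
from pygments.lexers import HtmlLexer, PhpLexer

class HtmlEmbeddedPhpLexer(DelegatingLexer):
    name = 'HTML+PHP (sketch)'

    def __init__(self, **options):
        super().__init__(HtmlLexer, PhpLexer, **options)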

321 

322 

323# ------------------------------------------------------------------------------ 

324# RegexLexer and ExtendedRegexLexer 

325# 

326 

327 

328class include(str): # pylint: disable=invalid-name 

329 """ 

330 Indicates that a state should include rules from another state. 

331 """ 

332 pass 

333 

334 

335class _inherit: 

336 """ 

337 Indicates that a state should inherit from its superclass. 

338 """ 

339 def __repr__(self): 

340 return 'inherit' 

341 

342inherit = _inherit() # pylint: disable=invalid-name 
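# Sketch: a subclass can splice its parent's rules into an overridden state
# with ``inherit`` (merged by RegexLexerMeta.get_tokendefs below); both lexer
# classes here are illustrative.
from pygments.lexer import RegexLexer, inherit
from pygments.token import Keyword, Name, Whitespace

class BaseConfLexer(RegexLexer):
    name = 'BaseConf'
    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'\w+', Name),
        ],
    }

class ExtendedConfLexer(BaseConfLexer):
    name = 'ExtendedConf'
    tokens = {
        'root': [
            (r'\b(include|import)\b', Keyword),
            inherit,   # BaseConfLexer's 'root' rules are merged in here
        ],
    }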

343 

344 

345class combined(tuple): # pylint: disable=invalid-name 

346 """ 

347 Indicates a state combined from multiple states. 

348 """ 

349 

350 def __new__(cls, *args): 

351 return tuple.__new__(cls, args) 

352 

353 def __init__(self, *args): 

354 # tuple.__init__ doesn't do anything 

355 pass 

356 

357 

358class _PseudoMatch: 

359 """ 

360 A pseudo match object constructed from a string. 

361 """ 

362 

363 def __init__(self, start, text): 

364 self._text = text 

365 self._start = start 

366 

367 def start(self, arg=None): 

368 return self._start 

369 

370 def end(self, arg=None): 

371 return self._start + len(self._text) 

372 

373 def group(self, arg=None): 

374 if arg: 

375 raise IndexError('No such group') 

376 return self._text 

377 

378 def groups(self): 

379 return (self._text,) 

380 

381 def groupdict(self): 

382 return {} 

383 

384 

385def bygroups(*args): 

386 """ 

387 Callback that yields multiple actions for each group in the match. 

388 """ 

389 def callback(lexer, match, ctx=None): 

390 for i, action in enumerate(args): 

391 if action is None: 

392 continue 

393 elif type(action) is _TokenType: 

394 data = match.group(i + 1) 

395 if data: 

396 yield match.start(i + 1), action, data 

397 else: 

398 data = match.group(i + 1) 

399 if data is not None: 

400 if ctx: 

401 ctx.pos = match.start(i + 1) 

402 for item in action(lexer, 

403 _PseudoMatch(match.start(i + 1), data), ctx): 

404 if item: 

405 yield item 

406 if ctx: 

407 ctx.pos = match.end() 

408 return callback 
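# Sketch: a typical bygroups() rule assigning one token type per regex group;
# the rule itself is illustrative, not taken from a real lexer.
from pygments.lexer import bygroups
from pygments.token import Keyword, Name, Whitespace

rule = (r'(def)(\s+)([a-zA-Z_]\w*)',
        bygroups(Keyword, Whitespace, Name.Function))
# 'def' -> Keyword, the gap -> Whitespace, the identifier -> Name.Function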

409 

410 

411class _This: 

412 """ 

413 Special singleton used for indicating the caller class. 

414 Used by ``using``. 

415 """ 

416 

417this = _This() 

418 

419 

420def using(_other, **kwargs): 

421 """ 

422 Callback that processes the match with a different lexer. 

423 

424 The keyword arguments are forwarded to the lexer, except `state` which 

425 is handled separately. 

426 

427 `state` specifies the state that the new lexer will start in, and can 

428 be an enumerable such as ('root', 'inline', 'string') or a simple 

429 string which is assumed to be on top of the root state. 

430 

431 Note: For that to work, `_other` must not be an `ExtendedRegexLexer`. 

432 """ 

433 gt_kwargs = {} 

434 if 'state' in kwargs: 

435 s = kwargs.pop('state') 

436 if isinstance(s, (list, tuple)): 

437 gt_kwargs['stack'] = s 

438 else: 

439 gt_kwargs['stack'] = ('root', s) 

440 

441 if _other is this: 

442 def callback(lexer, match, ctx=None): 

443 # if keyword arguments are given the callback 

444 # function has to create a new lexer instance 

445 if kwargs: 

446 # XXX: cache that somehow 

447 kwargs.update(lexer.options) 

448 lx = lexer.__class__(**kwargs) 

449 else: 

450 lx = lexer 

451 s = match.start() 

452 for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): 

453 yield i + s, t, v 

454 if ctx: 

455 ctx.pos = match.end() 

456 else: 

457 def callback(lexer, match, ctx=None): 

458 # XXX: cache that somehow 

459 kwargs.update(lexer.options) 

460 lx = _other(**kwargs) 

461 

462 s = match.start() 

463 for i, t, v in lx.get_tokens_unprocessed(match.group(), **gt_kwargs): 

464 yield i + s, t, v 

465 if ctx: 

466 ctx.pos = match.end() 

467 return callback 
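# Sketch: using() inside a rule re-lexes the matched group with another lexer;
# the DoctestishLexer below is made up to illustrate the pattern.
from pygments.lexer import RegexLexer, bygroups, using
from pygments.lexers import PythonLexer
from pygments.token import Generic, Text

class DoctestishLexer(RegexLexer):
    name = 'Doctestish (sketch)'
    tokens = {
        'root': [
            (r'(>>> )(.*\n)', bygroups(Generic.Prompt, using(PythonLexer))),
            (r'.*\n', Text),
        ],
    }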

468 

469 

470class default: 

471 """ 

472 Indicates a state or state action (e.g. #pop) to apply. 

473 For example, default('#pop') is equivalent to ('', Token, '#pop'). 

474 Note that state tuples may be used as well. 

475 

476 .. versionadded:: 2.0 

477 """ 

478 def __init__(self, state): 

479 self.state = state 
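# Sketch of default() inside a token definition: when nothing else in the
# state matches, pop back without consuming input (the state is illustrative).
from pygments.lexer import default
from pygments.token import String

maybe_string_state = [
    (r'"[^"]*"', String),
    default('#pop'),   # equivalent to ('', Token, '#pop')
]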

480 

481 

482class words(Future): 

483 """ 

484 Indicates a list of literal words that is transformed into an optimized 

485 regex that matches any of the words. 

486 

487 .. versionadded:: 2.0 

488 """ 

489 def __init__(self, words, prefix='', suffix=''): 

490 self.words = words 

491 self.prefix = prefix 

492 self.suffix = suffix 

493 

494 def get(self): 

495 return regex_opt(self.words, prefix=self.prefix, suffix=self.suffix) 
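# Sketch: words() expands to a single optimized regex (via regex_opt) when the
# token definitions are processed; the keyword list and \b anchors are
# illustrative.
from pygments.lexer import words
from pygments.token import Keyword

rule = (words(('if', 'elif', 'else', 'while'), prefix=r'\b', suffix=r'\b'),
        Keyword)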

496 

497 

498class RegexLexerMeta(LexerMeta): 

499 """ 

500 Metaclass for RegexLexer, creates the self._tokens attribute from 

501 self.tokens on the first instantiation. 

502 """ 

503 

504 def _process_regex(cls, regex, rflags, state): 

505 """Preprocess the regular expression component of a token definition.""" 

506 if isinstance(regex, Future): 

507 regex = regex.get() 

508 return re.compile(regex, rflags).match 

509 

510 def _process_token(cls, token): 

511 """Preprocess the token component of a token definition.""" 

512 assert type(token) is _TokenType or callable(token), \ 

513 f'token type must be simple type or callable, not {token!r}' 

514 return token 

515 

516 def _process_new_state(cls, new_state, unprocessed, processed): 

517 """Preprocess the state transition action of a token definition.""" 

518 if isinstance(new_state, str): 

519 # an existing state 

520 if new_state == '#pop': 

521 return -1 

522 elif new_state in unprocessed: 

523 return (new_state,) 

524 elif new_state == '#push': 

525 return new_state 

526 elif new_state[:5] == '#pop:': 

527 return -int(new_state[5:]) 

528 else: 

529 assert False, f'unknown new state {new_state!r}' 

530 elif isinstance(new_state, combined): 

531 # combine a new state from existing ones 

532 tmp_state = '_tmp_%d' % cls._tmpname 

533 cls._tmpname += 1 

534 itokens = [] 

535 for istate in new_state: 

536 assert istate != new_state, f'circular state ref {istate!r}' 

537 itokens.extend(cls._process_state(unprocessed, 

538 processed, istate)) 

539 processed[tmp_state] = itokens 

540 return (tmp_state,) 

541 elif isinstance(new_state, tuple): 

542 # push more than one state 

543 for istate in new_state: 

544 assert (istate in unprocessed or 

545 istate in ('#pop', '#push')), \ 

546 'unknown new state ' + istate 

547 return new_state 

548 else: 

549 assert False, f'unknown new state def {new_state!r}' 

550 

551 def _process_state(cls, unprocessed, processed, state): 

552 """Preprocess a single state definition.""" 

553 assert isinstance(state, str), f"wrong state name {state!r}" 

554 assert state[0] != '#', f"invalid state name {state!r}" 

555 if state in processed: 

556 return processed[state] 

557 tokens = processed[state] = [] 

558 rflags = cls.flags 

559 for tdef in unprocessed[state]: 

560 if isinstance(tdef, include): 

561 # it's a state reference 

562 assert tdef != state, f"circular state reference {state!r}" 

563 tokens.extend(cls._process_state(unprocessed, processed, 

564 str(tdef))) 

565 continue 

566 if isinstance(tdef, _inherit): 

567 # should already be processed, but may not be in the case of: 

568 # 1. the state has no counterpart in any parent 

569 # 2. the state includes more than one 'inherit' 

570 continue 

571 if isinstance(tdef, default): 

572 new_state = cls._process_new_state(tdef.state, unprocessed, processed) 

573 tokens.append((re.compile('').match, None, new_state)) 

574 continue 

575 

576 assert type(tdef) is tuple, f"wrong rule def {tdef!r}" 

577 

578 try: 

579 rex = cls._process_regex(tdef[0], rflags, state) 

580 except Exception as err: 

581 raise ValueError(f"uncompilable regex {tdef[0]!r} in state {state!r} of {cls!r}: {err}") from err 

582 

583 token = cls._process_token(tdef[1]) 

584 

585 if len(tdef) == 2: 

586 new_state = None 

587 else: 

588 new_state = cls._process_new_state(tdef[2], 

589 unprocessed, processed) 

590 

591 tokens.append((rex, token, new_state)) 

592 return tokens 

593 

594 def process_tokendef(cls, name, tokendefs=None): 

595 """Preprocess a dictionary of token definitions.""" 

596 processed = cls._all_tokens[name] = {} 

597 tokendefs = tokendefs or cls.tokens[name] 

598 for state in list(tokendefs): 

599 cls._process_state(tokendefs, processed, state) 

600 return processed 

601 

602 def get_tokendefs(cls): 

603 """ 

604 Merge tokens from superclasses in MRO order, returning a single tokendef 

605 dictionary. 

606 

607 Any state that is not defined by a subclass will be inherited 

608 automatically. States that *are* defined by subclasses will, by 

609 default, override that state in the superclass. If a subclass wishes to 

610 inherit definitions from a superclass, it can use the special value 

611 "inherit", which will cause the superclass' state definition to be 

612 included at that point in the state. 

613 """ 

614 tokens = {} 

615 inheritable = {} 

616 for c in cls.__mro__: 

617 toks = c.__dict__.get('tokens', {}) 

618 

619 for state, items in toks.items(): 

620 curitems = tokens.get(state) 

621 if curitems is None: 

622 # N.b. because this is assigned by reference, sufficiently 

623 # deep hierarchies are processed incrementally (e.g. for 

624 # A(B), B(C), C(RegexLexer), B will be premodified so X(B) 

625 # will not see any inherits in B). 

626 tokens[state] = items 

627 try: 

628 inherit_ndx = items.index(inherit) 

629 except ValueError: 

630 continue 

631 inheritable[state] = inherit_ndx 

632 continue 

633 

634 inherit_ndx = inheritable.pop(state, None) 

635 if inherit_ndx is None: 

636 continue 

637 

638 # Replace the "inherit" value with the items 

639 curitems[inherit_ndx:inherit_ndx+1] = items 

640 try: 

641 # N.b. this is the index in items (that is, the superclass 

642 # copy), so an offset is required when storing below. 

643 new_inh_ndx = items.index(inherit) 

644 except ValueError: 

645 pass 

646 else: 

647 inheritable[state] = inherit_ndx + new_inh_ndx 

648 

649 return tokens 

650 

651 def __call__(cls, *args, **kwds): 

652 """Instantiate cls after preprocessing its token definitions.""" 

653 if '_tokens' not in cls.__dict__: 

654 cls._all_tokens = {} 

655 cls._tmpname = 0 

656 if hasattr(cls, 'token_variants') and cls.token_variants: 

657 # don't process yet 

658 pass 

659 else: 

660 cls._tokens = cls.process_tokendef('', cls.get_tokendefs()) 

661 

662 return type.__call__(cls, *args, **kwds) 

663 

664 

665class RegexLexer(Lexer, metaclass=RegexLexerMeta): 

666 """ 

667 Base for simple stateful regular expression-based lexers. 

668 Simplifies the lexing process so that you need only 

669 provide a list of states and regular expressions. 

670 """ 

671 

672 #: Flags for compiling the regular expressions. 

673 #: Defaults to MULTILINE. 

674 flags = re.MULTILINE 

675 

676 #: At all times there is a stack of states. Initially, the stack contains 

677 #: a single state 'root'. The top of the stack is called "the current state". 

678 #: 

679 #: Dict of ``{'state': [(regex, tokentype, new_state), ...], ...}`` 

680 #: 

681 #: ``new_state`` can be omitted to signify no state transition. 

682 #: If ``new_state`` is a string, it is pushed on the stack. This ensures 

683 #: the new current state is ``new_state``. 

684 #: If ``new_state`` is a tuple of strings, all of those strings are pushed 

685 #: on the stack and the current state will be the last element of the tuple. 

686 #: ``new_state`` can also be ``combined('state1', 'state2', ...)`` 

687 #: to signify a new, anonymous state combined from the rules of two 

688 #: or more existing ones. 

689 #: Furthermore, it can be '#pop' to signify going back one step in 

690 #: the state stack, or '#push' to push the current state on the stack 

691 #: again. Note that if you push while in a combined state, the combined 

692 #: state itself is pushed, and not only the state in which the rule is 

693 #: defined. 

694 #: 

695 #: The tuple can also be replaced with ``include('state')``, in which 

696 #: case the rules from the state named by the string are included in the 

697 #: current one. 

698 tokens = {} 
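# Sketch of a small stateful lexer exercising the constructs described above
# (include, pushing a state by name, and '#pop'); the language is made up.
from pygments.lexer import RegexLexer, include
from pygments.token import Comment, Name, String, Whitespace

class TinyLexer(RegexLexer):
    name = 'Tiny (sketch)'
    tokens = {
        'whitespace': [
            (r'\s+', Whitespace),
        ],
        'root': [
            include('whitespace'),
            (r'#.*$', Comment.Single),
            (r'"', String, 'string'),   # push the 'string' state
            (r'\w+', Name),
        ],
        'string': [
            (r'[^"\\]+', String),
            (r'\\.', String.Escape),
            (r'"', String, '#pop'),     # back to 'root'
        ],
    }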

699 

700 def get_tokens_unprocessed(self, text, stack=('root',)): 

701 """ 

702 Split ``text`` into (tokentype, text) pairs. 

703 

704 ``stack`` is the initial stack (default: ``['root']``) 

705 """ 

706 pos = 0 

707 tokendefs = self._tokens 

708 statestack = list(stack) 

709 statetokens = tokendefs[statestack[-1]] 

710 while 1: 

711 for rexmatch, action, new_state in statetokens: 

712 m = rexmatch(text, pos) 

713 if m: 

714 if action is not None: 

715 if type(action) is _TokenType: 

716 yield pos, action, m.group() 

717 else: 

718 yield from action(self, m) 

719 pos = m.end() 

720 if new_state is not None: 

721 # state transition 

722 if isinstance(new_state, tuple): 

723 for state in new_state: 

724 if state == '#pop': 

725 if len(statestack) > 1: 

726 statestack.pop() 

727 elif state == '#push': 

728 statestack.append(statestack[-1]) 

729 else: 

730 statestack.append(state) 

731 elif isinstance(new_state, int): 

732 # pop, but keep at least one state on the stack 

733 # (random code leading to unexpected pops should 

734 # not allow exceptions) 

735 if abs(new_state) >= len(statestack): 

736 del statestack[1:] 

737 else: 

738 del statestack[new_state:] 

739 elif new_state == '#push': 

740 statestack.append(statestack[-1]) 

741 else: 

742 assert False, f"wrong state def: {new_state!r}" 

743 statetokens = tokendefs[statestack[-1]] 

744 break 

745 else: 

746 # We are here only if all state tokens have been considered 

747 # and none of them produced a match. 

748 try: 

749 if text[pos] == '\n': 

750 # at EOL, reset state to "root" 

751 statestack = ['root'] 

752 statetokens = tokendefs['root'] 

753 yield pos, Whitespace, '\n' 

754 pos += 1 

755 continue 

756 yield pos, Error, text[pos] 

757 pos += 1 

758 except IndexError: 

759 break 

760 

761 

762class LexerContext: 

763 """ 

764 A helper object that holds lexer position data. 

765 """ 

766 

767 def __init__(self, text, pos, stack=None, end=None): 

768 self.text = text 

769 self.pos = pos 

770 self.end = end or len(text) # end=0 not supported ;-) 

771 self.stack = stack or ['root'] 

772 

773 def __repr__(self): 

774 return f'LexerContext({self.text!r}, {self.pos!r}, {self.stack!r})' 

775 

776 

777class ExtendedRegexLexer(RegexLexer): 

778 """ 

779 A RegexLexer that uses a context object to store its state. 

780 """ 

781 

782 def get_tokens_unprocessed(self, text=None, context=None): 

783 """ 

784 Split ``text`` into (tokentype, text) pairs. 

785 If ``context`` is given, use this lexer context instead. 

786 """ 

787 tokendefs = self._tokens 

788 if not context: 

789 ctx = LexerContext(text, 0) 

790 statetokens = tokendefs['root'] 

791 else: 

792 ctx = context 

793 statetokens = tokendefs[ctx.stack[-1]] 

794 text = ctx.text 

795 while 1: 

796 for rexmatch, action, new_state in statetokens: 

797 m = rexmatch(text, ctx.pos, ctx.end) 

798 if m: 

799 if action is not None: 

800 if type(action) is _TokenType: 

801 yield ctx.pos, action, m.group() 

802 ctx.pos = m.end() 

803 else: 

804 yield from action(self, m, ctx) 

805 if not new_state: 

806 # altered the state stack? 

807 statetokens = tokendefs[ctx.stack[-1]] 

808 # CAUTION: callback must set ctx.pos! 

809 if new_state is not None: 

810 # state transition 

811 if isinstance(new_state, tuple): 

812 for state in new_state: 

813 if state == '#pop': 

814 if len(ctx.stack) > 1: 

815 ctx.stack.pop() 

816 elif state == '#push': 

817 ctx.stack.append(ctx.stack[-1]) 

818 else: 

819 ctx.stack.append(state) 

820 elif isinstance(new_state, int): 

821 # see RegexLexer for why this check is made 

822 if abs(new_state) >= len(ctx.stack): 

823 del ctx.stack[1:] 

824 else: 

825 del ctx.stack[new_state:] 

826 elif new_state == '#push': 

827 ctx.stack.append(ctx.stack[-1]) 

828 else: 

829 assert False, f"wrong state def: {new_state!r}" 

830 statetokens = tokendefs[ctx.stack[-1]] 

831 break 

832 else: 

833 try: 

834 if ctx.pos >= ctx.end: 

835 break 

836 if text[ctx.pos] == '\n': 

837 # at EOL, reset state to "root" 

838 ctx.stack = ['root'] 

839 statetokens = tokendefs['root'] 

840 yield ctx.pos, Text, '\n' 

841 ctx.pos += 1 

842 continue 

843 yield ctx.pos, Error, text[ctx.pos] 

844 ctx.pos += 1 

845 except IndexError: 

846 break 

847 

848 

849def do_insertions(insertions, tokens): 

850 """ 

851 Helper for lexers which must combine the results of several 

852 sublexers. 

853 

854 ``insertions`` is a list of ``(index, itokens)`` pairs. 

855 Each ``itokens`` iterable should be inserted at position 

856 ``index`` into the token stream given by the ``tokens`` 

857 argument. 

858 

859 The result is a combined token stream. 

860 

861 TODO: clean up the code here. 

862 """ 

863 insertions = iter(insertions) 

864 try: 

865 index, itokens = next(insertions) 

866 except StopIteration: 

867 # no insertions 

868 yield from tokens 

869 return 

870 

871 realpos = None 

872 insleft = True 

873 

874 # iterate over the token stream where we want to insert 

875 # the tokens from the insertion list. 

876 for i, t, v in tokens: 

877 # first iteration. store the position of first item 

878 if realpos is None: 

879 realpos = i 

880 oldi = 0 

881 while insleft and i + len(v) >= index: 

882 tmpval = v[oldi:index - i] 

883 if tmpval: 

884 yield realpos, t, tmpval 

885 realpos += len(tmpval) 

886 for it_index, it_token, it_value in itokens: 

887 yield realpos, it_token, it_value 

888 realpos += len(it_value) 

889 oldi = index - i 

890 try: 

891 index, itokens = next(insertions) 

892 except StopIteration: 

893 insleft = False 

894 break # not strictly necessary 

895 if oldi < len(v): 

896 yield realpos, t, v[oldi:] 

897 realpos += len(v) - oldi 

898 

899 # leftover tokens 

900 while insleft: 

901 # no normal tokens, set realpos to zero 

902 realpos = realpos or 0 

903 for p, t, v in itokens: 

904 yield realpos, t, v 

905 realpos += len(v) 

906 try: 

907 index, itokens = next(insertions) 

908 except StopIteration: 

909 insleft = False 

910 break # not strictly necessary 
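# Sketch: interleaving prompt tokens into another lexer's stream, the way
# console-style lexers use do_insertions; the '>>> ' prompt is illustrative.
from pygments.lexer import do_insertions
from pygments.lexers import PythonLexer
from pygments.token import Generic

code = 'print("hi")\n'
insertions = [(0, [(0, Generic.Prompt, '>>> ')])]
merged = list(do_insertions(insertions,
                            PythonLexer().get_tokens_unprocessed(code)))
# yields (0, Generic.Prompt, '>>> ') followed by the shifted code tokens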

911 

912 

913class ProfilingRegexLexerMeta(RegexLexerMeta): 

914 """Metaclass for ProfilingRegexLexer, collects regex timing info.""" 

915 

916 def _process_regex(cls, regex, rflags, state): 

917 if isinstance(regex, words): 

918 rex = regex_opt(regex.words, prefix=regex.prefix, 

919 suffix=regex.suffix) 

920 else: 

921 rex = regex 

922 compiled = re.compile(rex, rflags) 

923 

924 def match_func(text, pos, endpos=sys.maxsize): 

925 info = cls._prof_data[-1].setdefault((state, rex), [0, 0.0]) 

926 t0 = time.time() 

927 res = compiled.match(text, pos, endpos) 

928 t1 = time.time() 

929 info[0] += 1 

930 info[1] += t1 - t0 

931 return res 

932 return match_func 

933 

934 

935class ProfilingRegexLexer(RegexLexer, metaclass=ProfilingRegexLexerMeta): 

936 """Drop-in replacement for RegexLexer that does profiling of its regexes.""" 

937 

938 _prof_data = [] 

939 _prof_sort_index = 4 # defaults to time per call 

940 

941 def get_tokens_unprocessed(self, text, stack=('root',)): 

942 # this needs to be a stack, since using(this) will produce nested calls 

943 self.__class__._prof_data.append({}) 

944 yield from RegexLexer.get_tokens_unprocessed(self, text, stack) 

945 rawdata = self.__class__._prof_data.pop() 

946 data = sorted(((s, repr(r).strip('u\'').replace('\\\\', '\\')[:65], 

947 n, 1000 * t, 1000 * t / n) 

948 for ((s, r), (n, t)) in rawdata.items()), 

949 key=lambda x: x[self._prof_sort_index], 

950 reverse=True) 

951 sum_total = sum(x[3] for x in data) 

952 

953 print() 

954 print('Profiling result for %s lexing %d chars in %.3f ms' % 

955 (self.__class__.__name__, len(text), sum_total)) 

956 print('=' * 110) 

957 print('%-20s %-64s ncalls tottime percall' % ('state', 'regex')) 

958 print('-' * 110) 

959 for d in data: 

960 print('%-20s %-65s %5d %8.4f %8.4f' % d) 

961 print('=' * 110)
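# Sketch: a toy lexer built directly on ProfilingRegexLexer; exhausting the
# token stream prints the ncalls/tottime/percall table produced above.
from pygments.lexer import ProfilingRegexLexer
from pygments.token import Name, Whitespace

class ToyProfilingLexer(ProfilingRegexLexer):
    name = 'Toy (sketch)'
    tokens = {
        'root': [
            (r'\w+', Name),
            (r'\s+', Whitespace),
        ],
    }

list(ToyProfilingLexer().get_tokens('a few words to profile'))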