1"""
2 pygments.lexers.markup
3 ~~~~~~~~~~~~~~~~~~~~~~
5 Lexers for non-HTML markup languages.
7 :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
8 :license: BSD, see LICENSE for details.
9"""
11import re
13from pygments.lexers.html import XmlLexer
14from pygments.lexers.javascript import JavascriptLexer
15from pygments.lexers.css import CssLexer
16from pygments.lexers.lilypond import LilyPondLexer
17from pygments.lexers.data import JsonLexer
19from pygments.lexer import RegexLexer, DelegatingLexer, include, bygroups, \
20 using, this, do_insertions, default, words
21from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
22 Number, Punctuation, Generic, Other, Whitespace
23from pygments.util import get_bool_opt, ClassNotFound
25__all__ = ['BBCodeLexer', 'MoinWikiLexer', 'RstLexer', 'TexLexer', 'GroffLexer',
26 'MozPreprocHashLexer', 'MozPreprocPercentLexer',
27 'MozPreprocXulLexer', 'MozPreprocJavascriptLexer',
28 'MozPreprocCssLexer', 'MarkdownLexer', 'TiddlyWiki5Lexer', 'WikitextLexer']


class BBCodeLexer(RegexLexer):
    """
    A lexer that highlights BBCode(-like) syntax.

    .. versionadded:: 0.6
    """

    name = 'BBCode'
    aliases = ['bbcode']
    mimetypes = ['text/x-bbcode']

    tokens = {
        'root': [
            (r'[^[]+', Text),
            # tag/end tag begin
            (r'\[/?\w+', Keyword, 'tag'),
            # stray bracket
            (r'\[', Text),
        ],
        'tag': [
            (r'\s+', Text),
            # attribute with value
            (r'(\w+)(=)("?[^\s"\]]+"?)',
             bygroups(Name.Attribute, Operator, String)),
            # tag argument (a la [color=green])
            (r'(=)("?[^\s"\]]+"?)',
             bygroups(Operator, String)),
            # tag end
            (r'\]', Keyword, '#pop'),
        ],
    }
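

# A minimal usage sketch (hypothetical input; assumes Pygments is installed):
#
#     from pygments import lex
#     for token, value in lex('[color=green]text[/color]', BBCodeLexer()):
#         print(token, repr(value))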


class MoinWikiLexer(RegexLexer):
    """
    For MoinMoin (and Trac) Wiki markup.

    .. versionadded:: 0.7
    """

    name = 'MoinMoin/Trac Wiki markup'
    aliases = ['trac-wiki', 'moin']
    filenames = []
    mimetypes = ['text/x-trac-wiki']
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            (r'^#.*$', Comment),
            (r'(!)(\S+)', bygroups(Keyword, Text)),  # Ignore-next
            # Titles
            (r'^(=+)([^=]+)(=+)(\s*#.+)?$',
             bygroups(Generic.Heading, using(this), Generic.Heading, String)),
            # Literal code blocks, with optional shebang
            (r'(\{\{\{)(\n#!.+)?', bygroups(Name.Builtin, Name.Namespace), 'codeblock'),
            (r'(\'\'\'?|\|\||`|__|~~|\^|,,|::)', Comment),  # Formatting
            # Lists
            (r'^( +)([.*-])( )', bygroups(Text, Name.Builtin, Text)),
            (r'^( +)([a-z]{1,5}\.)( )', bygroups(Text, Name.Builtin, Text)),
            # Other Formatting
            (r'\[\[\w+.*?\]\]', Keyword),  # Macro
            (r'(\[[^\s\]]+)(\s+[^\]]+?)?(\])',
             bygroups(Keyword, String, Keyword)),  # Link
            (r'^----+$', Keyword),  # Horizontal rules
            (r'[^\n\'\[{!_~^,|]+', Text),
            (r'\n', Text),
            (r'.', Text),
        ],
        'codeblock': [
            (r'\}\}\}', Name.Builtin, '#pop'),
            # these blocks are allowed to be nested in Trac, but not MoinMoin
            (r'\{\{\{', Text, '#push'),
            (r'[^{}]+', Comment.Preproc),  # slurp boring text
            (r'.', Comment.Preproc),  # allow loose { or }
        ],
    }
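

# A minimal usage sketch (hypothetical wiki source; titles and {{{ }}}
# code blocks receive their own token types):
#
#     from pygments import lex
#     moin = '= Title =\n{{{\ncode\n}}}\n'
#     tokens = list(lex(moin, MoinWikiLexer()))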


class RstLexer(RegexLexer):
    """
    For reStructuredText markup.

    .. versionadded:: 0.7

    Additional options accepted:

    `handlecodeblocks`
        Highlight the contents of ``.. sourcecode:: language``,
        ``.. code:: language`` and ``.. code-block:: language``
        directives with a lexer for the given language (default:
        ``True``).

        .. versionadded:: 0.8
    """
    name = 'reStructuredText'
    url = 'https://docutils.sourceforge.io/rst.html'
    aliases = ['restructuredtext', 'rst', 'rest']
    filenames = ['*.rst', '*.rest']
    mimetypes = ["text/x-rst", "text/prs.fallenstein.rst"]
    flags = re.MULTILINE

    def _handle_sourcecode(self, match):
        from pygments.lexers import get_lexer_by_name

        # section header
        yield match.start(1), Punctuation, match.group(1)
        yield match.start(2), Text, match.group(2)
        yield match.start(3), Operator.Word, match.group(3)
        yield match.start(4), Punctuation, match.group(4)
        yield match.start(5), Text, match.group(5)
        yield match.start(6), Keyword, match.group(6)
        yield match.start(7), Text, match.group(7)

        # lookup lexer if wanted and existing
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name(match.group(6).strip())
            except ClassNotFound:
                pass
        indention = match.group(8)
        indention_size = len(indention)
        code = (indention + match.group(9) + match.group(10) + match.group(11))

        # no lexer for this language. handle it as if it were a code block
        if lexer is None:
            yield match.start(8), String, code
            return

        # highlight the lines with the lexer.
        ins = []
        codelines = code.splitlines(True)
        code = ''
        for line in codelines:
            if len(line) > indention_size:
                ins.append((len(code), [(0, Text, line[:indention_size])]))
                code += line[indention_size:]
            else:
                code += line
        yield from do_insertions(ins, lexer.get_tokens_unprocessed(code))

    # from docutils.parsers.rst.states
    closers = '\'")]}>\u2019\u201d\xbb!?'
    unicode_delimiters = '\u2010\u2011\u2012\u2013\u2014\u00a0'
    end_string_suffix = (r'((?=$)|(?=[-/:.,; \n\x00%s%s]))'
                         % (re.escape(unicode_delimiters),
                            re.escape(closers)))

    tokens = {
        'root': [
            # Heading with overline
            (r'^(=+|-+|`+|:+|\.+|\'+|"+|~+|\^+|_+|\*+|\++|#+)([ \t]*\n)'
             r'(.+)(\n)(\1)(\n)',
             bygroups(Generic.Heading, Text, Generic.Heading,
                      Text, Generic.Heading, Text)),
            # Plain heading
            (r'^(\S.*)(\n)(={3,}|-{3,}|`{3,}|:{3,}|\.{3,}|\'{3,}|"{3,}|'
             r'~{3,}|\^{3,}|_{3,}|\*{3,}|\+{3,}|#{3,})(\n)',
             bygroups(Generic.Heading, Text, Generic.Heading, Text)),
            # Bulleted lists
            (r'^(\s*)([-*+])( .+\n(?:\1 .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Numbered lists
            (r'^(\s*)([0-9#ivxlcmIVXLCM]+\.)( .+\n(?:\1 .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            (r'^(\s*)(\(?[0-9#ivxlcmIVXLCM]+\))( .+\n(?:\1 .+\n)*)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Numbered, but keep words at BOL from becoming lists
            (r'^(\s*)([A-Z]+\.)( .+\n(?:\1 .+\n)+)',
             bygroups(Text, Number, using(this, state='inline'))),
            (r'^(\s*)(\(?[A-Za-z]+\))( .+\n(?:\1 .+\n)+)',
             bygroups(Text, Number, using(this, state='inline'))),
            # Line blocks
            (r'^(\s*)(\|)( .+\n(?:\| .+\n)*)',
             bygroups(Text, Operator, using(this, state='inline'))),
            # Sourcecode directives
            (r'^( *\.\.)(\s*)((?:source)?code(?:-block)?)(::)([ \t]*)([^\n]+)'
             r'(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\8.*)?\n)+)',
             _handle_sourcecode),
            # A directive
            (r'^( *\.\.)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
             bygroups(Punctuation, Text, Operator.Word, Punctuation, Text,
                      using(this, state='inline'))),
            # A reference target
            (r'^( *\.\.)(\s*)(_(?:[^:\\]|\\.)+:)(.*?)$',
             bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
            # A footnote/citation target
            (r'^( *\.\.)(\s*)(\[.+\])(.*?)$',
             bygroups(Punctuation, Text, Name.Tag, using(this, state='inline'))),
            # A substitution def
            (r'^( *\.\.)(\s*)(\|.+\|)(\s*)([\w:-]+?)(::)(?:([ \t]*)(.*))',
             bygroups(Punctuation, Text, Name.Tag, Text, Operator.Word,
                      Punctuation, Text, using(this, state='inline'))),
            # Comments
            (r'^ *\.\..*(\n( +.*\n|\n)+)?', Comment.Preproc),
            # Field list marker
            (r'^( *)(:(?:\\\\|\\:|[^:\n])+:(?=\s))([ \t]*)',
             bygroups(Text, Name.Class, Text)),
            # Definition list
            (r'^(\S.*(?<!::)\n)((?:(?: +.*)\n)+)',
             bygroups(using(this, state='inline'), using(this, state='inline'))),
            # Code blocks
            (r'(::)(\n[ \t]*\n)([ \t]+)(.*)(\n)((?:(?:\3.*)?\n)+)',
             bygroups(String.Escape, Text, String, String, Text, String)),
            include('inline'),
        ],
        'inline': [
            (r'\\.', Text),  # escape
            (r'``', String, 'literal'),  # code
            (r'(`.+?)(<.+?>)(`__?)',  # reference with inline target
             bygroups(String, String.Interpol, String)),
            (r'`.+?`__?', String),  # reference
            (r'(`.+?`)(:[a-zA-Z0-9:-]+?:)?',
             bygroups(Name.Variable, Name.Attribute)),  # role
            (r'(:[a-zA-Z0-9:-]+?:)(`.+?`)',
             bygroups(Name.Attribute, Name.Variable)),  # role (content first)
            (r'\*\*.+?\*\*', Generic.Strong),  # Strong emphasis
            (r'\*.+?\*', Generic.Emph),  # Emphasis
            (r'\[.*?\]_', String),  # Footnote or citation
            (r'<.+?>', Name.Tag),  # Hyperlink
            (r'[^\\\n\[*`:]+', Text),
            (r'.', Text),
        ],
        'literal': [
            (r'[^`]+', String),
            (r'``' + end_string_suffix, String, '#pop'),
            (r'`', String),
        ]
    }

    def __init__(self, **options):
        self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
        RegexLexer.__init__(self, **options)

    def analyse_text(text):
        if text[:2] == '..' and text[2:3] != '.':
            return 0.3
        p1 = text.find("\n")
        p2 = text.find("\n", p1 + 1)
        if (p2 > -1 and                 # has two lines
                p1 * 2 + 1 == p2 and    # they are the same length
                text[p1+1] in '-=' and  # the next line both starts and ends with
                text[p1+1] == text[p2-1]):  # ...a sufficiently high header
            return 0.5
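

# A minimal sketch of the `handlecodeblocks` option (hypothetical input):
# with the default (True) the directive body is delegated to PythonLexer;
# with False it is emitted as plain String tokens:
#
#     from pygments import lex
#     rst = ".. code-block:: python\n\n    print('hi')\n\n"
#     tokens = list(lex(rst, RstLexer(handlecodeblocks=False)))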


class TexLexer(RegexLexer):
    """
    Lexer for the TeX and LaTeX typesetting languages.
    """

    name = 'TeX'
    aliases = ['tex', 'latex']
    filenames = ['*.tex', '*.aux', '*.toc']
    mimetypes = ['text/x-tex', 'text/x-latex']

    tokens = {
        'general': [
            (r'%.*?\n', Comment),
            (r'[{}]', Name.Builtin),
            (r'[&_^]', Name.Builtin),
        ],
        'root': [
            (r'\\\[', String.Backtick, 'displaymath'),
            (r'\\\(', String, 'inlinemath'),
            (r'\$\$', String.Backtick, 'displaymath'),
            (r'\$', String, 'inlinemath'),
            (r'\\([a-zA-Z]+|.)', Keyword, 'command'),
            (r'\\$', Keyword),
            include('general'),
            (r'[^\\$%&_^{}]+', Text),
        ],
        'math': [
            (r'\\([a-zA-Z]+|.)', Name.Variable),
            include('general'),
            (r'[0-9]+', Number),
            (r'[-=!+*/()\[\]]', Operator),
            (r'[^=!+*/()\[\]\\$%&_^{}0-9-]+', Name.Builtin),
        ],
        'inlinemath': [
            (r'\\\)', String, '#pop'),
            (r'\$', String, '#pop'),
            include('math'),
        ],
        'displaymath': [
            (r'\\\]', String, '#pop'),
            (r'\$\$', String, '#pop'),
            (r'\$', Name.Builtin),
            include('math'),
        ],
        'command': [
            (r'\[.*?\]', Name.Attribute),
            (r'\*', Keyword),
            default('#pop'),
        ],
    }

    def analyse_text(text):
        for start in ("\\documentclass", "\\input", "\\documentstyle",
                      "\\relax"):
            if text[:len(start)] == start:
                return True
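

# A minimal sketch of guessing TeX input via `analyse_text` (hypothetical
# snippet; `guess_lexer` scores all lexers, and TexLexer returns True here,
# so the result is most likely 'TeX'):
#
#     from pygments.lexers import guess_lexer
#     print(guess_lexer('\\documentclass{article}').name)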


class GroffLexer(RegexLexer):
    """
    Lexer for the (g)roff typesetting language, supporting groff
    extensions. Mainly useful for highlighting manpage sources.

    .. versionadded:: 0.6
    """

    name = 'Groff'
    aliases = ['groff', 'nroff', 'man']
    filenames = ['*.[1-9]', '*.man', '*.1p', '*.3pm']
    mimetypes = ['application/x-troff', 'text/troff']

    tokens = {
        'root': [
            (r'(\.)(\w+)', bygroups(Text, Keyword), 'request'),
            (r'\.', Punctuation, 'request'),
            # Regular characters, slurp till we find a backslash or newline
            (r'[^\\\n]+', Text, 'textline'),
            default('textline'),
        ],
        'textline': [
            include('escapes'),
            (r'[^\\\n]+', Text),
            (r'\n', Text, '#pop'),
        ],
        'escapes': [
            # groff has many ways to write escapes.
            (r'\\"[^\n]*', Comment),
            (r'\\[fn]\w', String.Escape),
            (r'\\\(.{2}', String.Escape),
            (r'\\.\[.*\]', String.Escape),
            (r'\\.', String.Escape),
            (r'\\\n', Text, 'request'),
        ],
        'request': [
            (r'\n', Text, '#pop'),
            include('escapes'),
            (r'"[^\n"]+"', String.Double),
            (r'\d+', Number),
            (r'\S+', String),
            (r'\s+', Text),
        ],
    }

    def analyse_text(text):
        if text[:1] != '.':
            return False
        if text[:3] == '.\\"':
            return True
        if text[:4] == '.TH ':
            return True
        if text[1:3].isalnum() and text[3].isspace():
            return 0.9
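

# A minimal sketch of manpage detection (hypothetical input; the metaclass
# wraps analyse_text so it can be called directly and returns a float score,
# 1.0 for a .TH header):
#
#     print(GroffLexer.analyse_text('.TH LS 1'))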


class MozPreprocHashLexer(RegexLexer):
    """
    Lexer for Mozilla Preprocessor files (with '#' as the marker).

    Other data is left untouched.

    .. versionadded:: 2.0
    """
    name = 'mozhashpreproc'
    aliases = [name]
    filenames = []
    mimetypes = []

    tokens = {
        'root': [
            (r'^#', Comment.Preproc, ('expr', 'exprstart')),
            (r'.+', Other),
        ],
        'exprstart': [
            (r'(literal)(.*)', bygroups(Comment.Preproc, Text), '#pop:2'),
            (words((
                'define', 'undef', 'if', 'ifdef', 'ifndef', 'else', 'elif',
                'elifdef', 'elifndef', 'endif', 'expand', 'filter', 'unfilter',
                'include', 'includesubst', 'error')),
             Comment.Preproc, '#pop'),
        ],
        'expr': [
            (words(('!', '!=', '==', '&&', '||')), Operator),
            (r'(defined)(\()', bygroups(Keyword, Punctuation)),
            (r'\)', Punctuation),
            (r'[0-9]+', Number.Decimal),
            (r'__\w+?__', Name.Variable),
            (r'@\w+?@', Name.Class),
            (r'\w+', Name),
            (r'\n', Text, '#pop'),
            (r'\s+', Text),
            (r'\S', Punctuation),
        ],
    }


class MozPreprocPercentLexer(MozPreprocHashLexer):
    """
    Lexer for Mozilla Preprocessor files (with '%' as the marker).

    Other data is left untouched.

    .. versionadded:: 2.0
    """
    name = 'mozpercentpreproc'
    aliases = [name]
    filenames = []
    mimetypes = []

    tokens = {
        'root': [
            (r'^%', Comment.Preproc, ('expr', 'exprstart')),
            (r'.+', Other),
        ],
    }


class MozPreprocXulLexer(DelegatingLexer):
    """
    Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the
    `XmlLexer`.

    .. versionadded:: 2.0
    """
    name = "XUL+mozpreproc"
    aliases = ['xul+mozpreproc']
    filenames = ['*.xul.in']
    mimetypes = []

    def __init__(self, **options):
        super().__init__(XmlLexer, MozPreprocHashLexer, **options)


class MozPreprocJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `MozPreprocHashLexer` that highlights unlexed data with the
    `JavascriptLexer`.

    .. versionadded:: 2.0
    """
    name = "Javascript+mozpreproc"
    aliases = ['javascript+mozpreproc']
    filenames = ['*.js.in']
    mimetypes = []

    def __init__(self, **options):
        super().__init__(JavascriptLexer, MozPreprocHashLexer, **options)


class MozPreprocCssLexer(DelegatingLexer):
    """
    Subclass of the `MozPreprocPercentLexer` that highlights unlexed data with
    the `CssLexer`.

    .. versionadded:: 2.0
    """
    name = "CSS+mozpreproc"
    aliases = ['css+mozpreproc']
    filenames = ['*.css.in']
    mimetypes = []

    def __init__(self, **options):
        super().__init__(CssLexer, MozPreprocPercentLexer, **options)
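

# A minimal sketch of the delegating setup (hypothetical input): the '#'
# preprocessor directives are lexed by MozPreprocHashLexer, everything else
# by XmlLexer:
#
#     from pygments import lex
#     src = '#ifdef DEBUG\n<label value="debug"/>\n#endif\n'
#     for token, value in lex(src, MozPreprocXulLexer()):
#         print(token, repr(value))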


class MarkdownLexer(RegexLexer):
    """
    For Markdown markup.

    .. versionadded:: 2.2
    """
    name = 'Markdown'
    url = 'https://daringfireball.net/projects/markdown/'
    aliases = ['markdown', 'md']
    filenames = ['*.md', '*.markdown']
    mimetypes = ["text/x-markdown"]
    flags = re.MULTILINE

    def _handle_codeblock(self, match):
        from pygments.lexers import get_lexer_by_name

        yield match.start('initial'), String.Backtick, match.group('initial')
        yield match.start('lang'), String.Backtick, match.group('lang')
        if match.group('afterlang') is not None:
            yield match.start('whitespace'), Whitespace, match.group('whitespace')
            yield match.start('extra'), Text, match.group('extra')
        yield match.start('newline'), Whitespace, match.group('newline')

        # lookup lexer if wanted and existing
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name(match.group('lang').strip())
            except ClassNotFound:
                pass
        code = match.group('code')
        # no lexer for this language. handle it as if it were a code block
        if lexer is None:
            yield match.start('code'), String, code
        else:
            # FIXME: aren't the offsets wrong?
            yield from do_insertions([], lexer.get_tokens_unprocessed(code))

        yield match.start('terminator'), String.Backtick, match.group('terminator')

    tokens = {
        'root': [
            # heading with '#' prefix (atx-style)
            (r'(^#[^#].+)(\n)', bygroups(Generic.Heading, Text)),
            # subheading with '#' prefix (atx-style)
            (r'(^#{2,6}[^#].+)(\n)', bygroups(Generic.Subheading, Text)),
            # heading with '=' underlines (Setext-style)
            (r'^(.+)(\n)(=+)(\n)', bygroups(Generic.Heading, Text, Generic.Heading, Text)),
            # subheading with '-' underlines (Setext-style)
            (r'^(.+)(\n)(-+)(\n)', bygroups(Generic.Subheading, Text, Generic.Subheading, Text)),
            # task list
            (r'^(\s*)([*-] )(\[[ xX]\])( .+\n)',
             bygroups(Whitespace, Keyword, Keyword, using(this, state='inline'))),
            # bulleted list
            (r'^(\s*)([*-])(\s)(.+\n)',
             bygroups(Whitespace, Keyword, Whitespace, using(this, state='inline'))),
            # numbered list
            (r'^(\s*)([0-9]+\.)( .+\n)',
             bygroups(Whitespace, Keyword, using(this, state='inline'))),
            # quote
            (r'^(\s*>\s)(.+\n)', bygroups(Keyword, Generic.Emph)),
            # code block fenced by 3 backticks
            (r'^(\s*```\n[\w\W]*?^\s*```$\n)', String.Backtick),
            # code block with language
            # Some tools include extra stuff after the language name, just
            # highlight that as text. For example: https://docs.enola.dev/use/execmd
            (r'''(?x)
              ^(?P<initial>\s*```)
              (?P<lang>[\w\-]+)
              (?P<afterlang>
                  (?P<whitespace>[^\S\n]+)
                  (?P<extra>.*))?
              (?P<newline>\n)
              (?P<code>(.|\n)*?)
              (?P<terminator>^\s*```$\n)
              ''',
             _handle_codeblock),

            include('inline'),
        ],
        'inline': [
            # escape
            (r'\\.', Text),
            # inline code
            (r'([^`]?)(`[^`\n]+`)', bygroups(Text, String.Backtick)),
            # warning: the following rules eat outer tags.
            # eg. **foo _bar_ baz** => foo and baz are not recognized as bold
            # bold fenced by '**'
            (r'([^\*]?)(\*\*[^* \n][^*\n]*\*\*)', bygroups(Text, Generic.Strong)),
            # bold fenced by '__'
            (r'([^_]?)(__[^_ \n][^_\n]*__)', bygroups(Text, Generic.Strong)),
            # italics fenced by '*'
            (r'([^\*]?)(\*[^* \n][^*\n]*\*)', bygroups(Text, Generic.Emph)),
            # italics fenced by '_'
            (r'([^_]?)(_[^_ \n][^_\n]*_)', bygroups(Text, Generic.Emph)),
            # strikethrough
            (r'([^~]?)(~~[^~ \n][^~\n]*~~)', bygroups(Text, Generic.Deleted)),
            # mentions and topics (twitter and github stuff)
            (r'[@#][\w/:]+', Name.Entity),
            # (image?) links eg: ![Image of Yaktocat](https://octodex.github.com/images/yaktocat.png)
            (r'(!?\[)([^]]+)(\])(\()([^)]+)(\))',
             bygroups(Text, Name.Tag, Text, Text, Name.Attribute, Text)),
            # reference-style links, e.g.:
            #   [an example][id]
            #   [id]: http://example.com/
            (r'(\[)([^]]+)(\])(\[)([^]]*)(\])',
             bygroups(Text, Name.Tag, Text, Text, Name.Label, Text)),
            (r'^(\s*\[)([^]]*)(\]:\s*)(.+)',
             bygroups(Text, Name.Label, Text, Name.Attribute)),

            # general text, must come last!
            (r'[^\\\s]+', Text),
            (r'.', Text),
        ],
    }

    def __init__(self, **options):
        self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
        RegexLexer.__init__(self, **options)
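

# A minimal sketch of fenced-code delegation (hypothetical input): the body
# of the ```python block below is handed to PythonLexer by _handle_codeblock:
#
#     from pygments import lex
#     md = "```python\nprint('hi')\n```\n"
#     tokens = list(lex(md, MarkdownLexer()))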


class TiddlyWiki5Lexer(RegexLexer):
    """
    For TiddlyWiki5 markup.

    .. versionadded:: 2.7
    """
    name = 'tiddler'
    url = 'https://tiddlywiki.com/#TiddlerFiles'
    aliases = ['tid']
    filenames = ['*.tid']
    mimetypes = ["text/vnd.tiddlywiki"]
    flags = re.MULTILINE

    def _handle_codeblock(self, match):
        """
        match args: 1:backticks, 2:lang_name, 3:newline, 4:code, 5:backticks
        """
        from pygments.lexers import get_lexer_by_name

        # section header
        yield match.start(1), String, match.group(1)
        yield match.start(2), String, match.group(2)
        yield match.start(3), Text, match.group(3)

        # lookup lexer if wanted and existing
        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name(match.group(2).strip())
            except ClassNotFound:
                pass
        code = match.group(4)

        # no lexer for this language. handle it as if it were a code block
        if lexer is None:
            yield match.start(4), String, code
            return

        yield from do_insertions([], lexer.get_tokens_unprocessed(code))

        yield match.start(5), String, match.group(5)

    def _handle_cssblock(self, match):
        """
        match args: 1:style tag 2:newline, 3:code, 4:closing style tag
        """
        from pygments.lexers import get_lexer_by_name

        # section header
        yield match.start(1), String, match.group(1)
        yield match.start(2), String, match.group(2)

        lexer = None
        if self.handlecodeblocks:
            try:
                lexer = get_lexer_by_name('css')
            except ClassNotFound:
                pass
        code = match.group(3)

        # no lexer for this language. handle it as if it were a code block
        if lexer is None:
            yield match.start(3), String, code
            return

        yield from do_insertions([], lexer.get_tokens_unprocessed(code))

        yield match.start(4), String, match.group(4)

    tokens = {
        'root': [
            # title in metadata section
            (r'^(title)(:\s)(.+\n)', bygroups(Keyword, Text, Generic.Heading)),
            # headings
            (r'^(!)([^!].+\n)', bygroups(Generic.Heading, Text)),
            (r'^(!{2,6})(.+\n)', bygroups(Generic.Subheading, Text)),
            # bulleted or numbered lists or single-line block quotes
            # (can be mixed)
            (r'^(\s*)([*#>]+)(\s*)(.+\n)',
             bygroups(Text, Keyword, Text, using(this, state='inline'))),
            # multi-line block quotes
            (r'^(<<<.*\n)([\w\W]*?)(^<<<.*$)', bygroups(String, Text, String)),
            # table header
            (r'^(\|.*?\|h)$', bygroups(Generic.Strong)),
            # table footer or caption
            (r'^(\|.*?\|[cf])$', bygroups(Generic.Emph)),
            # table class
            (r'^(\|.*?\|k)$', bygroups(Name.Tag)),
            # definitions
            (r'^(;.*)$', bygroups(Generic.Strong)),
            # text block
            (r'^(```\n)([\w\W]*?)(^```$)', bygroups(String, Text, String)),
            # code block with language
            (r'^(```)(\w+)(\n)([\w\W]*?)(^```$)', _handle_codeblock),
            # CSS style block
            (r'^(<style>)(\n)([\w\W]*?)(^</style>$)', _handle_cssblock),

            include('keywords'),
            include('inline'),
        ],
        'keywords': [
            (words((
                '\\define', '\\end', 'caption', 'created', 'modified', 'tags',
                'title', 'type'), prefix=r'^', suffix=r'\b'),
             Keyword),
        ],
        'inline': [
            # escape
            (r'\\.', Text),
            # created or modified date
            (r'\d{17}', Number.Integer),
            # italics
            (r'(\s)(//[^/]+//)((?=\W|\n))',
             bygroups(Text, Generic.Emph, Text)),
            # superscript
            (r'(\s)(\^\^[^\^]+\^\^)', bygroups(Text, Generic.Emph)),
            # subscript
            (r'(\s)(,,[^,]+,,)', bygroups(Text, Generic.Emph)),
            # underscore
            (r'(\s)(__[^_]+__)', bygroups(Text, Generic.Strong)),
            # bold
            (r"(\s)(''[^']+'')((?=\W|\n))",
             bygroups(Text, Generic.Strong, Text)),
            # strikethrough
            (r'(\s)(~~[^~]+~~)((?=\W|\n))',
             bygroups(Text, Generic.Deleted, Text)),
            # TiddlyWiki variables
            (r'<<[^>]+>>', Name.Tag),
            (r'\$\$[^$]+\$\$', Name.Tag),
            (r'\$\([^)]+\)\$', Name.Tag),
            # TiddlyWiki style or class
            (r'^@@.*$', Name.Tag),
            # HTML tags
            (r'</?[^>]+>', Name.Tag),
            # inline code
            (r'`[^`]+`', String.Backtick),
            # HTML escaped symbols
            (r'&\S*?;', String.Regex),
            # Wiki links
            (r'(\[{2})([^]\|]+)(\]{2})', bygroups(Text, Name.Tag, Text)),
            # External links
            (r'(\[{2})([^]\|]+)(\|)([^]\|]+)(\]{2})',
             bygroups(Text, Name.Tag, Text, Name.Attribute, Text)),
            # Transclusion
            (r'(\{{2})([^}]+)(\}{2})', bygroups(Text, Name.Tag, Text)),
            # URLs
            (r'(\b.?.?tps?://[^\s"]+)', bygroups(Name.Attribute)),

            # general text, must come last!
            (r'[\w]+', Text),
            (r'.', Text)
        ],
    }

    def __init__(self, **options):
        self.handlecodeblocks = get_bool_opt(options, 'handlecodeblocks', True)
        RegexLexer.__init__(self, **options)
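

# A minimal usage sketch (hypothetical .tid content; the metadata line and
# the heading get Keyword and Generic.Heading tokens respectively):
#
#     from pygments import lex
#     tid = 'title: HelloThere\n\n! A heading\n'
#     tokens = list(lex(tid, TiddlyWiki5Lexer()))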


class WikitextLexer(RegexLexer):
    """
    For MediaWiki Wikitext.

    Parsing Wikitext is tricky, and results vary between different MediaWiki
    installations, so we only highlight common syntaxes (built-in or from
    popular extensions), and also assume templates produce no unbalanced
    syntaxes.

    .. versionadded:: 2.15
    """
    name = 'Wikitext'
    url = 'https://www.mediawiki.org/wiki/Wikitext'
    aliases = ['wikitext', 'mediawiki']
    filenames = []
    mimetypes = ['text/x-wiki']
    flags = re.MULTILINE

    def nowiki_tag_rules(tag_name):
        return [
            (r'(?i)(</)({})(\s*)(>)'.format(tag_name), bygroups(Punctuation,
             Name.Tag, Whitespace, Punctuation), '#pop'),
            include('entity'),
            include('text'),
        ]

    def plaintext_tag_rules(tag_name):
        return [
            (r'(?si)(.*?)(</)({})(\s*)(>)'.format(tag_name), bygroups(Text,
             Punctuation, Name.Tag, Whitespace, Punctuation), '#pop'),
        ]

    def delegate_tag_rules(tag_name, lexer):
        return [
            (r'(?i)(</)({})(\s*)(>)'.format(tag_name), bygroups(Punctuation,
             Name.Tag, Whitespace, Punctuation), '#pop'),
            (r'(?si).+?(?=</{}\s*>)'.format(tag_name), using(lexer)),
        ]

    def text_rules(token):
        return [
            (r'\w+', token),
            (r'[^\S\n]+', token),
            (r'(?s).', token),
        ]

    def handle_syntaxhighlight(self, match, ctx):
        from pygments.lexers import get_lexer_by_name

        attr_content = match.group()
        start = 0
        index = 0
        while True:
            index = attr_content.find('>', start)
            # Exclude comment end (-->)
            if attr_content[index-2:index] != '--':
                break
            start = index + 1

        if index == -1:
            # No tag end
            yield from self.get_tokens_unprocessed(attr_content, stack=['root', 'attr'])
            return
        attr = attr_content[:index]
        yield from self.get_tokens_unprocessed(attr, stack=['root', 'attr'])
        yield match.start(3) + index, Punctuation, '>'

        lexer = None
        content = attr_content[index+1:]
        lang_match = re.findall(r'\blang=("|\'|)(\w+)(\1)', attr)

        if len(lang_match) >= 1:
            # Pick the last match in case of multiple matches
            lang = lang_match[-1][1]
            try:
                lexer = get_lexer_by_name(lang)
            except ClassNotFound:
                pass

        if lexer is None:
            yield match.start() + index + 1, Text, content
        else:
            yield from lexer.get_tokens_unprocessed(content)

    def handle_score(self, match, ctx):
        attr_content = match.group()
        start = 0
        index = 0
        while True:
            index = attr_content.find('>', start)
            # Exclude comment end (-->)
            if attr_content[index-2:index] != '--':
                break
            start = index + 1

        if index == -1:
            # No tag end
            yield from self.get_tokens_unprocessed(attr_content, stack=['root', 'attr'])
            return
        attr = attr_content[:index]
        content = attr_content[index+1:]
        yield from self.get_tokens_unprocessed(attr, stack=['root', 'attr'])
        yield match.start(3) + index, Punctuation, '>'

        lang_match = re.findall(r'\blang=("|\'|)(\w+)(\1)', attr)
        # Pick the last match in case of multiple matches
        lang = lang_match[-1][1] if len(lang_match) >= 1 else 'lilypond'

        if lang == 'lilypond':  # Case sensitive
            yield from LilyPondLexer().get_tokens_unprocessed(content)
        else:  # ABC
            # FIXME: Use ABC lexer in the future
            yield match.start() + index + 1, Text, content

    # a-z removed to prevent linter from complaining, REMEMBER to use (?i)
    title_char = r' %!"$&\'()*,\-./0-9:;=?@A-Z\\\^_`~+\u0080-\uFFFF'
    nbsp_char = r'(?:\t|&nbsp;|&\#0*160;|&\#[Xx]0*[Aa]0;|[ \xA0\u1680\u2000-\u200A\u202F\u205F\u3000])'
    link_address = r'(?:[0-9.]+|\[[0-9a-f:.]+\]|[^\x00-\x20"<>\[\]\x7F\xA0\u1680\u2000-\u200A\u202F\u205F\u3000\uFFFD])'
    link_char_class = r'[^\x00-\x20"<>\[\]\x7F\xA0\u1680\u2000-\u200A\u202F\u205F\u3000\uFFFD]'
    double_slashes_i = {
        '__FORCETOC__', '__NOCONTENTCONVERT__', '__NOCC__', '__NOEDITSECTION__', '__NOGALLERY__',
        '__NOTITLECONVERT__', '__NOTC__', '__NOTOC__', '__TOC__',
    }
    double_slashes = {
        '__EXPECTUNUSEDCATEGORY__', '__HIDDENCAT__', '__INDEX__', '__NEWSECTIONLINK__',
        '__NOINDEX__', '__NONEWSECTIONLINK__', '__STATICREDIRECT__', '__NOGLOBAL__',
        '__DISAMBIG__', '__EXPECTED_UNCONNECTED_PAGE__',
    }
    protocols = {
        'bitcoin:', 'ftp://', 'ftps://', 'geo:', 'git://', 'gopher://', 'http://', 'https://',
        'irc://', 'ircs://', 'magnet:', 'mailto:', 'mms://', 'news:', 'nntp://', 'redis://',
        'sftp://', 'sip:', 'sips:', 'sms:', 'ssh://', 'svn://', 'tel:', 'telnet://', 'urn:',
        'worldwind://', 'xmpp:', '//',
    }
    non_relative_protocols = protocols - {'//'}
    html_tags = {
        'abbr', 'b', 'bdi', 'bdo', 'big', 'blockquote', 'br', 'caption', 'center', 'cite', 'code',
        'data', 'dd', 'del', 'dfn', 'div', 'dl', 'dt', 'em', 'font', 'h1', 'h2', 'h3', 'h4', 'h5',
        'h6', 'hr', 'i', 'ins', 'kbd', 'li', 'link', 'mark', 'meta', 'ol', 'p', 'q', 'rb', 'rp',
        'rt', 'rtc', 'ruby', 's', 'samp', 'small', 'span', 'strike', 'strong', 'sub', 'sup',
        'table', 'td', 'th', 'time', 'tr', 'tt', 'u', 'ul', 'var', 'wbr',
    }
    parser_tags = {
        'graph', 'charinsert', 'rss', 'chem', 'categorytree', 'nowiki', 'inputbox', 'math',
        'hiero', 'score', 'pre', 'ref', 'translate', 'imagemap', 'templatestyles', 'languages',
        'noinclude', 'mapframe', 'section', 'poem', 'syntaxhighlight', 'includeonly', 'tvar',
        'onlyinclude', 'templatedata', 'langconvert', 'timeline', 'dynamicpagelist', 'gallery',
        'maplink', 'ce', 'references',
    }
    variant_langs = {
        # ZhConverter.php
        'zh', 'zh-hans', 'zh-hant', 'zh-cn', 'zh-hk', 'zh-mo', 'zh-my', 'zh-sg', 'zh-tw',
        # UnConverter.php
        'uz', 'uz-latn', 'uz-cyrl',
        # TlyConverter.php
        'tly', 'tly-cyrl',
        # TgConverter.php
        'tg', 'tg-latn',
        # SrConverter.php
        'sr', 'sr-ec', 'sr-el',
        # ShiConverter.php
        'shi', 'shi-tfng', 'shi-latn',
        # ShConverter.php
        'sh-latn', 'sh-cyrl',
        # KuConverter.php
        'ku', 'ku-arab', 'ku-latn',
        # KkConverter.php
        'kk', 'kk-cyrl', 'kk-latn', 'kk-arab', 'kk-kz', 'kk-tr', 'kk-cn',
        # IuConverter.php
        'iu', 'ike-cans', 'ike-latn',
        # GanConverter.php
        'gan', 'gan-hans', 'gan-hant',
        # EnConverter.php
        'en', 'en-x-piglatin',
        # CrhConverter.php
        'crh', 'crh-cyrl', 'crh-latn',
        # BanConverter.php
        'ban', 'ban-bali', 'ban-x-dharma', 'ban-x-palmleaf', 'ban-x-pku',
    }
    magic_vars_i = {
        'ARTICLEPATH', 'INT', 'PAGEID', 'SCRIPTPATH', 'SERVER', 'SERVERNAME', 'STYLEPATH',
    }
    magic_vars = {
        '!', '=', 'BASEPAGENAME', 'BASEPAGENAMEE', 'CASCADINGSOURCES', 'CONTENTLANGUAGE',
        'CONTENTLANG', 'CURRENTDAY', 'CURRENTDAY2', 'CURRENTDAYNAME', 'CURRENTDOW', 'CURRENTHOUR',
        'CURRENTMONTH', 'CURRENTMONTH2', 'CURRENTMONTH1', 'CURRENTMONTHABBREV', 'CURRENTMONTHNAME',
        'CURRENTMONTHNAMEGEN', 'CURRENTTIME', 'CURRENTTIMESTAMP', 'CURRENTVERSION', 'CURRENTWEEK',
        'CURRENTYEAR', 'DIRECTIONMARK', 'DIRMARK', 'FULLPAGENAME', 'FULLPAGENAMEE', 'LOCALDAY',
        'LOCALDAY2', 'LOCALDAYNAME', 'LOCALDOW', 'LOCALHOUR', 'LOCALMONTH', 'LOCALMONTH2',
        'LOCALMONTH1', 'LOCALMONTHABBREV', 'LOCALMONTHNAME', 'LOCALMONTHNAMEGEN', 'LOCALTIME',
        'LOCALTIMESTAMP', 'LOCALWEEK', 'LOCALYEAR', 'NAMESPACE', 'NAMESPACEE', 'NAMESPACENUMBER',
        'NUMBEROFACTIVEUSERS', 'NUMBEROFADMINS', 'NUMBEROFARTICLES', 'NUMBEROFEDITS',
        'NUMBEROFFILES', 'NUMBEROFPAGES', 'NUMBEROFUSERS', 'PAGELANGUAGE', 'PAGENAME', 'PAGENAMEE',
        'REVISIONDAY', 'REVISIONDAY2', 'REVISIONID', 'REVISIONMONTH', 'REVISIONMONTH1',
        'REVISIONSIZE', 'REVISIONTIMESTAMP', 'REVISIONUSER', 'REVISIONYEAR', 'ROOTPAGENAME',
        'ROOTPAGENAMEE', 'SITENAME', 'SUBJECTPAGENAME', 'ARTICLEPAGENAME', 'SUBJECTPAGENAMEE',
        'ARTICLEPAGENAMEE', 'SUBJECTSPACE', 'ARTICLESPACE', 'SUBJECTSPACEE', 'ARTICLESPACEE',
        'SUBPAGENAME', 'SUBPAGENAMEE', 'TALKPAGENAME', 'TALKPAGENAMEE', 'TALKSPACE', 'TALKSPACEE',
    }
    parser_functions_i = {
        'ANCHORENCODE', 'BIDI', 'CANONICALURL', 'CANONICALURLE', 'FILEPATH', 'FORMATNUM',
        'FULLURL', 'FULLURLE', 'GENDER', 'GRAMMAR', 'INT', r'\#LANGUAGE', 'LC', 'LCFIRST', 'LOCALURL',
        'LOCALURLE', 'NS', 'NSE', 'PADLEFT', 'PADRIGHT', 'PAGEID', 'PLURAL', 'UC', 'UCFIRST',
        'URLENCODE',
    }
    parser_functions = {
        'BASEPAGENAME', 'BASEPAGENAMEE', 'CASCADINGSOURCES', 'DEFAULTSORT', 'DEFAULTSORTKEY',
        'DEFAULTCATEGORYSORT', 'FULLPAGENAME', 'FULLPAGENAMEE', 'NAMESPACE', 'NAMESPACEE',
        'NAMESPACENUMBER', 'NUMBERINGROUP', 'NUMINGROUP', 'NUMBEROFACTIVEUSERS', 'NUMBEROFADMINS',
        'NUMBEROFARTICLES', 'NUMBEROFEDITS', 'NUMBEROFFILES', 'NUMBEROFPAGES', 'NUMBEROFUSERS',
        'PAGENAME', 'PAGENAMEE', 'PAGESINCATEGORY', 'PAGESINCAT', 'PAGESIZE', 'PROTECTIONEXPIRY',
        'PROTECTIONLEVEL', 'REVISIONDAY', 'REVISIONDAY2', 'REVISIONID', 'REVISIONMONTH',
        'REVISIONMONTH1', 'REVISIONTIMESTAMP', 'REVISIONUSER', 'REVISIONYEAR', 'ROOTPAGENAME',
        'ROOTPAGENAMEE', 'SUBJECTPAGENAME', 'ARTICLEPAGENAME', 'SUBJECTPAGENAMEE',
        'ARTICLEPAGENAMEE', 'SUBJECTSPACE', 'ARTICLESPACE', 'SUBJECTSPACEE', 'ARTICLESPACEE',
        'SUBPAGENAME', 'SUBPAGENAMEE', 'TALKPAGENAME', 'TALKPAGENAMEE', 'TALKSPACE', 'TALKSPACEE',
        'INT', 'DISPLAYTITLE', 'PAGESINNAMESPACE', 'PAGESINNS',
    }

    tokens = {
        'root': [
            # Redirects
            (r"""(?xi)
                (\A\s*?)(\#REDIRECT:?) # may contain a colon
                (\s+)(\[\[) (?=[^\]\n]* \]\]$)
             """,
             bygroups(Whitespace, Keyword, Whitespace, Punctuation), 'redirect-inner'),
            # Subheadings
            (r'^(={2,6})(.+?)(\1)(\s*$\n)',
             bygroups(Generic.Subheading, Generic.Subheading, Generic.Subheading, Whitespace)),
            # Headings
            (r'^(=.+?=)(\s*$\n)',
             bygroups(Generic.Heading, Whitespace)),
            # Double-slashed magic words
            (words(double_slashes_i, prefix=r'(?i)'), Name.Function.Magic),
            (words(double_slashes), Name.Function.Magic),
            # Raw URLs
            (r'(?i)\b(?:{}){}{}*'.format('|'.join(protocols),
             link_address, link_char_class), Name.Label),
            # Magic links
            (r'\b(?:RFC|PMID){}+[0-9]+\b'.format(nbsp_char),
             Name.Function.Magic),
            (r"""(?x)
                \bISBN {nbsp_char}
                (?: 97[89] {nbsp_dash}? )?
                (?: [0-9] {nbsp_dash}? ){{9}} # escape format()
                [0-9Xx]\b
            """.format(nbsp_char=nbsp_char, nbsp_dash=f'(?:-|{nbsp_char})'), Name.Function.Magic),
            include('list'),
            include('inline'),
            include('text'),
        ],
        'redirect-inner': [
            (r'(\]\])(\s*?\n)', bygroups(Punctuation, Whitespace), '#pop'),
            (r'(\#)([^#]*?)', bygroups(Punctuation, Name.Label)),
            (r'(?i)[{}]+'.format(title_char), Name.Tag),
        ],
        'list': [
            # Description lists
            (r'^;', Keyword, 'dt'),
            # Ordered lists, unordered lists and indents
            (r'^[#:*]+', Keyword),
            # Horizontal rules
            (r'^-{4,}', Keyword),
        ],
        'inline': [
            # Signatures
            (r'~{3,5}', Keyword),
            # Entities
            include('entity'),
            # Bold & italic
            (r"('')(''')(?!')", bygroups(Generic.Emph,
             Generic.EmphStrong), 'inline-italic-bold'),
            (r"'''(?!')", Generic.Strong, 'inline-bold'),
            (r"''(?!')", Generic.Emph, 'inline-italic'),
            # Comments & parameters & templates
            include('replaceable'),
            # Media links
            (
                r"""(?xi)
                (\[\[)
                    (File|Image) (:)
                    ((?: [%s] | \{{2,3}[^{}]*?\}{2,3} | <!--[\s\S]*?--> )*)
                    (?: (\#) ([%s]*?) )?
                """ % (title_char, f'{title_char}#'),
                bygroups(Punctuation, Name.Namespace, Punctuation,
                         using(this, state=['wikilink-name']), Punctuation, Name.Label),
                'medialink-inner'
            ),
            # Wikilinks
            (
                r"""(?xi)
                (\[\[)(?!%s) # Should not contain URLs
                    (?: ([%s]*) (:))?
                    ((?: [%s] | \{{2,3}[^{}]*?\}{2,3} | <!--[\s\S]*?--> )*?)
                    (?: (\#) ([%s]*?) )?
                (\]\])
                """ % ('|'.join(protocols), title_char.replace('/', ''),
                       title_char, f'{title_char}#'),
                bygroups(Punctuation, Name.Namespace, Punctuation,
                         using(this, state=['wikilink-name']), Punctuation, Name.Label, Punctuation)
            ),
            (
                r"""(?xi)
                (\[\[)(?!%s)
                    (?: ([%s]*) (:))?
                    ((?: [%s] | \{{2,3}[^{}]*?\}{2,3} | <!--[\s\S]*?--> )*?)
                    (?: (\#) ([%s]*?) )?
                (\|)
                """ % ('|'.join(protocols), title_char.replace('/', ''),
                       title_char, f'{title_char}#'),
                bygroups(Punctuation, Name.Namespace, Punctuation,
                         using(this, state=['wikilink-name']), Punctuation, Name.Label, Punctuation),
                'wikilink-inner'
            ),
            # External links
            (
                r"""(?xi)
                (\[)
                ((?:{}) {} {}*)
                (\s*)
                """.format('|'.join(protocols), link_address, link_char_class),
                bygroups(Punctuation, Name.Label, Whitespace),
                'extlink-inner'
            ),
            # Tables
            (r'^(:*)(\s*?)(\{\|)([^\n]*)$', bygroups(Keyword,
             Whitespace, Punctuation, using(this, state=['root', 'attr'])), 'table'),
            # HTML tags
            (r'(?i)(<)({})\b'.format('|'.join(html_tags)),
             bygroups(Punctuation, Name.Tag), 'tag-inner-ordinary'),
            (r'(?i)(</)({})\b(\s*)(>)'.format('|'.join(html_tags)),
             bygroups(Punctuation, Name.Tag, Whitespace, Punctuation)),
            # <nowiki>
            (r'(?i)(<)(nowiki)\b', bygroups(Punctuation,
             Name.Tag), ('tag-nowiki', 'tag-inner')),
            # <pre>
            (r'(?i)(<)(pre)\b', bygroups(Punctuation,
             Name.Tag), ('tag-pre', 'tag-inner')),
            # <categorytree>
            (r'(?i)(<)(categorytree)\b', bygroups(
                Punctuation, Name.Tag), ('tag-categorytree', 'tag-inner')),
            # <hiero>
            (r'(?i)(<)(hiero)\b', bygroups(Punctuation,
             Name.Tag), ('tag-hiero', 'tag-inner')),
            # <math>
            (r'(?i)(<)(math)\b', bygroups(Punctuation,
             Name.Tag), ('tag-math', 'tag-inner')),
            # <chem>
            (r'(?i)(<)(chem)\b', bygroups(Punctuation,
             Name.Tag), ('tag-chem', 'tag-inner')),
            # <ce>
            (r'(?i)(<)(ce)\b', bygroups(Punctuation,
             Name.Tag), ('tag-ce', 'tag-inner')),
            # <charinsert>
            (r'(?i)(<)(charinsert)\b', bygroups(
                Punctuation, Name.Tag), ('tag-charinsert', 'tag-inner')),
            # <templatedata>
            (r'(?i)(<)(templatedata)\b', bygroups(
                Punctuation, Name.Tag), ('tag-templatedata', 'tag-inner')),
            # <gallery>
            (r'(?i)(<)(gallery)\b', bygroups(
                Punctuation, Name.Tag), ('tag-gallery', 'tag-inner')),
1144 # <graph>
1145 (r'(?i)(<)(gallery)\b', bygroups(
1146 Punctuation, Name.Tag), ('tag-graph', 'tag-inner')),
1147 # <dynamicpagelist>
1148 (r'(?i)(<)(dynamicpagelist)\b', bygroups(
1149 Punctuation, Name.Tag), ('tag-dynamicpagelist', 'tag-inner')),
1150 # <inputbox>
1151 (r'(?i)(<)(inputbox)\b', bygroups(
1152 Punctuation, Name.Tag), ('tag-inputbox', 'tag-inner')),
1153 # <rss>
1154 (r'(?i)(<)(rss)\b', bygroups(
1155 Punctuation, Name.Tag), ('tag-rss', 'tag-inner')),
1156 # <imagemap>
1157 (r'(?i)(<)(imagemap)\b', bygroups(
1158 Punctuation, Name.Tag), ('tag-imagemap', 'tag-inner')),
1159 # <syntaxhighlight>
1160 (r'(?i)(</)(syntaxhighlight)\b(\s*)(>)',
1161 bygroups(Punctuation, Name.Tag, Whitespace, Punctuation)),
1162 (r'(?si)(<)(syntaxhighlight)\b([^>]*?(?<!/)>.*?)(?=</\2\s*>)',
1163 bygroups(Punctuation, Name.Tag, handle_syntaxhighlight)),
1164 # <syntaxhighlight>: Fallback case for self-closing tags
1165 (r'(?i)(<)(syntaxhighlight)\b(\s*?)((?:[^>]|-->)*?)(/\s*?(?<!--)>)', bygroups(
1166 Punctuation, Name.Tag, Whitespace, using(this, state=['root', 'attr']), Punctuation)),
1167 # <source>
1168 (r'(?i)(</)(source)\b(\s*)(>)',
1169 bygroups(Punctuation, Name.Tag, Whitespace, Punctuation)),
1170 (r'(?si)(<)(source)\b([^>]*?(?<!/)>.*?)(?=</\2\s*>)',
1171 bygroups(Punctuation, Name.Tag, handle_syntaxhighlight)),
1172 # <source>: Fallback case for self-closing tags
1173 (r'(?i)(<)(source)\b(\s*?)((?:[^>]|-->)*?)(/\s*?(?<!--)>)', bygroups(
1174 Punctuation, Name.Tag, Whitespace, using(this, state=['root', 'attr']), Punctuation)),
1175 # <score>
1176 (r'(?i)(</)(score)\b(\s*)(>)',
1177 bygroups(Punctuation, Name.Tag, Whitespace, Punctuation)),
1178 (r'(?si)(<)(score)\b([^>]*?(?<!/)>.*?)(?=</\2\s*>)',
1179 bygroups(Punctuation, Name.Tag, handle_score)),
1180 # <score>: Fallback case for self-closing tags
1181 (r'(?i)(<)(score)\b(\s*?)((?:[^>]|-->)*?)(/\s*?(?<!--)>)', bygroups(
1182 Punctuation, Name.Tag, Whitespace, using(this, state=['root', 'attr']), Punctuation)),
1183 # Other parser tags
1184 (r'(?i)(<)({})\b'.format('|'.join(parser_tags)),
1185 bygroups(Punctuation, Name.Tag), 'tag-inner-ordinary'),
1186 (r'(?i)(</)({})\b(\s*)(>)'.format('|'.join(parser_tags)),
1187 bygroups(Punctuation, Name.Tag, Whitespace, Punctuation)),
1188 # LanguageConverter markups
1189 (
1190 r"""(?xi)
1191 (-\{{) # Escape format()
1192 (?: ([^|]) (\|))?
1193 (?: (\s* (?:{variants}) \s*) (=>))?
1194 (\s* (?:{variants}) \s*) (:)
1195 """.format(variants='|'.join(variant_langs)),
1196 bygroups(Punctuation, Keyword, Punctuation,
1197 Name.Label, Operator, Name.Label, Punctuation),
1198 'lc-inner'
1199 ),
1200 (r'-\{(?!\{)', Punctuation, 'lc-raw'),
1201 ],
1202 'wikilink-name': [
1203 include('replaceable'),
1204 (r'[^{<]+', Name.Tag),
1205 (r'(?s).', Name.Tag),
1206 ],
1207 'wikilink-inner': [
1208 # Quit in case of another wikilink
1209 (r'(?=\[\[)', Punctuation, '#pop'),
1210 (r'\]\]', Punctuation, '#pop'),
1211 include('inline'),
1212 include('text'),
1213 ],
1214 'medialink-inner': [
1215 (r'\]\]', Punctuation, '#pop'),
1216 (r'(\|)([^\n=|]*)(=)',
1217 bygroups(Punctuation, Name.Attribute, Operator)),
1218 (r'\|', Punctuation),
1219 include('inline'),
1220 include('text'),
1221 ],
1222 'quote-common': [
1223 # Quit in case of link/template endings
1224 (r'(?=\]\]|\{\{|\}\})', Punctuation, '#pop'),
1225 (r'\n', Text, '#pop'),
1226 ],
1227 'inline-italic': [
1228 include('quote-common'),
1229 (r"('')(''')(?!')", bygroups(Generic.Emph,
1230 Generic.Strong), ('#pop', 'inline-bold')),
1231 (r"'''(?!')", Generic.EmphStrong, ('#pop', 'inline-italic-bold')),
1232 (r"''(?!')", Generic.Emph, '#pop'),
1233 include('inline'),
1234 include('text-italic'),
1235 ],
1236 'inline-bold': [
1237 include('quote-common'),
1238 (r"(''')('')(?!')", bygroups(
1239 Generic.Strong, Generic.Emph), ('#pop', 'inline-italic')),
1240 (r"'''(?!')", Generic.Strong, '#pop'),
1241 (r"''(?!')", Generic.EmphStrong, ('#pop', 'inline-bold-italic')),
1242 include('inline'),
1243 include('text-bold'),
1244 ],
1245 'inline-bold-italic': [
1246 include('quote-common'),
1247 (r"('')(''')(?!')", bygroups(Generic.EmphStrong,
1248 Generic.Strong), '#pop'),
1249 (r"'''(?!')", Generic.EmphStrong, ('#pop', 'inline-italic')),
1250 (r"''(?!')", Generic.EmphStrong, ('#pop', 'inline-bold')),
1251 include('inline'),
1252 include('text-bold-italic'),
1253 ],
1254 'inline-italic-bold': [
1255 include('quote-common'),
1256 (r"(''')('')(?!')", bygroups(
1257 Generic.EmphStrong, Generic.Emph), '#pop'),
1258 (r"'''(?!')", Generic.EmphStrong, ('#pop', 'inline-italic')),
1259 (r"''(?!')", Generic.EmphStrong, ('#pop', 'inline-bold')),
1260 include('inline'),
1261 include('text-bold-italic'),
1262 ],
1263 'lc-inner': [
1264 (
1265 r"""(?xi)
1266 (;)
1267 (?: (\s* (?:{variants}) \s*) (=>))?
1268 (\s* (?:{variants}) \s*) (:)
1269 """.format(variants='|'.join(variant_langs)),
1270 bygroups(Punctuation, Name.Label,
1271 Operator, Name.Label, Punctuation)
1272 ),
1273 (r';?\s*?\}-', Punctuation, '#pop'),
1274 include('inline'),
1275 include('text'),
1276 ],
1277 'lc-raw': [
1278 (r'\}-', Punctuation, '#pop'),
1279 include('inline'),
1280 include('text'),
1281 ],
1282 'replaceable': [
1283 # Comments
1284 (r'<!--[\s\S]*?(?:-->|\Z)', Comment.Multiline),
1285 # Parameters
1286 (
1287 r"""(?x)
1288 (\{{3})
1289 ([^|]*?)
1290 (?=\}{3}|\|)
1291 """,
1292 bygroups(Punctuation, Name.Variable),
1293 'parameter-inner',
1294 ),
1295 # Magic variables
1296 (r'(?i)(\{\{)(\s*)(%s)(\s*)(\}\})' % '|'.join(magic_vars_i),
1297 bygroups(Punctuation, Whitespace, Name.Function, Whitespace, Punctuation)),
1298 (r'(\{\{)(\s*)(%s)(\s*)(\}\})' % '|'.join(magic_vars),
1299 bygroups(Punctuation, Whitespace, Name.Function, Whitespace, Punctuation)),
1300 # Parser functions & templates
1301 (r'\{\{', Punctuation, 'template-begin-space'),
1302 # <tvar> legacy syntax
1303 (r'(?i)(<)(tvar)\b(\|)([^>]*?)(>)', bygroups(Punctuation,
1304 Name.Tag, Punctuation, String, Punctuation)),
1305 (r'</>', Punctuation, '#pop'),
1306 # <tvar>
1307 (r'(?i)(<)(tvar)\b', bygroups(Punctuation, Name.Tag), 'tag-inner-ordinary'),
1308 (r'(?i)(</)(tvar)\b(\s*)(>)',
1309 bygroups(Punctuation, Name.Tag, Whitespace, Punctuation)),
1310 ],
1311 'parameter-inner': [
1312 (r'\}{3}', Punctuation, '#pop'),
1313 (r'\|', Punctuation),
1314 include('inline'),
1315 include('text'),
1316 ],
1317 'template-begin-space': [
1318 # Templates allow line breaks at the beginning, and due to how MediaWiki handles
1319 # comments, an extra state is required to handle things like {{\n<!---->\n name}}
1320 (r'<!--[\s\S]*?(?:-->|\Z)', Comment.Multiline),
1321 (r'\s+', Whitespace),
1322 # Parser functions
1323 (
1324 r'(?i)(\#[%s]*?|%s)(:)' % (title_char,
1325 '|'.join(parser_functions_i)),
1326 bygroups(Name.Function, Punctuation), ('#pop', 'template-inner')
1327 ),
1328 (
1329 r'(%s)(:)' % ('|'.join(parser_functions)),
1330 bygroups(Name.Function, Punctuation), ('#pop', 'template-inner')
1331 ),
1332 # Templates
1333 (
1334 r'(?i)([%s]*?)(:)' % title_char,
1335 bygroups(Name.Namespace, Punctuation), ('#pop', 'template-name')
1336 ),
1337 default(('#pop', 'template-name'),),
1338 ],
1339 'template-name': [
1340 (r'(\s*?)(\|)', bygroups(Text, Punctuation), ('#pop', 'template-inner')),
1341 (r'\}\}', Punctuation, '#pop'),
1342 (r'\n', Text, '#pop'),
1343 include('replaceable'),
1344 *text_rules(Name.Tag),
1345 ],
1346 'template-inner': [
1347 (r'\}\}', Punctuation, '#pop'),
1348 (r'\|', Punctuation),
1349 (
1350 r"""(?x)
1351 (?<=\|)
1352 ( (?: (?! \{\{ | \}\} )[^=\|<])*? ) # Exclude templates and tags
1353 (=)
1354 """,
1355 bygroups(Name.Label, Operator)
1356 ),
1357 include('inline'),
1358 include('text'),
1359 ],
1360 'table': [
1361 # Use [ \t\n\r\0\x0B] instead of \s to follow PHP trim() behavior
1362 # Endings
1363 (r'^([ \t\n\r\0\x0B]*?)(\|\})',
1364 bygroups(Whitespace, Punctuation), '#pop'),
1365 # Table rows
1366 (r'^([ \t\n\r\0\x0B]*?)(\|-+)(.*)$', bygroups(Whitespace, Punctuation,
1367 using(this, state=['root', 'attr']))),
1368 # Captions
1369 (
1370 r"""(?x)
1371 ^([ \t\n\r\0\x0B]*?)(\|\+)
1372 # Exclude links, template and tags
1373 (?: ( (?: (?! \[\[ | \{\{ )[^|\n<] )*? )(\|) )?
1374 (.*?)$
1375 """,
1376 bygroups(Whitespace, Punctuation, using(this, state=[
1377 'root', 'attr']), Punctuation, Generic.Heading),
1378 ),
1379 # Table data
1380 (
1381 r"""(?x)
1382 ( ^(?:[ \t\n\r\0\x0B]*?)\| | \|\| )
1383 (?: ( (?: (?! \[\[ | \{\{ )[^|\n<] )*? )(\|)(?!\|) )?
1384 """,
1385 bygroups(Punctuation, using(this, state=[
1386 'root', 'attr']), Punctuation),
1387 ),
1388 # Table headers
1389 (
1390 r"""(?x)
1391 ( ^(?:[ \t\n\r\0\x0B]*?)! )
1392 (?: ( (?: (?! \[\[ | \{\{ )[^|\n<] )*? )(\|)(?!\|) )?
1393 """,
1394 bygroups(Punctuation, using(this, state=[
1395 'root', 'attr']), Punctuation),
1396 'table-header',
1397 ),
1398 include('list'),
1399 include('inline'),
1400 include('text'),
1401 ],
1402 'table-header': [
1403 # Requires another state for || handling inside headers
1404 (r'\n', Text, '#pop'),
1405 (
1406 r"""(?x)
1407 (!!|\|\|)
1408 (?:
1409 ( (?: (?! \[\[ | \{\{ )[^|\n<] )*? )
1410 (\|)(?!\|)
1411 )?
1412 """,
1413 bygroups(Punctuation, using(this, state=[
1414 'root', 'attr']), Punctuation)
1415 ),
1416 *text_rules(Generic.Subheading),
1417 ],
1418 'entity': [
1419 (r'&\S*?;', Name.Entity),
1420 ],
1421 'dt': [
1422 (r'\n', Text, '#pop'),
1423 include('inline'),
1424 (r':', Keyword, '#pop'),
1425 include('text'),
1426 ],
1427 'extlink-inner': [
1428 (r'\]', Punctuation, '#pop'),
1429 include('inline'),
1430 include('text'),
1431 ],
1432 'nowiki-ish': [
1433 include('entity'),
1434 include('text'),
1435 ],
1436 'attr': [
1437 include('replaceable'),
1438 (r'\s+', Whitespace),
1439 (r'(=)(\s*)(")', bygroups(Operator, Whitespace, String.Double), 'attr-val-2'),
1440 (r"(=)(\s*)(')", bygroups(Operator, Whitespace, String.Single), 'attr-val-1'),
1441 (r'(=)(\s*)', bygroups(Operator, Whitespace), 'attr-val-0'),
1442 (r'[\w:-]+', Name.Attribute),
1444 ],
1445 'attr-val-0': [
1446 (r'\s', Whitespace, '#pop'),
1447 include('replaceable'),
1448 *text_rules(String),
1449 ],
1450 'attr-val-1': [
1451 (r"'", String.Single, '#pop'),
1452 include('replaceable'),
1453 *text_rules(String.Single),
1454 ],
1455 'attr-val-2': [
1456 (r'"', String.Double, '#pop'),
1457 include('replaceable'),
1458 *text_rules(String.Double),
1459 ],
1460 'tag-inner-ordinary': [
1461 (r'/?\s*>', Punctuation, '#pop'),
1462 include('tag-attr'),
1463 ],
1464 'tag-inner': [
1465 # Return to root state for self-closing tags
1466 (r'/\s*>', Punctuation, '#pop:2'),
1467 (r'\s*>', Punctuation, '#pop'),
1468 include('tag-attr'),
1469 ],
        # The states below are just like their non-tag variants; the key
        # difference is that they forcibly quit when encountering tag-closing
        # markup
        'tag-attr': [
            include('replaceable'),
            (r'\s+', Whitespace),
            (r'(=)(\s*)(")', bygroups(Operator,
             Whitespace, String.Double), 'tag-attr-val-2'),
            (r"(=)(\s*)(')", bygroups(Operator,
             Whitespace, String.Single), 'tag-attr-val-1'),
            (r'(=)(\s*)', bygroups(Operator, Whitespace), 'tag-attr-val-0'),
            (r'[\w:-]+', Name.Attribute),

        ],
        'tag-attr-val-0': [
            (r'\s', Whitespace, '#pop'),
            (r'/?>', Punctuation, '#pop:2'),
            include('replaceable'),
            *text_rules(String),
        ],
        'tag-attr-val-1': [
            (r"'", String.Single, '#pop'),
            (r'/?>', Punctuation, '#pop:2'),
            include('replaceable'),
            *text_rules(String.Single),
        ],
        'tag-attr-val-2': [
            (r'"', String.Double, '#pop'),
            (r'/?>', Punctuation, '#pop:2'),
            include('replaceable'),
            *text_rules(String.Double),
        ],
        'tag-nowiki': nowiki_tag_rules('nowiki'),
        'tag-pre': nowiki_tag_rules('pre'),
        'tag-categorytree': plaintext_tag_rules('categorytree'),
        'tag-dynamicpagelist': plaintext_tag_rules('dynamicpagelist'),
        'tag-hiero': plaintext_tag_rules('hiero'),
        'tag-inputbox': plaintext_tag_rules('inputbox'),
        'tag-imagemap': plaintext_tag_rules('imagemap'),
        'tag-charinsert': plaintext_tag_rules('charinsert'),
        'tag-timeline': plaintext_tag_rules('timeline'),
        'tag-gallery': plaintext_tag_rules('gallery'),
        'tag-graph': plaintext_tag_rules('graph'),
        'tag-rss': plaintext_tag_rules('rss'),
        'tag-math': delegate_tag_rules('math', TexLexer),
        'tag-chem': delegate_tag_rules('chem', TexLexer),
        'tag-ce': delegate_tag_rules('ce', TexLexer),
        'tag-templatedata': delegate_tag_rules('templatedata', JsonLexer),
        'text-italic': text_rules(Generic.Emph),
        'text-bold': text_rules(Generic.Strong),
        'text-bold-italic': text_rules(Generic.EmphStrong),
        'text': text_rules(Text),
    }
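

# A minimal sketch of parser-tag delegation (hypothetical input): the body of
# <syntaxhighlight lang="python"> is handed to PythonLexer by
# handle_syntaxhighlight, while the surrounding markup stays Wikitext:
#
#     from pygments import lex
#     wiki = '== Section ==\n<syntaxhighlight lang="python">print(1)</syntaxhighlight>\n'
#     tokens = list(lex(wiki, WikitextLexer()))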