1"""
2 pygments.lexers.html
3 ~~~~~~~~~~~~~~~~~~~~
4
5 Lexers for HTML, XML and related markup.
6
7 :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8 :license: BSD, see LICENSE for details.
9"""
10
11import re
12
13from pygments.lexer import RegexLexer, ExtendedRegexLexer, include, bygroups, \
14 default, using, inherit, this
15from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
16 Punctuation, Whitespace
17from pygments.util import looks_like_xml, html_doctype_matches
18
19from pygments.lexers.javascript import JavascriptLexer
20from pygments.lexers.jvm import ScalaLexer
21from pygments.lexers.css import CssLexer, _indentation, _starts_block
22from pygments.lexers.ruby import RubyLexer
23
24__all__ = ['HtmlLexer', 'DtdLexer', 'XmlLexer', 'XsltLexer', 'HamlLexer',
25 'ScamlLexer', 'PugLexer', 'VueLexer', 'UrlEncodedLexer']
26
27
28class HtmlLexer(RegexLexer):
29 """
30 For HTML 4 and XHTML 1 markup. Nested JavaScript and CSS is highlighted
31 by the appropriate lexer.
32 """

    name = 'HTML'
    url = 'https://html.spec.whatwg.org/'
    aliases = ['html']
    filenames = ['*.html', '*.htm', '*.xhtml', '*.xslt']
    mimetypes = ['text/html', 'application/xhtml+xml']
    version_added = ''

    flags = re.IGNORECASE | re.DOTALL
    tokens = {
        'root': [
            ('[^<&]+', Text),
            (r'&\S*?;', Name.Entity),
            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
            (r'<!--.*?-->', Comment.Multiline),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            (r'(<)(\s*)(script)(\s*)',
             bygroups(Punctuation, Text, Name.Tag, Text),
             ('script-content', 'tag')),
            (r'(<)(\s*)(style)(\s*)',
             bygroups(Punctuation, Text, Name.Tag, Text),
             ('style-content', 'tag')),
            # note: this allows tag names not used in HTML like <x:with-dash>,
            # this is to support yet-unknown template engines and the like
            (r'(<)(\s*)([\w:.-]+)',
             bygroups(Punctuation, Text, Name.Tag), 'tag'),
            (r'(<)(\s*)(/)(\s*)([\w:.-]+)(\s*)(>)',
             bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
                      Punctuation)),
        ],
        'tag': [
            (r'\s+', Text),
            (r'([\w:-]+\s*)(=)(\s*)', bygroups(Name.Attribute, Operator, Text),
             'attr'),
            (r'[\w:-]+', Name.Attribute),
            (r'(/?)(\s*)(>)', bygroups(Punctuation, Text, Punctuation), '#pop'),
        ],
        'script-content': [
            (r'(<)(\s*)(/)(\s*)(script)(\s*)(>)',
             bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
                      Punctuation), '#pop'),
            (r'.+?(?=<\s*/\s*script\s*>)', using(JavascriptLexer)),
            # fallback cases for when there is no closing script tag
            # first look for newline and then go back into root state
            # if that fails just read the rest of the file
            # this is similar to the error handling logic in lexer.py
            (r'.+?\n', using(JavascriptLexer), '#pop'),
            (r'.+', using(JavascriptLexer), '#pop'),
        ],
        'style-content': [
            (r'(<)(\s*)(/)(\s*)(style)(\s*)(>)',
             bygroups(Punctuation, Text, Punctuation, Text, Name.Tag, Text,
                      Punctuation), '#pop'),
            (r'.+?(?=<\s*/\s*style\s*>)', using(CssLexer)),
            # fallback cases for when there is no closing style tag
            # first look for newline and then go back into root state
            # if that fails just read the rest of the file
            # this is similar to the error handling logic in lexer.py
            (r'.+?\n', using(CssLexer), '#pop'),
            (r'.+', using(CssLexer), '#pop'),
        ],
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }

    def analyse_text(text):
        if html_doctype_matches(text):
            return 0.5


class DtdLexer(RegexLexer):
    """
    A lexer for DTDs (Document Type Definitions).
    """

    flags = re.MULTILINE | re.DOTALL

    name = 'DTD'
    aliases = ['dtd']
    filenames = ['*.dtd']
    mimetypes = ['application/xml-dtd']
    url = 'https://en.wikipedia.org/wiki/Document_type_definition'
    version_added = '1.5'

    tokens = {
        'root': [
            include('common'),

            (r'(<!ELEMENT)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Tag), 'element'),
            (r'(<!ATTLIST)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Tag), 'attlist'),
            (r'(<!ENTITY)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Entity), 'entity'),
            (r'(<!NOTATION)(\s+)(\S+)',
             bygroups(Keyword, Text, Name.Tag), 'notation'),
            (r'(<!\[)([^\[\s]+)(\s*)(\[)',  # conditional sections
             bygroups(Keyword, Name.Entity, Text, Keyword)),

            (r'(<!DOCTYPE)(\s+)([^>\s]+)',
             bygroups(Keyword, Text, Name.Tag)),
            (r'PUBLIC|SYSTEM', Keyword.Constant),
            (r'[\[\]>]', Keyword),
        ],

        'common': [
            (r'\s+', Text),
            (r'(%|&)[^;]*;', Name.Entity),
            ('<!--', Comment, 'comment'),
            (r'[(|)*,?+]', Operator),
            (r'"[^"]*"', String.Double),
            (r'\'[^\']*\'', String.Single),
        ],

        'comment': [
            ('[^-]+', Comment),
            ('-->', Comment, '#pop'),
            ('-', Comment),
        ],

        'element': [
            include('common'),
            (r'EMPTY|ANY|#PCDATA', Keyword.Constant),
            (r'[^>\s|()?+*,]+', Name.Tag),
            (r'>', Keyword, '#pop'),
        ],

        'attlist': [
            include('common'),
            (r'CDATA|IDREFS|IDREF|ID|NMTOKENS|NMTOKEN|ENTITIES|ENTITY|NOTATION',
             Keyword.Constant),
            (r'#REQUIRED|#IMPLIED|#FIXED', Keyword.Constant),
            (r'xml:space|xml:lang', Keyword.Reserved),
            (r'[^>\s|()?+*,]+', Name.Attribute),
            (r'>', Keyword, '#pop'),
        ],

        'entity': [
            include('common'),
            (r'SYSTEM|PUBLIC|NDATA', Keyword.Constant),
            (r'[^>\s|()?+*,]+', Name.Entity),
            (r'>', Keyword, '#pop'),
        ],

        'notation': [
            include('common'),
            (r'SYSTEM|PUBLIC', Keyword.Constant),
            (r'[^>\s|()?+*,]+', Name.Attribute),
            (r'>', Keyword, '#pop'),
        ],
    }

    def analyse_text(text):
        if not looks_like_xml(text) and \
           ('<!ELEMENT' in text or '<!ATTLIST' in text or '<!ENTITY' in text):
            return 0.8


class XmlLexer(RegexLexer):
    """
    Generic lexer for XML (eXtensible Markup Language).
    """

    flags = re.MULTILINE | re.DOTALL

    name = 'XML'
    aliases = ['xml']
    filenames = ['*.xml', '*.xsl', '*.rss', '*.xslt', '*.xsd',
                 '*.wsdl', '*.wsf']
    mimetypes = ['text/xml', 'application/xml', 'image/svg+xml',
                 'application/rss+xml', 'application/atom+xml']
    url = 'https://www.w3.org/XML'
    version_added = ''

    tokens = {
        'root': [
            (r'[^<&\s]+', Text),
            (r'[^<&\S]+', Whitespace),
            (r'&\S*?;', Name.Entity),
            (r'\<\!\[CDATA\[.*?\]\]\>', Comment.Preproc),
            (r'<!--.*?-->', Comment.Multiline),
            (r'<\?.*?\?>', Comment.Preproc),
            ('<![^>]*>', Comment.Preproc),
            (r'<\s*[\w:.-]+', Name.Tag, 'tag'),
            (r'<\s*/\s*[\w:.-]+\s*>', Name.Tag),
        ],
        'tag': [
            (r'\s+', Whitespace),
            (r'[\w.:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            (r'\s+', Whitespace),
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }

    def analyse_text(text):
        if looks_like_xml(text):
            return 0.45  # less than HTML


class XsltLexer(XmlLexer):
    """
    A lexer for XSLT.
    """

    name = 'XSLT'
    aliases = ['xslt']
    filenames = ['*.xsl', '*.xslt', '*.xpl']  # xpl is XProc
    mimetypes = ['application/xsl+xml', 'application/xslt+xml']
    url = 'https://www.w3.org/TR/xslt-30'
    version_added = '0.10'

    EXTRA_KEYWORDS = {
        'apply-imports', 'apply-templates', 'attribute',
        'attribute-set', 'call-template', 'choose', 'comment',
        'copy', 'copy-of', 'decimal-format', 'element', 'fallback',
        'for-each', 'if', 'import', 'include', 'key', 'message',
        'namespace-alias', 'number', 'otherwise', 'output', 'param',
        'preserve-space', 'processing-instruction', 'sort',
        'strip-space', 'stylesheet', 'template', 'text', 'transform',
        'value-of', 'variable', 'when', 'with-param'
    }

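    # The override below post-processes the plain XML token stream: opening or
    # closing tags whose name after the ``xsl:`` prefix appears in
    # EXTRA_KEYWORDS are re-emitted as Keyword, so XSLT instructions stand out
    # from ordinary markup.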
    def get_tokens_unprocessed(self, text):
        for index, token, value in XmlLexer.get_tokens_unprocessed(self, text):
            m = re.match('</?xsl:([^>]*)/?>?', value)

            if token is Name.Tag and m and m.group(1) in self.EXTRA_KEYWORDS:
                yield index, Keyword, value
            else:
                yield index, token, value

    def analyse_text(text):
        if looks_like_xml(text) and '<xsl' in text:
            return 0.8


class HamlLexer(ExtendedRegexLexer):
    """
    For Haml markup.
    """

    name = 'Haml'
    aliases = ['haml']
    filenames = ['*.haml']
    mimetypes = ['text/x-haml']
    url = 'https://haml.info'
    version_added = '1.3'

    flags = re.IGNORECASE
    # Haml can include " |\n" anywhere,
    # which is ignored and used to wrap long lines.
    # To accommodate this, use this custom faux dot instead.
    _dot = r'(?: \|\n(?=.* \|)|.)'

    # In certain places, a comma at the end of the line
    # allows line wrapping as well.
    _comma_dot = r'(?:,\s*\n|' + _dot + ')'
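    # For illustration only (assumed Haml multiline syntax): every wrapped
    # physical line ends in " |", which the faux dot above treats as part of
    # the same logical line:
    #
    #     %p= "a fairly long expression " + |
    #         "split across two lines"      |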
    tokens = {
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],

        'css': [
            (r'\.[\w:-]+', Name.Class, 'tag'),
            (r'\#[\w:-]+', Name.Function, 'tag'),
        ],

        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _comma_dot + r'*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             'root'),
            default('plain'),
        ],

        'content': [
            include('css'),
            (r'%[\w:-]+', Name.Tag, 'tag'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            (r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'haml-comment-block'), '#pop'),
            (r'(-)(' + _comma_dot + r'*\n)',
             bygroups(Punctuation, using(RubyLexer)),
             '#pop'),
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            include('eval-or-plain'),
        ],

        'tag': [
            include('css'),
            (r'\{(,\n|' + _dot + r')*?\}', using(RubyLexer)),
            (r'\[' + _dot + r'*?\]', using(RubyLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],

        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + r'*?)(\})',
             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],

        'html-attributes': [
            (r'\s+', Text),
            (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[\w:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],

        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'\w+', Name.Variable, '#pop'),
            (r'@\w+', Name.Variable.Instance, '#pop'),
            (r'\$\w+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
            (r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
        ],

        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],

        'haml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],

        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + r'*?)(\})',
             bygroups(String.Interpol, using(RubyLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }


class ScamlLexer(ExtendedRegexLexer):
    """
    For Scaml markup. Scaml is Haml for Scala.
    """

    name = 'Scaml'
    aliases = ['scaml']
    filenames = ['*.scaml']
    mimetypes = ['text/x-scaml']
    url = 'https://scalate.github.io/scalate/'
    version_added = '1.4'

    flags = re.IGNORECASE
    # Scaml does not yet support the " |\n" notation to
    # wrap long lines. Once it does, use the custom faux
    # dot instead.
    # _dot = r'(?: \|\n(?=.* \|)|.)'
    _dot = r'.'

    tokens = {
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],

        'css': [
            (r'\.[\w:-]+', Name.Class, 'tag'),
            (r'\#[\w:-]+', Name.Function, 'tag'),
        ],

        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             'root'),
            default('plain'),
        ],

        'content': [
            include('css'),
            (r'%[\w:-]+', Name.Tag, 'tag'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            (r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'scaml-comment-block'), '#pop'),
            (r'(-@\s*)(import)?(' + _dot + r'*\n)',
             bygroups(Punctuation, Keyword, using(ScalaLexer)),
             '#pop'),
            (r'(-)(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             '#pop'),
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            include('eval-or-plain'),
        ],

        'tag': [
            include('css'),
            (r'\{(,\n|' + _dot + r')*?\}', using(ScalaLexer)),
            (r'\[' + _dot + r'*?\]', using(ScalaLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],

        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + r'*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],

        'html-attributes': [
            (r'\s+', Text),
            (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[\w:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],

        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'\w+', Name.Variable, '#pop'),
            (r'@\w+', Name.Variable.Instance, '#pop'),
            (r'\$\w+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
            (r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
        ],

        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],

        'scaml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],

        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + r'*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }


class PugLexer(ExtendedRegexLexer):
    """
    For Pug markup (formerly known as Jade).
    Pug is a variant of Scaml, see:
    http://scalate.fusesource.org/documentation/scaml-reference.html
    """

    name = 'Pug'
    aliases = ['pug', 'jade']
    filenames = ['*.pug', '*.jade']
    mimetypes = ['text/x-pug', 'text/x-jade']
    url = 'https://pugjs.org'
    version_added = '1.4'

    flags = re.IGNORECASE
    _dot = r'.'

    tokens = {
        'root': [
            (r'[ \t]*\n', Text),
            (r'[ \t]*', _indentation),
        ],

        'css': [
            (r'\.[\w:-]+', Name.Class, 'tag'),
            (r'\#[\w:-]+', Name.Function, 'tag'),
        ],

        'eval-or-plain': [
            (r'[&!]?==', Punctuation, 'plain'),
            (r'([&!]?[=~])(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)), 'root'),
            default('plain'),
        ],

        'content': [
            include('css'),
            (r'!!!' + _dot + r'*\n', Name.Namespace, '#pop'),
            (r'(/)(\[' + _dot + r'*?\])(' + _dot + r'*\n)',
             bygroups(Comment, Comment.Special, Comment),
             '#pop'),
            (r'/' + _dot + r'*\n', _starts_block(Comment, 'html-comment-block'),
             '#pop'),
            (r'-#' + _dot + r'*\n', _starts_block(Comment.Preproc,
                                                  'scaml-comment-block'), '#pop'),
            (r'(-@\s*)(import)?(' + _dot + r'*\n)',
             bygroups(Punctuation, Keyword, using(ScalaLexer)),
             '#pop'),
            (r'(-)(' + _dot + r'*\n)',
             bygroups(Punctuation, using(ScalaLexer)),
             '#pop'),
            (r':' + _dot + r'*\n', _starts_block(Name.Decorator, 'filter-block'),
             '#pop'),
            (r'[\w:-]+', Name.Tag, 'tag'),
            (r'\|', Text, 'eval-or-plain'),
        ],

        'tag': [
            include('css'),
            (r'\{(,\n|' + _dot + r')*?\}', using(ScalaLexer)),
            (r'\[' + _dot + r'*?\]', using(ScalaLexer)),
            (r'\(', Text, 'html-attributes'),
            (r'/[ \t]*\n', Punctuation, '#pop:2'),
            (r'[<>]{1,2}(?=[ \t=])', Punctuation),
            include('eval-or-plain'),
        ],

        'plain': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Text),
            (r'(#\{)(' + _dot + r'*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],

        'html-attributes': [
            (r'\s+', Text),
            (r'[\w:-]+[ \t]*=', Name.Attribute, 'html-attribute-value'),
            (r'[\w:-]+', Name.Attribute),
            (r'\)', Text, '#pop'),
        ],

        'html-attribute-value': [
            (r'[ \t]+', Text),
            (r'\w+', Name.Variable, '#pop'),
            (r'@\w+', Name.Variable.Instance, '#pop'),
            (r'\$\w+', Name.Variable.Global, '#pop'),
            (r"'(\\\\|\\[^\\]|[^'\\\n])*'", String, '#pop'),
            (r'"(\\\\|\\[^\\]|[^"\\\n])*"', String, '#pop'),
        ],

        'html-comment-block': [
            (_dot + '+', Comment),
            (r'\n', Text, 'root'),
        ],

        'scaml-comment-block': [
            (_dot + '+', Comment.Preproc),
            (r'\n', Text, 'root'),
        ],

        'filter-block': [
            (r'([^#\n]|#[^{\n]|(\\\\)*\\#\{)+', Name.Decorator),
            (r'(#\{)(' + _dot + r'*?)(\})',
             bygroups(String.Interpol, using(ScalaLexer), String.Interpol)),
            (r'\n', Text, 'root'),
        ],
    }
JadeLexer = PugLexer  # compat


class UrlEncodedLexer(RegexLexer):
    """
    Lexer for URL-encoded (percent-encoded) form data.
    """

    name = 'urlencoded'
    aliases = ['urlencoded']
    mimetypes = ['application/x-www-form-urlencoded']
    url = 'https://en.wikipedia.org/wiki/Percent-encoding'
    version_added = '2.16'

    tokens = {
        'root': [
            ('([^&=]*)(=)([^=&]*)(&?)', bygroups(Name.Tag, Operator, String, Punctuation)),
        ],
    }
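    # For illustration only: an input such as "q=pygments&lang=python" is
    # consumed pair by pair by the rule above, yielding the key as Name.Tag,
    # '=' as Operator, the value as String and the joining '&' as Punctuation.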


class VueLexer(HtmlLexer):
    """
    For Vue Single-File Components.
    """

    name = 'Vue'
    url = 'https://vuejs.org/api/sfc-spec.html'
    aliases = ['vue']
    filenames = ['*.vue']
    mimetypes = []
    version_added = '2.19'

    flags = re.IGNORECASE | re.DOTALL
    tokens = {
        'root': [
            (r'(\{\{)(.*?)(\}\})', bygroups(Comment.Preproc,
                                            using(JavascriptLexer), Comment.Preproc)),
            ('[^<&{]+', Text),
            inherit,
        ],
        'tag': [
            (r'\s+', Text),
            (r'((?:[@:]|v-)(?:[.\w:-]|\[[^\]]*?\])+\s*)(=)(\s*)',
             bygroups(using(this, state=['name']), Operator, Text),
             'attr-directive'),
            (r'([\w:-]+\s*)(=)(\s*)', bygroups(Name.Attribute, Operator, Text),
             'attr'),
            (r'[\w:-]+', Name.Attribute),
            (r'(/?)(\s*)(>)', bygroups(Punctuation, Text, Punctuation), '#pop'),
        ],
        'name': [
            (r'[\w-]+', Name.Attribute),
            (r'[:@.]', Punctuation),
            (r'(\[)([^\]]*?)(\])', bygroups(Comment.Preproc,
                                            using(JavascriptLexer), Comment.Preproc)),
        ],
        'attr-directive': [
            (r'(["\'])(.*?)(\1)', bygroups(String,
                                           using(JavascriptLexer), String), '#pop'),
            (r'[^\s>]+', using(JavascriptLexer), '#pop'),
        ],
    }