1"""
2 pygments.lexers.templates
3 ~~~~~~~~~~~~~~~~~~~~~~~~~
4
5 Lexers for various template engines' markup.
6
7 :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8 :license: BSD, see LICENSE for details.
9"""
10
11import re
12
13from pygments.lexers.html import HtmlLexer, XmlLexer
14from pygments.lexers.javascript import JavascriptLexer, LassoLexer
15from pygments.lexers.css import CssLexer
16from pygments.lexers.php import PhpLexer
17from pygments.lexers.python import PythonLexer
18from pygments.lexers.perl import PerlLexer
19from pygments.lexers.jvm import JavaLexer, TeaLangLexer
20from pygments.lexers.data import YamlLexer
21from pygments.lexers.sql import SqlLexer
22from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \
23 include, using, this, default, combined
24from pygments.token import Error, Punctuation, Whitespace, \
25 Text, Comment, Operator, Keyword, Name, String, Number, Other, Token
26from pygments.util import html_doctype_matches, looks_like_xml
27
# Public API of this module: the lexer classes exported via
# ``from pygments.lexers.templates import *`` and referenced by the
# generated lexer mapping.
__all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer',
           'JavascriptPhpLexer', 'ErbLexer', 'RhtmlLexer',
           'XmlErbLexer', 'CssErbLexer', 'JavascriptErbLexer',
           'SmartyLexer', 'HtmlSmartyLexer', 'XmlSmartyLexer',
           'CssSmartyLexer', 'JavascriptSmartyLexer', 'DjangoLexer',
           'HtmlDjangoLexer', 'CssDjangoLexer', 'XmlDjangoLexer',
           'JavascriptDjangoLexer', 'GenshiLexer', 'HtmlGenshiLexer',
           'GenshiTextLexer', 'CssGenshiLexer', 'JavascriptGenshiLexer',
           'MyghtyLexer', 'MyghtyHtmlLexer', 'MyghtyXmlLexer',
           'MyghtyCssLexer', 'MyghtyJavascriptLexer', 'MasonLexer', 'MakoLexer',
           'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer',
           'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer',
           'CheetahXmlLexer', 'CheetahJavascriptLexer', 'EvoqueLexer',
           'EvoqueHtmlLexer', 'EvoqueXmlLexer', 'ColdfusionLexer',
           'ColdfusionHtmlLexer', 'ColdfusionCFCLexer', 'VelocityLexer',
           'VelocityHtmlLexer', 'VelocityXmlLexer', 'SspLexer',
           'TeaTemplateLexer', 'LassoHtmlLexer', 'LassoXmlLexer',
           'LassoCssLexer', 'LassoJavascriptLexer', 'HandlebarsLexer',
           'HandlebarsHtmlLexer', 'YamlJinjaLexer', 'LiquidLexer',
           'TwigLexer', 'TwigHtmlLexer', 'Angular2Lexer', 'Angular2HtmlLexer',
           'SqlJinjaLexer']
49
50
class ErbLexer(Lexer):
    """
    Generic ERB (Ruby Templating) lexer.

    Just highlights ruby code between the preprocessor directives, other data
    is left untouched by the lexer.

    All options are also forwarded to the `RubyLexer`.
    """

    name = 'ERB'
    url = 'https://github.com/ruby/erb'
    aliases = ['erb']
    mimetypes = ['application/x-ruby-templating']
    version_added = ''

    # Matches every ERB delimiter.  Because the pattern is a single capturing
    # group, re.split() keeps the delimiters in the result, so the split list
    # alternates between plain text and tags.
    _block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M)

    def __init__(self, **options):
        # Imported lazily to avoid a circular import at module load time.
        from pygments.lexers.ruby import RubyLexer
        self.ruby_lexer = RubyLexer(**options)
        Lexer.__init__(self, **options)

    def get_tokens_unprocessed(self, text):
        """
        Since ERB doesn't allow "<%" and other tags inside of ruby
        blocks we have to use a split approach here that fails for
        that too.
        """
        tokens = self._block_re.split(text)
        # Reverse so that .pop() takes items from the logical front in O(1).
        tokens.reverse()
        # state 0: expecting plain text, 1: expecting a block start tag,
        # 2: expecting a block end tag.  idx is the running char offset.
        state = idx = 0
        try:
            while True:
                # text
                if state == 0:
                    val = tokens.pop()
                    yield idx, Other, val
                    idx += len(val)
                    state = 1
                # block starts
                elif state == 1:
                    tag = tokens.pop()
                    # literals: escaped delimiters emitted as plain data
                    if tag in ('<%%', '%%>'):
                        yield idx, Other, tag
                        idx += 3
                        state = 0
                    # comment
                    elif tag == '<%#':
                        yield idx, Comment.Preproc, tag
                        val = tokens.pop()
                        yield idx + 3, Comment, val
                        idx += 3 + len(val)
                        state = 2
                    # blocks or output: lex the enclosed data as Ruby
                    elif tag in ('<%', '<%=', '<%-'):
                        yield idx, Comment.Preproc, tag
                        idx += len(tag)
                        data = tokens.pop()
                        r_idx = 0
                        for r_idx, r_token, r_value in \
                                self.ruby_lexer.get_tokens_unprocessed(data):
                            yield r_idx + idx, r_token, r_value
                        idx += len(data)
                        state = 2
                    # a closing tag with no matching opener is an error
                    elif tag in ('%>', '-%>'):
                        yield idx, Error, tag
                        idx += len(tag)
                        state = 0
                    # % raw ruby statements (leading '%' to end of line)
                    else:
                        yield idx, Comment.Preproc, tag[0]
                        r_idx = 0
                        for r_idx, r_token, r_value in \
                                self.ruby_lexer.get_tokens_unprocessed(tag[1:]):
                            yield idx + 1 + r_idx, r_token, r_value
                        idx += len(tag)
                        state = 0
                # block ends
                elif state == 2:
                    tag = tokens.pop()
                    if tag not in ('%>', '-%>'):
                        yield idx, Other, tag
                    else:
                        yield idx, Comment.Preproc, tag
                    idx += len(tag)
                    state = 0
        except IndexError:
            # Raised by .pop() once the token list is exhausted.
            return

    def analyse_text(text):
        # Weak heuristic: many languages contain '<%' ... '%>' pairs.
        if '<%' in text and '%>' in text:
            return 0.4
145
146
class SmartyLexer(RegexLexer):
    """
    Generic Smarty template lexer.

    Just highlights smarty code between the preprocessor directives, other
    data is left untouched by the lexer.
    """

    name = 'Smarty'
    url = 'https://www.smarty.net/'
    aliases = ['smarty']
    filenames = ['*.tpl']
    mimetypes = ['application/x-smarty']
    version_added = ''

    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            # everything up to the next brace is plain template data
            (r'[^{]+', Other),
            # {* ... *} comment
            (r'(\{)(\*.*?\*)(\})',
             bygroups(Comment.Preproc, Comment, Comment.Preproc)),
            # {php} ... {/php} block lexed as PHP
            (r'(\{php\})(.*?)(\{/php\})',
             bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
                      Comment.Preproc)),
            # {tag ...} / {/tag ...} function call
            (r'(\{)(/?[a-zA-Z_]\w*)(\s*)',
             bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
            (r'\{', Comment.Preproc, 'smarty')
        ],
        # inside a {...} expression
        'smarty': [
            (r'\s+', Text),
            (r'\{', Comment.Preproc, '#push'),
            (r'\}', Comment.Preproc, '#pop'),
            (r'#[a-zA-Z_]\w*#', Name.Variable),
            (r'\$[a-zA-Z_]\w*(\.\w+)*', Name.Variable),
            (r'[~!%^&*()+=|\[\]:;,.<>/?@-]', Operator),
            (r'(true|false|null)\b', Keyword.Constant),
            # NOTE(review): 'eE' here matches the literal two-char sequence;
            # the exponent part was presumably meant as [eE][+-]?[0-9]+ —
            # kept as-is to preserve behavior.
            (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'[a-zA-Z_]\w*', Name.Attribute)
        ]
    }

    def analyse_text(text):
        # Each characteristic construct adds to the score; a bare {$var}
        # reference is only a very weak signal.
        rv = 0.0
        if re.search(r'\{if\s+.*?\}.*?\{/if\}', text):
            rv += 0.15
        if re.search(r'\{include\s+file=.*?\}', text):
            rv += 0.15
        if re.search(r'\{foreach\s+.*?\}.*?\{/foreach\}', text):
            rv += 0.15
        if re.search(r'\{\$.*?\}', text):
            rv += 0.01
        return rv
203
204
class VelocityLexer(RegexLexer):
    """
    Generic Velocity template lexer.

    Just highlights velocity directives and variable references, other
    data is left untouched by the lexer.
    """

    name = 'Velocity'
    url = 'https://velocity.apache.org/'
    aliases = ['velocity']
    filenames = ['*.vm', '*.fhtml']
    version_added = ''

    flags = re.MULTILINE | re.DOTALL

    # Velocity identifier: a letter or underscore followed by word chars.
    identifier = r'[a-zA-Z_]\w*'

    tokens = {
        'root': [
            # plain data up to the next '#', '$' or '{'
            (r'[^{#$]+', Other),
            # #* ... *# block comment
            (r'(#)(\*.*?\*)(#)',
             bygroups(Comment.Preproc, Comment, Comment.Preproc)),
            # ## line comment
            (r'(##)(.*?$)',
             bygroups(Comment.Preproc, Comment)),
            # directive with parameters, e.g. #if(...) or #{if}(...)
            (r'(#\{?)(' + identifier + r')(\}?)(\s?\()',
             bygroups(Comment.Preproc, Name.Function, Comment.Preproc, Punctuation),
             'directiveparams'),
            # parameterless directive, e.g. #end or #{end}
            (r'(#\{?)(' + identifier + r')(\}|\b)',
             bygroups(Comment.Preproc, Name.Function, Comment.Preproc)),
            # variable reference: $var, $!var, ${var}, $!{var}
            (r'\$!?\{?', Punctuation, 'variable')
        ],
        'variable': [
            (identifier, Name.Variable),
            (r'\(', Punctuation, 'funcparams'),
            # property access keeps nesting in the variable state
            (r'(\.)(' + identifier + r')',
             bygroups(Punctuation, Name.Variable), '#push'),
            (r'\}', Punctuation, '#pop'),
            default('#pop')
        ],
        'directiveparams': [
            (r'(&&|\|\||==?|!=?|[-<>+*%&|^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b',
             Operator),
            # [n..m] range literal
            (r'\[', Operator, 'rangeoperator'),
            (r'\b' + identifier + r'\b', Name.Function),
            include('funcparams')
        ],
        'rangeoperator': [
            (r'\.\.', Operator),
            include('funcparams'),
            (r'\]', Operator, '#pop')
        ],
        'funcparams': [
            (r'\$!?\{?', Punctuation, 'variable'),
            (r'\s+', Text),
            (r'[,:]', Punctuation),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r"0[xX][0-9a-fA-F]+[Ll]?", Number),
            (r"\b[0-9]+\b", Number),
            (r'(true|false|null)\b', Keyword.Constant),
            # nested bracket pairs re-enter this same state
            (r'\(', Punctuation, '#push'),
            (r'\)', Punctuation, '#pop'),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
            (r'\[', Punctuation, '#push'),
            (r'\]', Punctuation, '#pop'),
        ]
    }

    def analyse_text(text):
        # Directive/end pairs are strong signals for guess_lexer();
        # a bare variable reference is a very weak one.
        rv = 0.0
        if re.search(r'#\{?macro\}?\(.*?\).*?#\{?end\}?', text, re.DOTALL):
            rv += 0.25
        if re.search(r'#\{?if\}?\(.+?\).*?#\{?end\}?', text, re.DOTALL):
            rv += 0.15
        if re.search(r'#\{?foreach\}?\(.+?\).*?#\{?end\}?', text, re.DOTALL):
            rv += 0.15
        if re.search(r'\$!?\{?[a-zA-Z_]\w*(\([^)]*\))?'
                     r'(\.\w+(\([^)]*\))?)*\}?', text):
            rv += 0.01
        return rv
287
288
class VelocityHtmlLexer(DelegatingLexer):
    """
    Delegating lexer that runs `VelocityLexer` over the input and hands
    everything it leaves untouched to the `HtmlLexer`.
    """

    name = 'HTML+Velocity'
    url = 'https://velocity.apache.org/'
    aliases = ['html+velocity']
    alias_filenames = ['*.html', '*.fhtml']
    mimetypes = ['text/html+velocity']
    version_added = ''

    def __init__(self, **options):
        # HTML is the root language, Velocity the embedded one.
        super().__init__(HtmlLexer, VelocityLexer, **options)
305
306
class VelocityXmlLexer(DelegatingLexer):
    """
    Delegating lexer that runs `VelocityLexer` over the input and hands
    everything it leaves untouched to the `XmlLexer`.
    """

    name = 'XML+Velocity'
    url = 'https://velocity.apache.org/'
    aliases = ['xml+velocity']
    alias_filenames = ['*.xml', '*.vm']
    mimetypes = ['application/xml+velocity']
    version_added = ''

    def __init__(self, **options):
        # XML is the root language, Velocity the embedded one.
        super().__init__(XmlLexer, VelocityLexer, **options)

    def analyse_text(text):
        # Start from the Velocity score, nudged down so plain Velocity
        # still wins, then reward XML-looking input.
        score = VelocityLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            score += 0.4
        return score
329
330
class DjangoLexer(RegexLexer):
    """
    Generic `Django <https://www.djangoproject.com/documentation/templates/>`_
    and `Jinja <https://jinja.palletsprojects.com>`_ template lexer.

    It just highlights django/jinja code between the preprocessor directives,
    other data is left untouched by the lexer.
    """

    name = 'Django/Jinja'
    aliases = ['django', 'jinja']
    mimetypes = ['application/x-django-templating', 'application/x-jinja']
    url = 'https://www.djangoproject.com/documentation/templates'
    version_added = ''

    flags = re.M | re.S

    tokens = {
        'root': [
            # plain data up to the next brace
            (r'[^{]+', Other),
            # {{ ... }} variable output
            (r'\{\{', Comment.Preproc, 'var'),
            # jinja/django comments
            (r'\{#.*?#\}', Comment),
            # django comments
            (r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Comment, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # raw jinja blocks
            (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Text, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # filter blocks
            (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_]\w*)',
             bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
             'block'),
            # any other {% tag %}
            (r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
             bygroups(Comment.Preproc, Text, Keyword), 'block'),
            (r'\{', Other)
        ],
        # tokens shared by the 'var' and 'block' states
        'varnames': [
            # |filter application
            (r'(\|)(\s*)([a-zA-Z_]\w*)',
             bygroups(Operator, Text, Name.Function)),
            # jinja 'is [not] test' expressions
            (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_]\w*)',
             bygroups(Keyword, Text, Keyword, Text, Name.Function)),
            (r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo),
            (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
             r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b',
             Keyword),
            (r'(loop|block|super|forloop)\b', Name.Builtin),
            (r'[a-zA-Z_][\w-]*', Name.Variable),
            (r'\.\w+', Name.Variable),
            (r':?"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r":?'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'([{}()\[\]+\-*/%,:~]|[><=]=?|!=)', Operator),
            # NOTE(review): 'eE' matches the literal two-char sequence; the
            # exponent was presumably meant as [eE][+-]?[0-9]+ — kept as-is.
            (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
        ],
        'var': [
            (r'\s+', Text),
            # closing '}}' (optionally whitespace-trimming '-}}')
            (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames')
        ],
        'block': [
            (r'\s+', Text),
            # closing '%}' (optionally whitespace-trimming '-%}')
            (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames'),
            (r'.', Punctuation)
        ]
    }

    def analyse_text(text):
        # {% block %} / {% extends %} are the strongest template markers.
        rv = 0.0
        if re.search(r'\{%\s*(block|extends)', text) is not None:
            rv += 0.4
        if re.search(r'\{%\s*if\s*.*?%\}', text) is not None:
            rv += 0.1
        if re.search(r'\{\{.*?\}\}', text) is not None:
            rv += 0.1
        return rv
414
415
class MyghtyLexer(RegexLexer):
    """
    Generic myghty templates lexer. Code that isn't Myghty
    markup is yielded as `Token.Other`.
    """

    name = 'Myghty'
    url = 'http://www.myghty.org/'
    aliases = ['myghty']
    filenames = ['*.myt', 'autodelegate']
    mimetypes = ['application/x-myghty']
    version_added = '0.6'

    tokens = {
        'root': [
            (r'\s+', Text),
            # <%def>/<%method> blocks; the body is re-lexed with this lexer.
            # NOTE(review): the \2 backreference points at the whitespace
            # group, not the tag name — looks suspicious but is preserved.
            (r'(?s)(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Text, Name.Function, Name.Tag,
                      using(this), Name.Tag)),
            # other <%tag> blocks with a Python body
            (r'(?s)(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Name.Function, Name.Tag,
                      using(PythonLexer), Name.Tag)),
            # <& ... &> component call
            (r'(<&[^|])(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            # <&| ... &> component call with content
            (r'(?s)(<&\|)(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            (r'</&>', Name.Tag),
            # <% ... %> / <%! ... %> substitution or init block
            (r'(?s)(<%!?)(.*?)(%>)',
             bygroups(Name.Tag, using(PythonLexer), Name.Tag)),
            # '#' comment line (only at line start)
            (r'(?<=^)#[^\n]*(\n|\Z)', Comment),
            # '%' Python control line (only at line start)
            (r'(?<=^)(%)([^\n]*)(\n|\Z)',
             bygroups(Name.Tag, using(PythonLexer), Other)),
            (r"""(?sx)
                (.+?)               # anything, followed by:
                (?:
                 (?<=\n)(?=[%#]) |  # an eval or comment line
                 (?=</?[%&]) |      # a substitution or block or
                                    # call start or end
                                    # - don't consume
                 (\\\n) |           # an escaped newline
                 \Z                 # end of string
                )""", bygroups(Other, Operator)),
        ]
    }
460
461
class MyghtyHtmlLexer(DelegatingLexer):
    """
    Delegating lexer that runs `MyghtyLexer` over the input and hands
    everything it leaves untouched to the `HtmlLexer`.
    """

    name = 'HTML+Myghty'
    url = 'http://www.myghty.org/'
    aliases = ['html+myghty']
    mimetypes = ['text/html+myghty']
    version_added = '0.6'

    def __init__(self, **options):
        # HTML is the root language, Myghty the embedded one.
        super().__init__(HtmlLexer, MyghtyLexer, **options)
476
477
class MyghtyXmlLexer(DelegatingLexer):
    """
    Delegating lexer that runs `MyghtyLexer` over the input and hands
    everything it leaves untouched to the `XmlLexer`.
    """

    name = 'XML+Myghty'
    url = 'http://www.myghty.org/'
    aliases = ['xml+myghty']
    mimetypes = ['application/xml+myghty']
    version_added = '0.6'

    def __init__(self, **options):
        # XML is the root language, Myghty the embedded one.
        super().__init__(XmlLexer, MyghtyLexer, **options)
492
493
class MyghtyJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `JavascriptLexer`.
    """

    name = 'JavaScript+Myghty'
    aliases = ['javascript+myghty', 'js+myghty']
    mimetypes = ['application/x-javascript+myghty',
                 'text/x-javascript+myghty',
                 # 'mygthy' is a historical typo kept for backward
                 # compatibility; the correctly spelled type follows.
                 'text/javascript+mygthy',
                 'text/javascript+myghty']
    url = 'http://www.myghty.org/'
    version_added = '0.6'

    def __init__(self, **options):
        # JavaScript is the root language, Myghty the embedded one.
        super().__init__(JavascriptLexer, MyghtyLexer, **options)
510
511
class MyghtyCssLexer(DelegatingLexer):
    """
    Delegating lexer that runs `MyghtyLexer` over the input and hands
    everything it leaves untouched to the `CssLexer`.
    """

    name = 'CSS+Myghty'
    url = 'http://www.myghty.org/'
    aliases = ['css+myghty']
    mimetypes = ['text/css+myghty']
    version_added = '0.6'

    def __init__(self, **options):
        # CSS is the root language, Myghty the embedded one.
        super().__init__(CssLexer, MyghtyLexer, **options)
526
527
class MasonLexer(RegexLexer):
    """
    Generic mason templates lexer. Stolen from Myghty lexer. Code that isn't
    Mason markup is HTML.
    """
    name = 'Mason'
    url = 'http://www.masonhq.com/'
    aliases = ['mason']
    filenames = ['*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler']
    mimetypes = ['application/x-mason']
    version_added = '1.4'

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            # <%doc> documentation block
            (r'(?s)(<%doc>)(.*?)(</%doc>)',
             bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
            # <%def>/<%method> blocks; body re-lexed with this lexer.
            # NOTE(review): the \2 backreference points at the whitespace
            # group, not the tag name — looks suspicious but is preserved.
            (r'(?s)(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Whitespace, Name.Function, Name.Tag,
                      using(this), Name.Tag)),
            # other <%tag> blocks with a Perl body; \2 is the tag name here
            (r'(?s)(<%(\w+)(.*?)(>))(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, None, None, None, using(PerlLexer), Name.Tag)),
            # <& ... &> component call
            (r'(?s)(<&[^|])(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
            # <&| ... &> component call with content
            (r'(?s)(<&\|)(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
            (r'</&>', Name.Tag),
            # <% ... %> / <%! ... %> substitution block
            (r'(?s)(<%!?)(.*?)(%>)',
             bygroups(Name.Tag, using(PerlLexer), Name.Tag)),
            # '#' comment line (only at line start)
            (r'(?<=^)#[^\n]*(\n|\Z)', Comment),
            # '%' Perl control line (only at line start)
            (r'(?<=^)(%)([^\n]*)(\n|\Z)',
             bygroups(Name.Tag, using(PerlLexer), Other)),
            (r"""(?sx)
                (.+?)               # anything, followed by:
                (?:
                 (?<=\n)(?=[%#]) |  # an eval or comment line
                 (?=</?[%&]) |      # a substitution or block or
                                    # call start or end
                                    # - don't consume
                 (\\\n) |           # an escaped newline
                 \Z                 # end of string
                )""", bygroups(using(HtmlLexer), Operator)),
        ]
    }

    def analyse_text(text):
        # A closing Mason section tag is definitive; a component call
        # alone is a weak signal.
        result = 0.0
        if re.search(r'</%(class|doc|init)>', text) is not None:
            result = 1.0
        elif re.search(r'<&.+&>', text, re.DOTALL) is not None:
            result = 0.11
        return result
580
581
class MakoLexer(RegexLexer):
    """
    Generic mako templates lexer. Code that isn't Mako
    markup is yielded as `Token.Other`.
    """

    name = 'Mako'
    url = 'http://www.makotemplates.org/'
    aliases = ['mako']
    filenames = ['*.mao']
    mimetypes = ['application/x-mako']
    version_added = '0.7'

    tokens = {
        'root': [
            # '% endif' style control-line terminators
            (r'(\s*)(%)(\s*end(?:\w+))(\n|\Z)',
             bygroups(Text.Whitespace, Comment.Preproc, Keyword, Other)),
            # '%' Python control line
            (r'(\s*)(%)([^\n]*)(\n|\Z)',
             bygroups(Text.Whitespace, Comment.Preproc, using(PythonLexer), Other)),
            # '##' comment line
            (r'(\s*)(##[^\n]*)(\n|\Z)',
             bygroups(Text.Whitespace, Comment.Single, Text.Whitespace)),
            # <%doc> documentation block
            (r'(?s)<%doc>.*?</%doc>', Comment.Multiline),
            # <%tag ...> opening tag with attributes
            (r'(<%)([\w.:]+)',
             bygroups(Comment.Preproc, Name.Builtin), 'tag'),
            # </%tag> closing tag
            (r'(</%)([\w.:]+)(>)',
             bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
            (r'<%(?=([\w.:]+))', Comment.Preproc, 'ondeftags'),
            # <% ... %> / <%! ... %> Python block
            (r'(?s)(<%(?:!?))(.*?)(%>)',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # ${ ... } substitution
            (r'(\$\{)(.*?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'''(?sx)
                (.+?)                # anything, followed by:
                (?:
                 (?<=\n)(?=%|\#\#) | # an eval or comment line
                 (?=\#\*) |          # multiline comment
                 (?=</?%) |          # a python block
                                     # call start or end
                 (?=\$\{) |          # a substitution
                 (?<=\n)(?=\s*%) |
                                     # - don't consume
                 (\\\n) |            # an escaped newline
                 \Z                  # end of string
                )
            ''', bygroups(Other, Operator)),
            (r'\s+', Text),
        ],
        'ondeftags': [
            (r'<%', Comment.Preproc),
            (r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
            include('tag'),
        ],
        'tag': [
            # name="value" attribute pairs
            (r'((?:\w+)\s*=)(\s*)(".*?")',
             bygroups(Name.Attribute, Text, String)),
            (r'/?\s*>', Comment.Preproc, '#pop'),
            (r'\s+', Text),
        ],
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
646
647
class MakoHtmlLexer(DelegatingLexer):
    """
    Delegating lexer that runs `MakoLexer` over the input and hands
    everything it leaves untouched to the `HtmlLexer`.
    """

    name = 'HTML+Mako'
    url = 'http://www.makotemplates.org/'
    aliases = ['html+mako']
    mimetypes = ['text/html+mako']
    version_added = '0.7'

    def __init__(self, **options):
        # HTML is the root language, Mako the embedded one.
        super().__init__(HtmlLexer, MakoLexer, **options)
662
663
class MakoXmlLexer(DelegatingLexer):
    """
    Delegating lexer that runs `MakoLexer` over the input and hands
    everything it leaves untouched to the `XmlLexer`.
    """

    name = 'XML+Mako'
    url = 'http://www.makotemplates.org/'
    aliases = ['xml+mako']
    mimetypes = ['application/xml+mako']
    version_added = '0.7'

    def __init__(self, **options):
        # XML is the root language, Mako the embedded one.
        super().__init__(XmlLexer, MakoLexer, **options)
678
679
class MakoJavascriptLexer(DelegatingLexer):
    """
    Delegating lexer that runs `MakoLexer` over the input and hands
    everything it leaves untouched to the `JavascriptLexer`.
    """

    name = 'JavaScript+Mako'
    url = 'http://www.makotemplates.org/'
    aliases = ['javascript+mako', 'js+mako']
    mimetypes = ['application/x-javascript+mako',
                 'text/x-javascript+mako',
                 'text/javascript+mako']
    version_added = '0.7'

    def __init__(self, **options):
        # JavaScript is the root language, Mako the embedded one.
        super().__init__(JavascriptLexer, MakoLexer, **options)
696
697
class MakoCssLexer(DelegatingLexer):
    """
    Delegating lexer that runs `MakoLexer` over the input and hands
    everything it leaves untouched to the `CssLexer`.
    """

    name = 'CSS+Mako'
    url = 'http://www.makotemplates.org/'
    aliases = ['css+mako']
    mimetypes = ['text/css+mako']
    version_added = '0.7'

    def __init__(self, **options):
        # CSS is the root language, Mako the embedded one.
        super().__init__(CssLexer, MakoLexer, **options)
712
713
714# Genshi and Cheetah lexers courtesy of Matt Good.
715
class CheetahPythonLexer(Lexer):
    """
    Python lexer wrapper that re-tags Cheetah's special ``$`` marker.

    A lone ``$`` is an error token for the plain `PythonLexer`, but in
    Cheetah it introduces a substitution, so it is remapped to
    `Comment.Preproc`; all other tokens pass through unchanged.
    """

    def get_tokens_unprocessed(self, text):
        inner = PythonLexer(**self.options)
        for index, ttype, value in inner.get_tokens_unprocessed(text):
            if ttype == Token.Error and value == '$':
                yield index, Comment.Preproc, value
            else:
                yield index, ttype, value
727
728
class CheetahLexer(RegexLexer):
    """
    Generic cheetah templates lexer. Code that isn't Cheetah
    markup is yielded as `Token.Other`. This also works for
    `spitfire templates`_ which use the same syntax.

    .. _spitfire templates: http://code.google.com/p/spitfire/
    """

    name = 'Cheetah'
    url = 'http://www.cheetahtemplate.org/'
    aliases = ['cheetah', 'spitfire']
    filenames = ['*.tmpl', '*.spt']
    mimetypes = ['application/x-cheetah', 'application/x-spitfire']
    version_added = ''

    tokens = {
        'root': [
            # ## line comment
            (r'(##[^\n]*)$',
             (bygroups(Comment))),
            # #* ... *# block comment
            (r'#[*](.|\n)*?[*]#', Comment),
            (r'#end[^#\n]*(?:#|$)', Comment.Preproc),
            (r'#slurp$', Comment.Preproc),
            # #directive with its argument lexed via CheetahPythonLexer
            (r'(#[a-zA-Z]+)([^#\n]*)(#|$)',
             (bygroups(Comment.Preproc, using(CheetahPythonLexer),
                       Comment.Preproc))),
            # TODO support other Python syntax like $foo['bar']
            # $name.attr substitution
            (r'(\$)([a-zA-Z_][\w.]*\w)',
             bygroups(Comment.Preproc, using(CheetahPythonLexer))),
            # ${expr} / ${!expr} substitution
            (r'(?s)(\$\{!?)(.*?)(\})',
             bygroups(Comment.Preproc, using(CheetahPythonLexer),
                      Comment.Preproc)),
            (r'''(?sx)
                (.+?)               # anything, followed by:
                (?:
                 (?=\#[#a-zA-Z]*) | # an eval comment
                 (?=\$[a-zA-Z_{]) | # a substitution
                 \Z                 # end of string
                )
            ''', Other),
            (r'\s+', Text),
        ],
    }
772
773
class CheetahHtmlLexer(DelegatingLexer):
    """
    Delegating lexer that runs `CheetahLexer` over the input and hands
    everything it leaves untouched to the `HtmlLexer`.
    """

    name = 'HTML+Cheetah'
    url = 'http://www.cheetahtemplate.org/'
    aliases = ['html+cheetah', 'html+spitfire', 'htmlcheetah']
    mimetypes = ['text/html+cheetah', 'text/html+spitfire']
    version_added = ''

    def __init__(self, **options):
        # HTML is the root language, Cheetah the embedded one.
        super().__init__(HtmlLexer, CheetahLexer, **options)
788
789
class CheetahXmlLexer(DelegatingLexer):
    """
    Delegating lexer that runs `CheetahLexer` over the input and hands
    everything it leaves untouched to the `XmlLexer`.
    """

    name = 'XML+Cheetah'
    url = 'http://www.cheetahtemplate.org/'
    aliases = ['xml+cheetah', 'xml+spitfire']
    mimetypes = ['application/xml+cheetah', 'application/xml+spitfire']
    version_added = ''

    def __init__(self, **options):
        # XML is the root language, Cheetah the embedded one.
        super().__init__(XmlLexer, CheetahLexer, **options)
804
805
class CheetahJavascriptLexer(DelegatingLexer):
    """
    Delegating lexer that runs `CheetahLexer` over the input and hands
    everything it leaves untouched to the `JavascriptLexer`.
    """

    name = 'JavaScript+Cheetah'
    url = 'http://www.cheetahtemplate.org/'
    aliases = ['javascript+cheetah', 'js+cheetah',
               'javascript+spitfire', 'js+spitfire']
    mimetypes = ['application/x-javascript+cheetah',
                 'text/x-javascript+cheetah',
                 'text/javascript+cheetah',
                 'application/x-javascript+spitfire',
                 'text/x-javascript+spitfire',
                 'text/javascript+spitfire']
    version_added = ''

    def __init__(self, **options):
        # JavaScript is the root language, Cheetah the embedded one.
        super().__init__(JavascriptLexer, CheetahLexer, **options)
826
827
class GenshiTextLexer(RegexLexer):
    """
    A lexer that highlights genshi text templates.
    """

    name = 'Genshi Text'
    url = 'https://genshi.edgewall.org/'
    aliases = ['genshitext']
    mimetypes = ['application/x-genshi-text', 'text/x-genshi']
    version_added = ''

    tokens = {
        'root': [
            # plain data up to the next '#', '$' or whitespace
            (r'[^#$\s]+', Other),
            # ## comment line
            (r'^(\s*)(##.*)$', bygroups(Text, Comment)),
            # '#' directive line
            (r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'),
            include('variable'),
            (r'[#$\s]', Other),
        ],
        'directive': [
            (r'\n', Text, '#pop'),
            # #def / #for / #if — argument is a Python expression
            (r'(?:def|for|if)\s+.*', using(PythonLexer), '#pop'),
            (r'(choose|when|with)([^\S\n]+)(.*)',
             bygroups(Keyword, Text, using(PythonLexer)), '#pop'),
            (r'(choose|otherwise)\b', Keyword, '#pop'),
            # #end directives; trailing text is treated as a comment
            (r'(end\w*)([^\S\n]*)(.*)', bygroups(Keyword, Text, Comment), '#pop'),
        ],
        'variable': [
            # ${expr} substitution ('$$' escapes, hence the lookbehind)
            (r'(?<!\$)(\$\{)(.+?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # $name substitution
            (r'(?<!\$)(\$)([a-zA-Z_][\w.]*)',
             Name.Variable),
        ]
    }
862
863
class GenshiMarkupLexer(RegexLexer):
    """
    Base lexer for Genshi markup, used by `HtmlGenshiLexer` and
    `GenshiLexer`.

    Highlights ``<?python ?>`` processing instructions, ``py:`` directive
    attributes and ``$``/``${}`` expressions; everything else is yielded
    as `Other` for a delegating lexer to handle.
    """

    flags = re.DOTALL

    tokens = {
        'root': [
            (r'[^<$]+', Other),
            # <?python ... ?> processing instruction
            (r'(<\?python)(.*?)(\?>)',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # yield style and script blocks as Other
            (r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other),
            (r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
            (r'<\s*[a-zA-Z0-9:.]+', Name.Tag, 'tag'),
            include('variable'),
            (r'[<$]', Other),
        ],
        'pytag': [
            (r'\s+', Text),
            (r'[\w:-]+\s*=', Name.Attribute, 'pyattr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'pyattr': [
            # directive attribute values are Python expressions
            ('(")(.*?)(")', bygroups(String, using(PythonLexer), String), '#pop'),
            ("(')(.*?)(')", bygroups(String, using(PythonLexer), String), '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
        'tag': [
            (r'\s+', Text),
            (r'py:[\w-]+\s*=', Name.Attribute, 'pyattr'),
            (r'[\w:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('"', String, 'attr-dstring'),
            ("'", String, 'attr-sstring'),
            (r'[^\s>]*', String, '#pop')
        ],
        'attr-dstring': [
            ('"', String, '#pop'),
            include('strings'),
            # a single quote inside a double-quoted value is plain text
            ("'", String)
        ],
        'attr-sstring': [
            ("'", String, '#pop'),
            include('strings'),
            # Fixed: this rule was ("'", String), which was unreachable
            # (the first rule already pops on a single quote) and left a
            # double quote inside a single-quoted value unmatched.  Treat
            # it as plain text, mirroring 'attr-dstring' above.
            ('"', String)
        ],
        'strings': [
            ('[^"\'$]+', String),
            include('variable')
        ],
        'variable': [
            # ${expr} substitution ('$$' escapes, hence the lookbehind)
            (r'(?<!\$)(\$\{)(.+?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # $name substitution
            (r'(?<!\$)(\$)([a-zA-Z_][\w\.]*)',
             Name.Variable),
        ]
    }
926
927
class HtmlGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights `genshi <https://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ kid HTML templates.
    """

    name = 'HTML+Genshi'
    url = 'https://genshi.edgewall.org/'
    aliases = ['html+genshi', 'html+kid']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+genshi']
    version_added = ''

    def __init__(self, **options):
        # HTML is the root language, Genshi markup the embedded one.
        super().__init__(HtmlLexer, GenshiMarkupLexer, **options)

    def analyse_text(text):
        # 0.2 per Genshi marker (${...} substitution, py: directive
        # attribute), added to the HTML score minus a small tiebreaker.
        score = sum(0.2 for marker in (r'\$\{.*?\}', r'py:(.*?)=["\']')
                    if re.search(marker, text) is not None)
        return score + HtmlLexer.analyse_text(text) - 0.01
951
952
class GenshiLexer(DelegatingLexer):
    """
    A lexer that highlights `genshi <https://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ kid XML templates.
    """

    name = 'Genshi'
    url = 'https://genshi.edgewall.org/'
    aliases = ['genshi', 'kid', 'xml+genshi', 'xml+kid']
    filenames = ['*.kid']
    alias_filenames = ['*.xml']
    mimetypes = ['application/x-genshi', 'application/x-kid']
    version_added = ''

    def __init__(self, **options):
        # XML is the root language, Genshi markup the embedded one.
        super().__init__(XmlLexer, GenshiMarkupLexer, **options)

    def analyse_text(text):
        # 0.2 per Genshi marker (${...} substitution, py: directive
        # attribute), added to the XML score minus a small tiebreaker.
        score = sum(0.2 for marker in (r'\$\{.*?\}', r'py:(.*?)=["\']')
                    if re.search(marker, text) is not None)
        return score + XmlLexer.analyse_text(text) - 0.01
977
978
class JavascriptGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights javascript code in genshi text templates.
    """

    name = 'JavaScript+Genshi Text'
    url = 'https://genshi.edgewall.org'
    aliases = ['js+genshitext', 'js+genshi', 'javascript+genshitext',
               'javascript+genshi']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+genshi',
                 'text/x-javascript+genshi',
                 'text/javascript+genshi']
    version_added = ''

    def __init__(self, **options):
        # JavaScript is the root language, Genshi text the embedded one.
        super().__init__(JavascriptLexer, GenshiTextLexer, **options)

    def analyse_text(text):
        # Defer to the Genshi heuristic, slightly penalized so the
        # pure-Genshi lexers win on ambiguous input.
        score = GenshiLexer.analyse_text(text)
        return score - 0.05
999
1000
class CssGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights CSS definitions in genshi text templates.
    """

    name = 'CSS+Genshi Text'
    url = 'https://genshi.edgewall.org'
    aliases = ['css+genshitext', 'css+genshi']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+genshi']
    version_added = ''

    def __init__(self, **options):
        # CSS is the root language, Genshi text the embedded one.
        super().__init__(CssLexer, GenshiTextLexer, **options)

    def analyse_text(text):
        # Defer to the Genshi heuristic, slightly penalized so the
        # pure-Genshi lexers win on ambiguous input.
        score = GenshiLexer.analyse_text(text)
        return score - 0.05
1018
1019
class RhtmlLexer(DelegatingLexer):
    """
    ERB lexer whose non-Ruby data is highlighted with the HTML lexer.

    Nested Javascript and CSS is highlighted too.
    """

    name = 'RHTML'
    url = 'https://github.com/ruby/erb'
    aliases = ['rhtml', 'html+erb', 'html+ruby']
    filenames = ['*.rhtml']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+ruby']
    version_added = ''

    def __init__(self, **options):
        # HTML is the root language, ERB the embedded one.
        super().__init__(HtmlLexer, ErbLexer, **options)

    def analyse_text(text):
        # Start from the ERB score, nudged down so plain ERB still wins,
        # then reward an explicit HTML doctype.
        score = ErbLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            # one more than the XmlErbLexer returns
            score += 0.5
        return score
1046
1047
class XmlErbLexer(DelegatingLexer):
    """
    ERB lexer whose data outside preprocessor directives is highlighted
    with the `XmlLexer`.
    """

    name = 'XML+Ruby'
    url = 'https://github.com/ruby/erb'
    aliases = ['xml+ruby', 'xml+erb']
    alias_filenames = ['*.xml']
    mimetypes = ['application/xml+ruby']
    version_added = ''

    def __init__(self, **options):
        # XML is the root language, ERB the embedded one.
        super().__init__(XmlLexer, ErbLexer, **options)

    def analyse_text(text):
        # Start from the ERB score, nudged down so plain ERB still wins,
        # then reward XML-looking input.
        score = ErbLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            score += 0.4
        return score
1069
1070
class CssErbLexer(DelegatingLexer):
    """
    ERB lexer whose unlexed data is highlighted with the `CssLexer`.
    """

    name = 'CSS+Ruby'
    url = 'https://github.com/ruby/erb'
    aliases = ['css+ruby', 'css+erb']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+ruby']
    version_added = ''

    def __init__(self, **options):
        # CSS is the root language, ERB the embedded one.
        super().__init__(CssLexer, ErbLexer, **options)

    def analyse_text(text):
        # Defer to the ERB heuristic, penalized so pure ERB wins.
        score = ErbLexer.analyse_text(text)
        return score - 0.05
1088
1089
class JavascriptErbLexer(DelegatingLexer):
    """
    ERB lexer variant that renders the non-ERB parts of the input with
    the `JavascriptLexer`.
    """

    name = 'JavaScript+Ruby'
    aliases = ['javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb']
    version_added = ''
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+ruby',
                 'text/x-javascript+ruby',
                 'text/javascript+ruby']
    url = 'https://github.com/ruby/erb'

    def __init__(self, **options):
        super().__init__(JavascriptLexer, ErbLexer, **options)

    def analyse_text(text):
        # Penalize slightly so plain ERB is preferred for ambiguous input.
        base = ErbLexer.analyse_text(text)
        return base - 0.05
1110
1111
class HtmlPhpLexer(DelegatingLexer):
    """
    PHP lexer variant that feeds the non-PHP parts of the input to the
    `HtmlLexer`.

    Nested Javascript and CSS is highlighted too.
    """

    name = 'HTML+PHP'
    aliases = ['html+php']
    filenames = ['*.phtml']
    version_added = ''
    alias_filenames = ['*.php', '*.html', '*.htm', '*.xhtml',
                       '*.php[345]']
    mimetypes = ['application/x-php',
                 'application/x-httpd-php', 'application/x-httpd-php3',
                 'application/x-httpd-php4', 'application/x-httpd-php5']
    url = 'https://www.php.net'

    def __init__(self, **options):
        super().__init__(HtmlLexer, PhpLexer, **options)

    def analyse_text(text):
        # Slightly below plain PHP; an HTML doctype tips the balance
        # back towards this lexer.
        score = PhpLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            score += 0.5
        return score
1139
1140
class XmlPhpLexer(DelegatingLexer):
    """
    PHP lexer variant that feeds the non-PHP parts of the input to the
    `XmlLexer`.
    """

    name = 'XML+PHP'
    aliases = ['xml+php']
    version_added = ''
    alias_filenames = ['*.xml', '*.php', '*.php[345]']
    mimetypes = ['application/xml+php']
    url = 'https://www.php.net'

    def __init__(self, **options):
        super().__init__(XmlLexer, PhpLexer, **options)

    def analyse_text(text):
        # Slightly below plain PHP; XML-looking content adds 0.4.
        score = PhpLexer.analyse_text(text) - 0.01
        return score + (0.4 if looks_like_xml(text) else 0)
1161
1162
class CssPhpLexer(DelegatingLexer):
    """
    PHP lexer variant that feeds the non-PHP parts of the input to the
    `CssLexer`.
    """

    name = 'CSS+PHP'
    aliases = ['css+php']
    version_added = ''
    alias_filenames = ['*.css']
    mimetypes = ['text/css+php']
    url = 'https://www.php.net'

    def __init__(self, **options):
        super().__init__(CssLexer, PhpLexer, **options)

    def analyse_text(text):
        # Penalize slightly so plain PHP is preferred for ambiguous input.
        base = PhpLexer.analyse_text(text)
        return base - 0.05
1180
1181
class JavascriptPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` which highlights unmatched data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+PHP'
    aliases = ['javascript+php', 'js+php']
    version_added = ''
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+php',
                 'text/x-javascript+php',
                 'text/javascript+php']
    url = 'https://www.php.net'

    def __init__(self, **options):
        super().__init__(JavascriptLexer, PhpLexer, **options)

    def analyse_text(text):
        # Subtract 0.05 like the other JavaScript+X delegating lexers in
        # this module (JavascriptErbLexer, JavascriptSmartyLexer,
        # JavascriptDjangoLexer), so the plain PhpLexer is preferred when
        # nothing JavaScript-specific is present.  Previously the raw PHP
        # score was returned, making this lexer tie with plain PHP.
        return PhpLexer.analyse_text(text) - 0.05
1202
1203
class HtmlSmartyLexer(DelegatingLexer):
    """
    Smarty lexer variant that hands the non-Smarty parts of the input
    to the `HtmlLexer`.

    Nested Javascript and CSS is highlighted too.
    """

    name = 'HTML+Smarty'
    aliases = ['html+smarty']
    version_added = ''
    alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.tpl']
    mimetypes = ['text/html+smarty']
    url = 'https://www.smarty.net/'

    def __init__(self, **options):
        super().__init__(HtmlLexer, SmartyLexer, **options)

    def analyse_text(text):
        # Slightly below plain Smarty; an HTML doctype tips the balance
        # back towards this lexer.
        score = SmartyLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            score += 0.5
        return score
1227
1228
class XmlSmartyLexer(DelegatingLexer):
    """
    Smarty lexer variant that hands the non-Smarty parts of the input
    to the `XmlLexer`.
    """

    name = 'XML+Smarty'
    aliases = ['xml+smarty']
    version_added = ''
    alias_filenames = ['*.xml', '*.tpl']
    mimetypes = ['application/xml+smarty']
    url = 'https://www.smarty.net/'

    def __init__(self, **options):
        super().__init__(XmlLexer, SmartyLexer, **options)

    def analyse_text(text):
        # Slightly below plain Smarty; XML-looking content adds 0.4.
        score = SmartyLexer.analyse_text(text) - 0.01
        return score + (0.4 if looks_like_xml(text) else 0)
1250
1251
class CssSmartyLexer(DelegatingLexer):
    """
    Smarty lexer variant that hands the non-Smarty parts of the input
    to the `CssLexer`.
    """

    name = 'CSS+Smarty'
    aliases = ['css+smarty']
    version_added = ''
    alias_filenames = ['*.css', '*.tpl']
    mimetypes = ['text/css+smarty']
    url = 'https://www.smarty.net/'

    def __init__(self, **options):
        super().__init__(CssLexer, SmartyLexer, **options)

    def analyse_text(text):
        # Penalize slightly so plain Smarty is preferred for ambiguous input.
        base = SmartyLexer.analyse_text(text)
        return base - 0.05
1270
1271
class JavascriptSmartyLexer(DelegatingLexer):
    """
    Smarty lexer variant that hands the non-Smarty parts of the input
    to the `JavascriptLexer`.
    """

    name = 'JavaScript+Smarty'
    aliases = ['javascript+smarty', 'js+smarty']
    version_added = ''
    alias_filenames = ['*.js', '*.tpl']
    mimetypes = ['application/x-javascript+smarty',
                 'text/x-javascript+smarty',
                 'text/javascript+smarty']
    url = 'https://www.smarty.net/'

    def __init__(self, **options):
        super().__init__(JavascriptLexer, SmartyLexer, **options)

    def analyse_text(text):
        # Penalize slightly so plain Smarty is preferred for ambiguous input.
        base = SmartyLexer.analyse_text(text)
        return base - 0.05
1292
1293
class HtmlDjangoLexer(DelegatingLexer):
    """
    Django/Jinja lexer variant that hands the non-template parts of the
    input to the `HtmlLexer`.

    Nested Javascript and CSS is highlighted too.
    """

    name = 'HTML+Django/Jinja'
    aliases = ['html+django', 'html+jinja', 'htmldjango']
    filenames = ['*.html.j2', '*.htm.j2', '*.xhtml.j2', '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2']
    version_added = ''
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+django', 'text/html+jinja']
    url = 'https://www.djangoproject.com/documentation/templates'

    def __init__(self, **options):
        super().__init__(HtmlLexer, DjangoLexer, **options)

    def analyse_text(text):
        # Slightly below plain Django/Jinja; an HTML doctype tips the
        # balance back towards this lexer.
        score = DjangoLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            score += 0.5
        return score
1318
1319
class XmlDjangoLexer(DelegatingLexer):
    """
    Django/Jinja lexer variant that hands the non-template parts of the
    input to the `XmlLexer`.
    """

    name = 'XML+Django/Jinja'
    aliases = ['xml+django', 'xml+jinja']
    filenames = ['*.xml.j2', '*.xml.jinja2']
    version_added = ''
    alias_filenames = ['*.xml']
    mimetypes = ['application/xml+django', 'application/xml+jinja']
    url = 'https://www.djangoproject.com/documentation/templates'

    def __init__(self, **options):
        super().__init__(XmlLexer, DjangoLexer, **options)

    def analyse_text(text):
        # Slightly below plain Django/Jinja; XML-looking content adds 0.4.
        score = DjangoLexer.analyse_text(text) - 0.01
        return score + (0.4 if looks_like_xml(text) else 0)
1342
1343
class CssDjangoLexer(DelegatingLexer):
    """
    Django/Jinja lexer variant that hands the non-template parts of the
    input to the `CssLexer`.
    """

    name = 'CSS+Django/Jinja'
    aliases = ['css+django', 'css+jinja']
    filenames = ['*.css.j2', '*.css.jinja2']
    version_added = ''
    alias_filenames = ['*.css']
    mimetypes = ['text/css+django', 'text/css+jinja']
    url = 'https://www.djangoproject.com/documentation/templates'

    def __init__(self, **options):
        super().__init__(CssLexer, DjangoLexer, **options)

    def analyse_text(text):
        # Penalize slightly so plain Django/Jinja is preferred for
        # ambiguous input.
        base = DjangoLexer.analyse_text(text)
        return base - 0.05
1363
1364
class JavascriptDjangoLexer(DelegatingLexer):
    """
    Django/Jinja lexer variant that hands the non-template parts of the
    input to the `JavascriptLexer`.
    """

    name = 'JavaScript+Django/Jinja'
    aliases = ['javascript+django', 'js+django',
               'javascript+jinja', 'js+jinja']
    filenames = ['*.js.j2', '*.js.jinja2']
    version_added = ''
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+django',
                 'application/x-javascript+jinja',
                 'text/x-javascript+django',
                 'text/x-javascript+jinja',
                 'text/javascript+django',
                 'text/javascript+jinja']
    url = 'https://www.djangoproject.com/documentation/templates'

    def __init__(self, **options):
        super().__init__(JavascriptLexer, DjangoLexer, **options)

    def analyse_text(text):
        # Penalize slightly so plain Django/Jinja is preferred for
        # ambiguous input.
        base = DjangoLexer.analyse_text(text)
        return base - 0.05
1390
1391
class JspRootLexer(RegexLexer):
    """
    Base for the `JspLexer`. Yields `Token.Other` for area outside of
    JSP tags.

    .. versionadded:: 0.7
    """

    tokens = {
        'root': [
            # '<%' plus at most one extra non-space character (e.g. '=' or
            # '@') opens a scriptlet section.
            (r'<%\S?', Keyword, 'sec'),
            # FIXME: I want to make these keywords but still parse attributes.
            (r'</?jsp:(forward|getProperty|include|plugin|setProperty|useBean).*?>',
             Keyword),
            # Anything else is emitted as Other for the delegating lexer.
            (r'[^<]+', Other),
            (r'<', Other),
        ],
        'sec': [
            (r'%>', Keyword, '#pop'),
            # note: '\w\W' != '.' without DOTALL.
            # Lazily consume up to the closing '%>' (or end of input) and
            # highlight the section body as Java.
            (r'[\w\W]+?(?=%>|\Z)', using(JavaLexer)),
        ],
    }
1415
1416
class JspLexer(DelegatingLexer):
    """
    Lexer for Java Server Pages: `JspRootLexer` output with the
    remaining markup rendered by the `XmlLexer`.
    """
    name = 'Java Server Page'
    aliases = ['jsp']
    filenames = ['*.jsp']
    mimetypes = ['application/x-jsp']
    url = 'https://projects.eclipse.org/projects/ee4j.jsp'
    version_added = '0.7'

    def __init__(self, **options):
        super().__init__(XmlLexer, JspRootLexer, **options)

    def analyse_text(text):
        # Java-looking code, XML-ish markup and scriptlet delimiters each
        # raise the confidence.
        score = JavaLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            score += 0.4
        if '<%' in text and '%>' in text:
            score += 0.1
        return score
1438
1439
class EvoqueLexer(RegexLexer):
    """
    For files using the Evoque templating system.
    """
    name = 'Evoque'
    aliases = ['evoque']
    filenames = ['*.evoque']
    mimetypes = ['application/x-evoque']
    url = 'https://gizmojo.org/templating'
    version_added = '1.1'

    # Directive bodies may span lines, so '.' must match newlines.
    flags = re.DOTALL

    tokens = {
        'root': [
            (r'[^#$]+', Other),
            (r'#\[', Comment.Multiline, 'comment'),
            # '$$' — presumably a literal dollar escape; passed through.
            (r'\$\$', Other),
            # svn keywords
            (r'\$\w+:[^$\n]*\$', Comment.Multiline),
            # directives: begin, end
            # Group 4 captures an optional '%' after '{'; the conditional
            # '(?(4)%)' then requires a matching '%' before the closing '}'.
            (r'(\$)(begin|end)(\{(%)?)(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      String, Punctuation)),
            # directives: evoque, overlay
            # see doc for handling first name arg: /directives/evoque/
            # + minor inconsistency: the "name" in e.g. $overlay{name=site_base}
            # should be using(PythonLexer), not passed out as String
            (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+)?'
             r'(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      String, using(PythonLexer), Punctuation)),
            # directives: if, for, prefer, test
            (r'(\$)(\w+)(\{(%)?)(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      using(PythonLexer), Punctuation)),
            # directive clauses (no {} expression)
            (r'(\$)(else|rof|fi)', bygroups(Punctuation, Name.Builtin)),
            # expressions
            (r'(\$\{(%)?)(.*?)((!)(.*?))?((?(2)%)\})',
             bygroups(Punctuation, None, using(PythonLexer),
                      Name.Builtin, None, None, Punctuation)),
            (r'#', Other),
        ],
        # Nestable '#[ ... ]#' comments.
        'comment': [
            (r'[^\]#]', Comment.Multiline),
            (r'#\[', Comment.Multiline, '#push'),
            (r'\]#', Comment.Multiline, '#pop'),
            (r'[\]#]', Comment.Multiline)
        ],
    }

    def analyse_text(text):
        """Evoque templates use $evoque, which is unique."""
        # NOTE(review): implicitly returns None when the marker is absent;
        # this appears to rely on Pygments treating a falsy result as no
        # match — confirm against the analysator wrapper.
        if '$evoque' in text:
            return 1
1496
class EvoqueHtmlLexer(DelegatingLexer):
    """
    Evoque lexer variant that renders the non-Evoque parts of the input
    with the `HtmlLexer`.
    """
    name = 'HTML+Evoque'
    aliases = ['html+evoque']
    alias_filenames = ['*.html']
    mimetypes = ['text/html+evoque']
    url = 'https://gizmojo.org/templating'
    version_added = '1.1'

    def __init__(self, **options):
        super().__init__(HtmlLexer, EvoqueLexer, **options)

    def analyse_text(text):
        # The '$evoque' marker check lives entirely in EvoqueLexer.
        return EvoqueLexer.analyse_text(text)
1514
1515
class EvoqueXmlLexer(DelegatingLexer):
    """
    Evoque lexer variant that renders the non-Evoque parts of the input
    with the `XmlLexer`.
    """
    name = 'XML+Evoque'
    aliases = ['xml+evoque']
    alias_filenames = ['*.xml']
    mimetypes = ['application/xml+evoque']
    url = 'https://gizmojo.org/templating'
    version_added = '1.1'

    def __init__(self, **options):
        super().__init__(XmlLexer, EvoqueLexer, **options)

    def analyse_text(text):
        # The '$evoque' marker check lives entirely in EvoqueLexer.
        return EvoqueLexer.analyse_text(text)
1533
1534
class ColdfusionLexer(RegexLexer):
    """
    Coldfusion statements.

    Used by `ColdfusionMarkupLexer` for the contents of ``<cfscript>``
    blocks and tag bodies.
    """
    name = 'cfstatement'
    aliases = ['cfs']
    filenames = []
    mimetypes = []
    url = 'https://www.adobe.com/products/coldfusion-family.html'
    version_added = ''

    # CFML keywords and operators are case-insensitive.
    flags = re.IGNORECASE

    tokens = {
        'root': [
            (r'//.*?\n', Comment.Single),
            (r'/\*(?:.|\n)*?\*/', Comment.Multiline),
            (r'\+\+|--', Operator),
            (r'[-+*/^&=!]', Operator),
            (r'<=|>=|<|>|==', Operator),
            (r'mod\b', Operator),
            (r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator),
            (r'\|\||&&', Operator),
            (r'\?', Operator),
            (r'"', String.Double, 'string'),
            # There is a special rule for allowing html in single quoted
            # strings, evidently.
            (r"'.*?'", String.Single),
            (r'\d+', Number),
            # Keyword list: 'component' was previously listed twice; the
            # duplicate alternative has been removed (no behavior change).
            (r'(if|else|len|var|xml|default|break|switch|component|property|function|do|'
             r'try|catch|in|continue|for|return|while|required|any|array|binary|boolean|'
             r'date|guid|numeric|query|string|struct|uuid|case)\b', Keyword),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'(application|session|client|cookie|super|this|variables|arguments)\b',
             Name.Constant),
            (r'([a-z_$][\w.]*)(\s*)(\()',
             bygroups(Name.Function, Text, Punctuation)),
            (r'[a-z_$][\w.]*', Name.Variable),
            (r'[()\[\]{};:,.\\]', Punctuation),
            (r'\s+', Text),
        ],
        'string': [
            # '""' is an escaped quote inside a double-quoted string.
            (r'""', String.Double),
            # '#expr#' interpolation.
            (r'#.+?#', String.Interp),
            (r'[^"#]+', String.Double),
            (r'#', String.Double),
            (r'"', String.Double, '#pop'),
        ],
    }
1584
1585
class ColdfusionMarkupLexer(RegexLexer):
    """
    Coldfusion markup only.

    Tag bodies and ``<cfscript>`` contents are sub-lexed with
    `ColdfusionLexer`; everything outside the cf tags is emitted as
    `Other` for a delegating lexer.
    """
    name = 'Coldfusion'
    aliases = ['cf']
    filenames = []
    mimetypes = []
    url = 'https://www.adobe.com/products/coldfusion-family.html'

    tokens = {
        'root': [
            (r'[^<]+', Other),
            include('tags'),
            (r'<[^<>]*', Other),
        ],
        'tags': [
            # '<!---' starts a (nestable) ColdFusion comment.
            (r'<!---', Comment.Multiline, 'cfcomment'),
            (r'(?s)<!--.*?-->', Comment),
            (r'<cfoutput.*?>', Name.Builtin, 'cfoutput'),
            (r'(?s)(<cfscript.*?>)(.+?)(</cfscript.*?>)',
             bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
            # negative lookbehind is for strings with embedded >
            (r'(?s)(</?cf(?:component|include|if|else|elseif|loop|return|'
             r'dbinfo|dump|abort|location|invoke|throw|file|savecontent|'
             r'mailpart|mail|header|content|zip|image|lock|argument|try|'
             r'catch|break|directory|http|set|function|param)\b)(.*?)((?<!\\)>)',
             bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
        ],
        'cfoutput': [
            (r'[^#<]+', Other),
            # '#expr#' interpolation inside <cfoutput>.
            (r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer),
                                      Punctuation)),
            # (r'<cfoutput.*?>', Name.Builtin, '#push'),
            (r'</cfoutput.*?>', Name.Builtin, '#pop'),
            include('tags'),
            (r'(?s)<[^<>]*', Other),
            (r'#', Other),
        ],
        'cfcomment': [
            # '<!---' comments nest, hence the push/pop pair.
            (r'<!---', Comment.Multiline, '#push'),
            (r'--->', Comment.Multiline, '#pop'),
            (r'([^<-]|<(?!!---)|-(?!-->))+', Comment.Multiline),
        ],
    }
1631
1632
class ColdfusionHtmlLexer(DelegatingLexer):
    """
    ColdFusion markup embedded in HTML documents.
    """
    name = 'Coldfusion HTML'
    aliases = ['cfm']
    filenames = ['*.cfm', '*.cfml']
    mimetypes = ['application/x-coldfusion']
    url = 'https://www.adobe.com/products/coldfusion-family.html'
    version_added = ''

    def __init__(self, **options):
        # HtmlLexer fills in everything ColdfusionMarkupLexer leaves as Other.
        super().__init__(HtmlLexer, ColdfusionMarkupLexer, **options)
1646
1647
class ColdfusionCFCLexer(DelegatingLexer):
    """
    ColdFusion components: script statements layered over cfm-style markup.
    """
    name = 'Coldfusion CFC'
    aliases = ['cfc']
    filenames = ['*.cfc']
    mimetypes = []
    url = 'https://www.adobe.com/products/coldfusion-family.html'
    version_added = '2.0'

    def __init__(self, **options):
        # Script is lexed first; the remainder falls through to cfm markup.
        super().__init__(ColdfusionHtmlLexer, ColdfusionLexer, **options)
1661
1662
class SspLexer(DelegatingLexer):
    """
    Lexer for Scalate Server Pages (JSP-style sections over XML markup).
    """
    name = 'Scalate Server Page'
    aliases = ['ssp']
    filenames = ['*.ssp']
    mimetypes = ['application/x-ssp']
    url = 'https://scalate.github.io/scalate/'
    version_added = '1.4'

    def __init__(self, **options):
        super().__init__(XmlLexer, JspRootLexer, **options)

    def analyse_text(text):
        # Accumulate evidence: a Scala-style 'val name:' declaration is
        # the strongest hint; XML-ish markup and '<%'/'%>' sections add
        # smaller boosts.
        score = 0.0
        if re.search(r'val \w+\s*:', text):
            score += 0.6
        if looks_like_xml(text):
            score += 0.2
        if '<%' in text and '%>' in text:
            score += 0.1
        return score
1686
1687
class TeaTemplateRootLexer(RegexLexer):
    """
    Base for the `TeaTemplateLexer`. Yields `Token.Other` for area outside of
    code blocks.

    .. versionadded:: 1.5
    """

    tokens = {
        'root': [
            # '<%' plus at most one extra non-space character opens a
            # code section.
            (r'<%\S?', Keyword, 'sec'),
            # Anything else is emitted as Other for the delegating lexer.
            (r'[^<]+', Other),
            (r'<', Other),
        ],
        'sec': [
            (r'%>', Keyword, '#pop'),
            # note: '\w\W' != '.' without DOTALL.
            # Lazily consume up to the closing '%>' (or end of input) and
            # highlight the section body as Tea.
            (r'[\w\W]+?(?=%>|\Z)', using(TeaLangLexer)),
        ],
    }
1708
1709
class TeaTemplateLexer(DelegatingLexer):
    """
    Lexer for Tea Templates: `TeaTemplateRootLexer` output with the
    remaining markup rendered by the `XmlLexer`.
    """
    name = 'Tea'
    aliases = ['tea']
    filenames = ['*.tea']
    mimetypes = ['text/x-tea']
    url = 'https://github.com/teatrove/teatrove'
    version_added = '1.5'

    def __init__(self, **options):
        super().__init__(XmlLexer, TeaTemplateRootLexer, **options)

    def analyse_text(text):
        # Tea-looking code, XML-ish markup and '<%'/'%>' sections each
        # raise the confidence.
        score = TeaLangLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            score += 0.4
        if '<%' in text and '%>' in text:
            score += 0.1
        return score
1731
1732
class LassoHtmlLexer(DelegatingLexer):
    """
    Lasso lexer variant that renders the non-Lasso parts of the input
    with the `HtmlLexer`.

    Nested JavaScript and CSS is also highlighted.
    """

    name = 'HTML+Lasso'
    aliases = ['html+lasso']
    version_added = '1.6'
    alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.lasso', '*.lasso[89]',
                       '*.incl', '*.inc', '*.las']
    mimetypes = ['text/html+lasso',
                 'application/x-httpd-lasso',
                 'application/x-httpd-lasso[89]']
    url = 'https://www.lassosoft.com'

    def __init__(self, **options):
        super().__init__(HtmlLexer, LassoLexer, **options)

    def analyse_text(text):
        # Slightly below plain Lasso; an HTML doctype tips the balance
        # back towards this lexer.
        score = LassoLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):  # same as HTML lexer
            score += 0.5
        return score
1759
1760
class LassoXmlLexer(DelegatingLexer):
    """
    Lasso lexer variant that renders the non-Lasso parts of the input
    with the `XmlLexer`.
    """

    name = 'XML+Lasso'
    aliases = ['xml+lasso']
    version_added = '1.6'
    alias_filenames = ['*.xml', '*.lasso', '*.lasso[89]',
                       '*.incl', '*.inc', '*.las']
    mimetypes = ['application/xml+lasso']
    url = 'https://www.lassosoft.com'

    def __init__(self, **options):
        super().__init__(XmlLexer, LassoLexer, **options)

    def analyse_text(text):
        # Slightly below plain Lasso; XML-looking content adds 0.4.
        score = LassoLexer.analyse_text(text) - 0.01
        return score + (0.4 if looks_like_xml(text) else 0)
1783
1784
class LassoCssLexer(DelegatingLexer):
    """
    Lasso lexer variant that renders the non-Lasso parts of the input
    with the `CssLexer`.
    """

    name = 'CSS+Lasso'
    aliases = ['css+lasso']
    version_added = '1.6'
    alias_filenames = ['*.css']
    mimetypes = ['text/css+lasso']
    url = 'https://www.lassosoft.com'

    def __init__(self, **options):
        # Presumably keeps undelimited text from being parsed as Lasso;
        # see LassoLexer's 'requiredelimiters' option.
        options['requiredelimiters'] = True
        super().__init__(CssLexer, LassoLexer, **options)

    def analyse_text(text):
        # 'property: value;' patterns are weak CSS evidence on top of
        # the (penalized) Lasso score.
        score = LassoLexer.analyse_text(text) - 0.05
        if re.search(r'\w+:[^;]+;', text):
            score += 0.1
        if 'padding:' in text:
            score += 0.1
        return score
1809
1810
class LassoJavascriptLexer(DelegatingLexer):
    """
    Lasso lexer variant that renders the non-Lasso parts of the input
    with the `JavascriptLexer`.
    """

    name = 'JavaScript+Lasso'
    aliases = ['javascript+lasso', 'js+lasso']
    version_added = '1.6'
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+lasso',
                 'text/x-javascript+lasso',
                 'text/javascript+lasso']
    url = 'https://www.lassosoft.com'

    def __init__(self, **options):
        # Presumably keeps undelimited text from being parsed as Lasso;
        # see LassoLexer's 'requiredelimiters' option.
        options['requiredelimiters'] = True
        super().__init__(JavascriptLexer, LassoLexer, **options)

    def analyse_text(text):
        # Penalize slightly so plain Lasso is preferred for ambiguous input.
        return LassoLexer.analyse_text(text) - 0.05
1833
1834
class HandlebarsLexer(RegexLexer):
    """
    Generic handlebars template lexer.

    Highlights only the Handlebars template tags (stuff between `{{` and `}}`).
    Everything else is left for a delegating lexer.
    """

    name = "Handlebars"
    url = 'https://handlebarsjs.com/'
    aliases = ['handlebars']
    version_added = '2.0'

    tokens = {
        'root': [
            (r'[^{]+', Other),

            # Comment start {{! }} or {{!--
            # Greedy '.*' (no DOTALL): consumes through the last '}}' on
            # the same line.
            (r'\{\{!.*\}\}', Comment),

            # HTML Escaping open {{{expression
            (r'(\{\{\{)(\s*)', bygroups(Comment.Special, Text), 'tag'),

            # {{blockOpen {{#blockOpen {{/blockClose with optional tilde ~
            (r'(\{\{)([#~/]+)([^\s}]*)',
             bygroups(Comment.Preproc, Number.Attribute, Number.Attribute), 'tag'),
            (r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'tag'),
        ],

        # Inside a {{ ... }} / {{{ ... }}} tag.
        'tag': [
            (r'\s+', Text),
            # HTML Escaping close }}}
            (r'\}\}\}', Comment.Special, '#pop'),
            # blockClose}}, includes optional tilde ~
            (r'(~?)(\}\})', bygroups(Number, Comment.Preproc), '#pop'),

            # {{opt=something}}
            (r'([^\s}]+)(=)', bygroups(Name.Attribute, Operator)),

            # Partials {{> ...}}
            (r'(>)(\s*)(@partial-block)', bygroups(Keyword, Text, Keyword)),
            (r'(#?>)(\s*)([\w-]+)', bygroups(Keyword, Text, Name.Variable)),
            (r'(>)(\s*)(\()', bygroups(Keyword, Text, Punctuation),
             'dynamic-partial'),

            include('generic'),
        ],
        # Parenthesized dynamic partial name: {{> (expr) }}.
        'dynamic-partial': [
            (r'\s+', Text),
            (r'\)', Punctuation, '#pop'),

            (r'(lookup)(\s+)(\.|this)(\s+)', bygroups(Keyword, Text,
                                                      Name.Variable, Text)),
            (r'(lookup)(\s+)(\S+)', bygroups(Keyword, Text,
                                             using(this, state='variable'))),
            (r'[\w-]+', Name.Function),

            include('generic'),
        ],
        # Path expressions: names, dotted segments, this/ and ../ prefixes.
        'variable': [
            (r'[()/@a-zA-Z][\w-]*', Name.Variable),
            (r'\.[\w-]+', Name.Variable),
            (r'(this\/|\.\/|(\.\.\/)+)[\w-]+', Name.Variable),
        ],
        # Variables plus string/number literals.
        'generic': [
            include('variable'),

            # borrowed from DjangoLexer
            (r':?"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r":?'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            # NOTE(review): '(eE[+-][0-9])?' matches the literal text
            # 'eE', not scientific notation — inherited verbatim from
            # DjangoLexer; confirm before changing.
            (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
        ]
    }
1909
1910
class HandlebarsHtmlLexer(DelegatingLexer):
    """
    Handlebars lexer variant that renders the non-template parts of the
    input with the `HtmlLexer`.
    """

    name = "HTML+Handlebars"
    aliases = ["html+handlebars"]
    filenames = ['*.handlebars', '*.hbs']
    mimetypes = ['text/html+handlebars', 'text/x-handlebars-template']
    url = 'https://handlebarsjs.com/'
    version_added = '2.0'

    def __init__(self, **options):
        # HtmlLexer fills in everything HandlebarsLexer leaves as Other.
        super().__init__(HtmlLexer, HandlebarsLexer, **options)
1926
1927
class YamlJinjaLexer(DelegatingLexer):
    """
    Django/Jinja lexer variant that renders the non-template parts of
    the input with the `YamlLexer`.

    Commonly used in Saltstack salt states.
    """

    name = 'YAML+Jinja'
    aliases = ['yaml+jinja', 'salt', 'sls']
    filenames = ['*.sls', '*.yaml.j2', '*.yml.j2', '*.yaml.jinja2', '*.yml.jinja2']
    mimetypes = ['text/x-yaml+jinja', 'text/x-sls']
    url = 'https://jinja.palletsprojects.com'
    version_added = '2.0'

    def __init__(self, **options):
        # YamlLexer fills in everything DjangoLexer leaves as Other.
        super().__init__(YamlLexer, DjangoLexer, **options)
1945
1946
class LiquidLexer(RegexLexer):
    """
    Lexer for Liquid templates.

    Tag blocks (``{% ... %}``) and output tags (``{{ ... }}``) are
    tokenized; all other text is passed through as plain ``Text``.
    """
    name = 'liquid'
    url = 'https://www.rubydoc.info/github/Shopify/liquid'
    aliases = ['liquid']
    filenames = ['*.liquid']
    version_added = '2.0'

    tokens = {
        'root': [
            (r'[^{]+', Text),
            # tags and block tags
            (r'(\{%)(\s*)', bygroups(Punctuation, Whitespace), 'tag-or-block'),
            # output tags
            (r'(\{\{)(\s*)([^\s}]+)',
             bygroups(Punctuation, Whitespace, using(this, state = 'generic')),
             'output'),
            (r'\{', Text)
        ],

        # Dispatch on the word following '{%'.
        'tag-or-block': [
            # builtin logic blocks
            (r'(if|unless|elsif|case)(?=\s+)', Keyword.Reserved, 'condition'),
            (r'(when)(\s+)', bygroups(Keyword.Reserved, Whitespace),
             combined('end-of-block', 'whitespace', 'generic')),
            (r'(else)(\s*)(%\})',
             bygroups(Keyword.Reserved, Whitespace, Punctuation), '#pop'),

            # other builtin blocks
            (r'(capture)(\s+)([^\s%]+)(\s*)(%\})',
             bygroups(Name.Tag, Whitespace, using(this, state = 'variable'),
                      Whitespace, Punctuation), '#pop'),
            (r'(comment)(\s*)(%\})',
             bygroups(Name.Tag, Whitespace, Punctuation), 'comment'),
            (r'(raw)(\s*)(%\})',
             bygroups(Name.Tag, Whitespace, Punctuation), 'raw'),

            # end of block
            (r'(end(case|unless|if))(\s*)(%\})',
             bygroups(Keyword.Reserved, None, Whitespace, Punctuation), '#pop'),
            (r'(end([^\s%]+))(\s*)(%\})',
             bygroups(Name.Tag, None, Whitespace, Punctuation), '#pop'),

            # builtin tags (assign and include are handled together with usual tags)
            (r'(cycle)(\s+)(?:([^\s:]*)(:))?(\s*)',
             bygroups(Name.Tag, Whitespace,
                      using(this, state='generic'), Punctuation, Whitespace),
             'variable-tag-markup'),

            # other tags or blocks
            (r'([^\s%]+)(\s*)', bygroups(Name.Tag, Whitespace), 'tag-markup')
        ],

        # Inside '{{ ... }}' after the leading expression.
        'output': [
            include('whitespace'),
            (r'\}\}', Punctuation, '#pop'),  # end of output

            (r'\|', Punctuation, 'filters')
        ],

        'filters': [
            include('whitespace'),
            (r'\}\}', Punctuation, ('#pop', '#pop')),  # end of filters and output

            (r'([^\s|:]+)(:?)(\s*)',
             bygroups(Name.Function, Punctuation, Whitespace), 'filter-markup')
        ],

        'filter-markup': [
            (r'\|', Punctuation, '#pop'),
            include('end-of-tag'),
            include('default-param-markup')
        ],

        # Comparison expressions inside if/unless/elsif/case blocks.
        'condition': [
            include('end-of-block'),
            include('whitespace'),

            (r'([^\s=!><]+)(\s*)([=!><]=?)(\s*)(\S+)(\s*)(%\})',
             bygroups(using(this, state = 'generic'), Whitespace, Operator,
                      Whitespace, using(this, state = 'generic'), Whitespace,
                      Punctuation)),
            (r'\b!', Operator),
            (r'\bnot\b', Operator.Word),
            (r'([\w.\'"]+)(\s+)(contains)(\s+)([\w.\'"]+)',
             bygroups(using(this, state = 'generic'), Whitespace, Operator.Word,
                      Whitespace, using(this, state = 'generic'))),

            include('generic'),
            include('whitespace')
        ],

        'generic-value': [
            include('generic'),
            include('end-at-whitespace')
        ],

        'operator': [
            (r'(\s*)((=|!|>|<)=?)(\s*)',
             bygroups(Whitespace, Operator, None, Whitespace), '#pop'),
            (r'(\s*)(\bcontains\b)(\s*)',
             bygroups(Whitespace, Operator.Word, Whitespace), '#pop'),
        ],

        'end-of-tag': [
            (r'\}\}', Punctuation, '#pop')
        ],

        'end-of-block': [
            (r'%\}', Punctuation, ('#pop', '#pop'))
        ],

        'end-at-whitespace': [
            (r'\s+', Whitespace, '#pop')
        ],

        # states for unknown markup
        'param-markup': [
            include('whitespace'),
            # params with colons or equals
            (r'([^\s=:]+)(\s*)(=|:)',
             bygroups(Name.Attribute, Whitespace, Operator)),
            # explicit variables
            (r'(\{\{)(\s*)([^\s}])(\s*)(\}\})',
             bygroups(Punctuation, Whitespace, using(this, state = 'variable'),
                      Whitespace, Punctuation)),

            include('string'),
            include('number'),
            include('keyword'),
            (r',', Punctuation)
        ],

        'default-param-markup': [
            include('param-markup'),
            (r'.', Text)  # fallback for switches / variables / un-quoted strings / ...
        ],

        'variable-param-markup': [
            include('param-markup'),
            include('variable'),
            (r'.', Text)  # fallback
        ],

        'tag-markup': [
            (r'%\}', Punctuation, ('#pop', '#pop')),  # end of tag
            include('default-param-markup')
        ],

        'variable-tag-markup': [
            (r'%\}', Punctuation, ('#pop', '#pop')),  # end of tag
            include('variable-param-markup')
        ],

        # states for different values types
        'keyword': [
            (r'\b(false|true)\b', Keyword.Constant)
        ],

        'variable': [
            (r'[a-zA-Z_]\w*', Name.Variable),
            (r'(?<=\w)\.(?=\w)', Punctuation)
        ],

        'string': [
            (r"'[^']*'", String.Single),
            (r'"[^"]*"', String.Double)
        ],

        'number': [
            (r'\d+\.\d+', Number.Float),
            (r'\d+', Number.Integer)
        ],

        'generic': [  # decides for variable, string, keyword or number
            include('keyword'),
            include('string'),
            include('number'),
            include('variable')
        ],

        'whitespace': [
            (r'[ \t]+', Whitespace)
        ],

        # states for builtin blocks
        # Body of '{% comment %}' up to '{% endcomment %}'.
        'comment': [
            (r'(\{%)(\s*)(endcomment)(\s*)(%\})',
             bygroups(Punctuation, Whitespace, Name.Tag, Whitespace,
                      Punctuation), ('#pop', '#pop')),
            (r'.', Comment)
        ],

        # Body of '{% raw %}' up to '{% endraw %}', passed through as Text.
        'raw': [
            (r'[^{]+', Text),
            (r'(\{%)(\s*)(endraw)(\s*)(%\})',
             bygroups(Punctuation, Whitespace, Name.Tag, Whitespace,
                      Punctuation), '#pop'),
            (r'\{', Text)
        ],
    }
2150
2151
class TwigLexer(RegexLexer):
    """
    Twig template lexer.

    It just highlights Twig code between the preprocessor directives,
    other data is left untouched by the lexer.
    """

    name = 'Twig'
    aliases = ['twig']
    mimetypes = ['application/x-twig']
    url = 'https://twig.symfony.com'
    version_added = '2.0'

    flags = re.M | re.S

    # Note that a backslash is included in the following two patterns
    # PHP uses a backslash as a namespace separator
    _ident_char = r'[\\\w-]|[^\x00-\x7f]'
    _ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])'
    _ident_end = r'(?:' + _ident_char + ')*'
    _ident_inner = _ident_begin + _ident_end

    tokens = {
        'root': [
            (r'[^{]+', Other),
            (r'\{\{', Comment.Preproc, 'var'),
            # twig comments
            (r'\{\#.*?\#\}', Comment),
            # raw twig blocks
            (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Other, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            (r'(\{%)(-?\s*)(verbatim)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endverbatim)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Other, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # filter blocks
            (rf'(\{{%)(-?\s*)(filter)(\s+)({_ident_inner})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
             'tag'),
            (r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
             bygroups(Comment.Preproc, Text, Keyword), 'tag'),
            (r'\{', Other),
        ],
        'varnames': [
            (rf'(\|)(\s*)({_ident_inner})',
             bygroups(Operator, Text, Name.Function)),
            (rf'(is)(\s+)(not)?(\s*)({_ident_inner})',
             bygroups(Keyword, Text, Keyword, Text, Name.Function)),
            (r'(?i)(true|false|none|null)\b', Keyword.Pseudo),
            # Operators, statement keywords and built-in tests.  Each
            # continued raw-string line must end with '|': previously the
            # missing separators fused adjacent alternatives ('is'+'if' ->
            # 'isif', 'import'+'constant' -> 'importconstant',
            # 'sameas'+'matches' -> 'sameasmatches'), so the keywords
            # 'is', 'if', 'import', 'constant', 'sameas' and 'matches'
            # could never match.
            (r'(in|not|and|b-and|or|b-or|b-xor|is|'
             r'if|elseif|else|import|'
             r'constant|defined|divisibleby|empty|even|iterable|odd|sameas|'
             r'matches|starts\s+with|ends\s+with)\b',
             Keyword),
            (r'(loop|block|parent)\b', Name.Builtin),
            (_ident_inner, Name.Variable),
            (r'\.' + _ident_inner, Name.Variable),
            (r'\.[0-9]+', Number),
            (r':?"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r":?'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'([{}()\[\]+\-*/,:~%]|\.\.|\?|:|\*\*|\/\/|!=|[><=]=?)', Operator),
            (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
        ],
        # Inside '{{ ... }}'.
        'var': [
            (r'\s+', Text),
            (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames')
        ],
        # Inside '{% ... %}'.
        'tag': [
            (r'\s+', Text),
            (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames'),
            (r'.', Punctuation),
        ],
    }
2233
2234
class TwigHtmlLexer(DelegatingLexer):
    """
    Lexer for HTML documents containing Twig markup.

    Delegates everything the `TwigLexer` does not claim to the
    `HtmlLexer` for highlighting.
    """

    name = "HTML+Twig"
    url = 'https://twig.symfony.com'
    aliases = ["html+twig"]
    filenames = ['*.twig']
    mimetypes = ['text/html+twig']
    version_added = '2.0'

    def __init__(self, **options):
        super().__init__(HtmlLexer, TwigLexer, **options)
2250
2251
class Angular2Lexer(RegexLexer):
    """
    Generic angular2 template lexer.

    Highlights only the Angular template tags (stuff between `{{` and `}}` and
    special attributes: '(event)=', '[property]=', '[(twoWayBinding)]=').
    Everything else is left for a delegating lexer.
    """

    name = "Angular2"
    url = 'https://angular.io/guide/template-syntax'
    aliases = ['ng2']
    version_added = '2.1'

    tokens = {
        'root': [
            # anything that cannot start an Angular construct is passed
            # through as Other for a delegating lexer to handle
            (r'[^{([*#]+', Other),

            # {{meal.name}}
            (r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'ngExpression'),

            # (click)="deleteOrder()"; [value]="test"; [(twoWayTest)]="foo.bar"
            (r'([([]+)([\w:.-]+)([\])]+)(\s*)(=)(\s*)',
             bygroups(Punctuation, Name.Attribute, Punctuation, Text, Operator, Text),
             'attr'),
            # same binding syntax without a value assignment
            (r'([([]+)([\w:.-]+)([\])]+)(\s*)',
             bygroups(Punctuation, Name.Attribute, Punctuation, Text)),

            # *ngIf="..."; #f="ngForm"
            (r'([*#])([\w:.-]+)(\s*)(=)(\s*)',
             bygroups(Punctuation, Name.Attribute, Text, Operator, Text), 'attr'),
            # structural directive / template reference without a value
            (r'([*#])([\w:.-]+)(\s*)',
             bygroups(Punctuation, Name.Attribute, Text)),
        ],

        # inside {{ ... }}; popped when the closing }} is seen
        'ngExpression': [
            (r'\s+(\|\s+)?', Text),
            (r'\}\}', Comment.Preproc, '#pop'),

            # Literals
            (r':?(true|false)', String.Boolean),
            (r':?"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r":?'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            # NOTE(review): '(eE[+-][0-9])?' looks like it was meant to be
            # '([eE][+-][0-9]+)?' -- confirm before changing; kept as-is.
            (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),

            # Variabletext
            (r'[a-zA-Z][\w-]*(\(.*\))?', Name.Variable),
            (r'\.[\w-]+(\(.*\))?', Name.Variable),

            # inline If
            (r'(\?)(\s*)([^}\s]+)(\s*)(:)(\s*)([^}\s]+)(\s*)',
             bygroups(Operator, Text, String, Text, Operator, Text, String, Text)),
        ],
        # attribute value after '='; pops back to root after one value
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }
2312
2313
class Angular2HtmlLexer(DelegatingLexer):
    """
    Lexer for HTML documents with Angular2 template syntax.

    Delegates everything the `Angular2Lexer` does not claim to the
    `HtmlLexer` for highlighting.
    """

    name = "HTML + Angular2"
    url = 'https://angular.io/guide/template-syntax'
    aliases = ["html+ng2"]
    filenames = ['*.ng2']
    version_added = '2.0'

    def __init__(self, **options):
        super().__init__(HtmlLexer, Angular2Lexer, **options)
2328
2329
class SqlJinjaLexer(DelegatingLexer):
    """
    Templated SQL lexer.

    Jinja constructs are highlighted by the `DjangoLexer`; the rest of
    the document is handed to the `SqlLexer`.
    """

    name = 'SQL+Jinja'
    url = 'https://jinja.palletsprojects.com'
    aliases = ['sql+jinja']
    filenames = ['*.sql', '*.sql.j2', '*.sql.jinja2']
    version_added = '2.13'

    def __init__(self, **options):
        super().__init__(SqlLexer, DjangoLexer, **options)

    def analyse_text(text):
        # Each (pattern, weight) pair scores one construct typical of
        # Jinja-templated (dbt-style) SQL; weights accumulate.
        hints = (
            # dbt's ref function
            (r'\{\{\s*ref\(.*\)\s*\}\}', 0.4),
            # dbt's source function
            (r'\{\{\s*source\(.*\)\s*\}\}', 0.25),
            # Jinja macro
            (r'\{%-?\s*macro \w+\(.*\)\s*-?%\}', 0.15),
        )
        return sum((weight for pattern, weight in hints
                    if re.search(pattern, text)), 0.0)