1""" 

2 pygments.lexers.templates 

3 ~~~~~~~~~~~~~~~~~~~~~~~~~ 

4 

5 Lexers for various template engines' markup. 

6 

7 :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS. 

8 :license: BSD, see LICENSE for details. 

9""" 

10 

11import re 

12 

13from pygments.lexers.html import HtmlLexer, XmlLexer 

14from pygments.lexers.javascript import JavascriptLexer, LassoLexer 

15from pygments.lexers.css import CssLexer 

16from pygments.lexers.php import PhpLexer 

17from pygments.lexers.python import PythonLexer 

18from pygments.lexers.perl import PerlLexer 

19from pygments.lexers.jvm import JavaLexer, TeaLangLexer 

20from pygments.lexers.data import YamlLexer 

21from pygments.lexers.sql import SqlLexer 

22from pygments.lexer import Lexer, DelegatingLexer, RegexLexer, bygroups, \ 

23 include, using, this, default, combined 

24from pygments.token import Error, Punctuation, Whitespace, \ 

25 Text, Comment, Operator, Keyword, Name, String, Number, Other, Token 

26from pygments.util import html_doctype_matches, looks_like_xml 

27 

28__all__ = ['HtmlPhpLexer', 'XmlPhpLexer', 'CssPhpLexer', 

29 'JavascriptPhpLexer', 'ErbLexer', 'RhtmlLexer', 

30 'XmlErbLexer', 'CssErbLexer', 'JavascriptErbLexer', 

31 'SmartyLexer', 'HtmlSmartyLexer', 'XmlSmartyLexer', 

32 'CssSmartyLexer', 'JavascriptSmartyLexer', 'DjangoLexer', 

33 'HtmlDjangoLexer', 'CssDjangoLexer', 'XmlDjangoLexer', 

34 'JavascriptDjangoLexer', 'GenshiLexer', 'HtmlGenshiLexer', 

35 'GenshiTextLexer', 'CssGenshiLexer', 'JavascriptGenshiLexer', 

36 'MyghtyLexer', 'MyghtyHtmlLexer', 'MyghtyXmlLexer', 

37 'MyghtyCssLexer', 'MyghtyJavascriptLexer', 'MasonLexer', 'MakoLexer', 

38 'MakoHtmlLexer', 'MakoXmlLexer', 'MakoJavascriptLexer', 

39 'MakoCssLexer', 'JspLexer', 'CheetahLexer', 'CheetahHtmlLexer', 

40 'CheetahXmlLexer', 'CheetahJavascriptLexer', 'EvoqueLexer', 

41 'EvoqueHtmlLexer', 'EvoqueXmlLexer', 'ColdfusionLexer', 

42 'ColdfusionHtmlLexer', 'ColdfusionCFCLexer', 'VelocityLexer', 

43 'VelocityHtmlLexer', 'VelocityXmlLexer', 'SspLexer', 

44 'TeaTemplateLexer', 'LassoHtmlLexer', 'LassoXmlLexer', 

45 'LassoCssLexer', 'LassoJavascriptLexer', 'HandlebarsLexer', 

46 'HandlebarsHtmlLexer', 'YamlJinjaLexer', 'LiquidLexer', 

47 'TwigLexer', 'TwigHtmlLexer', 'Angular2Lexer', 'Angular2HtmlLexer', 

48 'SqlJinjaLexer'] 

49 

50 

51class ErbLexer(Lexer): 

52 """ 

53 Generic ERB (Ruby Templating) lexer. 

54 

55 Just highlights ruby code between the preprocessor directives, other data 

56 is left untouched by the lexer. 

57 

58 All options are also forwarded to the `RubyLexer`. 

59 """ 

60 

61 name = 'ERB' 

62 url = 'https://github.com/ruby/erb' 

63 aliases = ['erb'] 

64 mimetypes = ['application/x-ruby-templating'] 

65 

66 _block_re = re.compile(r'(<%%|%%>|<%=|<%#|<%-|<%|-%>|%>|^%[^%].*?$)', re.M) 

67 

68 def __init__(self, **options): 

69 from pygments.lexers.ruby import RubyLexer 

70 self.ruby_lexer = RubyLexer(**options) 

71 Lexer.__init__(self, **options) 

72 

73 def get_tokens_unprocessed(self, text): 

74 """ 

75 Since ERB doesn't allow "<%" and other tags inside of ruby 

76 blocks we have to use a split approach here that fails for 

77 that too. 

78 """ 

79 tokens = self._block_re.split(text) 

80 tokens.reverse() 

81 state = idx = 0 

82 try: 

83 while True: 

84 # text 

85 if state == 0: 

86 val = tokens.pop() 

87 yield idx, Other, val 

88 idx += len(val) 

89 state = 1 

90 # block starts 

91 elif state == 1: 

92 tag = tokens.pop() 

93 # literals 

94 if tag in ('<%%', '%%>'): 

95 yield idx, Other, tag 

96 idx += 3 

97 state = 0 

98 # comment 

99 elif tag == '<%#': 

100 yield idx, Comment.Preproc, tag 

101 val = tokens.pop() 

102 yield idx + 3, Comment, val 

103 idx += 3 + len(val) 

104 state = 2 

105 # blocks or output 

106 elif tag in ('<%', '<%=', '<%-'): 

107 yield idx, Comment.Preproc, tag 

108 idx += len(tag) 

109 data = tokens.pop() 

110 r_idx = 0 

111 for r_idx, r_token, r_value in \ 

112 self.ruby_lexer.get_tokens_unprocessed(data): 

113 yield r_idx + idx, r_token, r_value 

114 idx += len(data) 

115 state = 2 

116 elif tag in ('%>', '-%>'): 

117 yield idx, Error, tag 

118 idx += len(tag) 

119 state = 0 

120 # % raw ruby statements 

121 else: 

122 yield idx, Comment.Preproc, tag[0] 

123 r_idx = 0 

124 for r_idx, r_token, r_value in \ 

125 self.ruby_lexer.get_tokens_unprocessed(tag[1:]): 

126 yield idx + 1 + r_idx, r_token, r_value 

127 idx += len(tag) 

128 state = 0 

129 # block ends 

130 elif state == 2: 

131 tag = tokens.pop() 

132 if tag not in ('%>', '-%>'): 

133 yield idx, Other, tag 

134 else: 

135 yield idx, Comment.Preproc, tag 

136 idx += len(tag) 

137 state = 0 

138 except IndexError: 

139 return 

140 

141 def analyse_text(text): 

142 if '<%' in text and '%>' in text: 

143 return 0.4 
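
# A minimal usage sketch (not part of the original module): like any Pygments
# lexer, ErbLexer plugs into the standard `highlight` entry point. The
# template string below is made up for illustration.
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     from pygments.lexers.templates import ErbLexer
#
#     print(highlight('<p><%= @user.name %></p>',
#                     ErbLexer(), TerminalFormatter()))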


class SmartyLexer(RegexLexer):
    """
    Generic Smarty template lexer.

    Just highlights Smarty code between the preprocessor directives; other
    data is left untouched by the lexer.
    """

    name = 'Smarty'
    url = 'https://www.smarty.net/'
    aliases = ['smarty']
    filenames = ['*.tpl']
    mimetypes = ['application/x-smarty']

    flags = re.MULTILINE | re.DOTALL

    tokens = {
        'root': [
            (r'[^{]+', Other),
            (r'(\{)(\*.*?\*)(\})',
             bygroups(Comment.Preproc, Comment, Comment.Preproc)),
            (r'(\{php\})(.*?)(\{/php\})',
             bygroups(Comment.Preproc, using(PhpLexer, startinline=True),
                      Comment.Preproc)),
            (r'(\{)(/?[a-zA-Z_]\w*)(\s*)',
             bygroups(Comment.Preproc, Name.Function, Text), 'smarty'),
            (r'\{', Comment.Preproc, 'smarty')
        ],
        'smarty': [
            (r'\s+', Text),
            (r'\{', Comment.Preproc, '#push'),
            (r'\}', Comment.Preproc, '#pop'),
            (r'#[a-zA-Z_]\w*#', Name.Variable),
            (r'\$[a-zA-Z_]\w*(\.\w+)*', Name.Variable),
            (r'[~!%^&*()+=|\[\]:;,.<>/?@-]', Operator),
            (r'(true|false|null)\b', Keyword.Constant),
            (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'[a-zA-Z_]\w*', Name.Attribute)
        ]
    }

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\{if\s+.*?\}.*?\{/if\}', text):
            rv += 0.15
        if re.search(r'\{include\s+file=.*?\}', text):
            rv += 0.15
        if re.search(r'\{foreach\s+.*?\}.*?\{/foreach\}', text):
            rv += 0.15
        if re.search(r'\{\$.*?\}', text):
            rv += 0.01
        return rv


class VelocityLexer(RegexLexer):
    """
    Generic Velocity template lexer.

    Just highlights Velocity directives and variable references; other
    data is left untouched by the lexer.
    """

    name = 'Velocity'
    url = 'https://velocity.apache.org/'
    aliases = ['velocity']
    filenames = ['*.vm', '*.fhtml']

    flags = re.MULTILINE | re.DOTALL

    identifier = r'[a-zA-Z_]\w*'

    tokens = {
        'root': [
            (r'[^{#$]+', Other),
            (r'(#)(\*.*?\*)(#)',
             bygroups(Comment.Preproc, Comment, Comment.Preproc)),
            (r'(##)(.*?$)',
             bygroups(Comment.Preproc, Comment)),
            (r'(#\{?)(' + identifier + r')(\}?)(\s?\()',
             bygroups(Comment.Preproc, Name.Function, Comment.Preproc, Punctuation),
             'directiveparams'),
            (r'(#\{?)(' + identifier + r')(\}|\b)',
             bygroups(Comment.Preproc, Name.Function, Comment.Preproc)),
            (r'\$!?\{?', Punctuation, 'variable')
        ],
        'variable': [
            (identifier, Name.Variable),
            (r'\(', Punctuation, 'funcparams'),
            (r'(\.)(' + identifier + r')',
             bygroups(Punctuation, Name.Variable), '#push'),
            (r'\}', Punctuation, '#pop'),
            default('#pop')
        ],
        'directiveparams': [
            (r'(&&|\|\||==?|!=?|[-<>+*%&|^/])|\b(eq|ne|gt|lt|ge|le|not|in)\b',
             Operator),
            (r'\[', Operator, 'rangeoperator'),
            (r'\b' + identifier + r'\b', Name.Function),
            include('funcparams')
        ],
        'rangeoperator': [
            (r'\.\.', Operator),
            include('funcparams'),
            (r'\]', Operator, '#pop')
        ],
        'funcparams': [
            (r'\$!?\{?', Punctuation, 'variable'),
            (r'\s+', Text),
            (r'[,:]', Punctuation),
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r"'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r"0[xX][0-9a-fA-F]+[Ll]?", Number),
            (r"\b[0-9]+\b", Number),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'\(', Punctuation, '#push'),
            (r'\)', Punctuation, '#pop'),
            (r'\{', Punctuation, '#push'),
            (r'\}', Punctuation, '#pop'),
            (r'\[', Punctuation, '#push'),
            (r'\]', Punctuation, '#pop'),
        ]
    }

    def analyse_text(text):
        rv = 0.0
        if re.search(r'#\{?macro\}?\(.*?\).*?#\{?end\}?', text, re.DOTALL):
            rv += 0.25
        if re.search(r'#\{?if\}?\(.+?\).*?#\{?end\}?', text, re.DOTALL):
            rv += 0.15
        if re.search(r'#\{?foreach\}?\(.+?\).*?#\{?end\}?', text, re.DOTALL):
            rv += 0.15
        if re.search(r'\$!?\{?[a-zA-Z_]\w*(\([^)]*\))?'
                     r'(\.\w+(\([^)]*\))?)*\}?', text):
            rv += 0.01
        return rv


class VelocityHtmlLexer(DelegatingLexer):
    """
    Subclass of the `VelocityLexer` that highlights unlexed data
    with the `HtmlLexer`.
    """

    name = 'HTML+Velocity'
    aliases = ['html+velocity']
    alias_filenames = ['*.html', '*.fhtml']
    mimetypes = ['text/html+velocity']

    def __init__(self, **options):
        super().__init__(HtmlLexer, VelocityLexer, **options)
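
# How the delegating combinations in this module work (an explanatory sketch,
# not part of the original module): `DelegatingLexer` runs the template lexer
# first and then feeds everything that lexer yielded as `Other` tokens to the
# root lexer, so template syntax and the surrounding markup are highlighted
# by different lexers in a single pass. For example:
#
#     from pygments.lexers.templates import VelocityHtmlLexer
#
#     tokens = list(VelocityHtmlLexer().get_tokens('<b>$greeting</b>'))
#     # '<b>' is tokenized by HtmlLexer, '$greeting' by VelocityLexer.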


class VelocityXmlLexer(DelegatingLexer):
    """
    Subclass of the `VelocityLexer` that highlights unlexed data
    with the `XmlLexer`.
    """

    name = 'XML+Velocity'
    aliases = ['xml+velocity']
    alias_filenames = ['*.xml', '*.vm']
    mimetypes = ['application/xml+velocity']

    def __init__(self, **options):
        super().__init__(XmlLexer, VelocityLexer, **options)

    def analyse_text(text):
        rv = VelocityLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class DjangoLexer(RegexLexer):
    """
    Generic `django <http://www.djangoproject.com/documentation/templates/>`_
    and `jinja <https://jinja.pocoo.org/jinja/>`_ template lexer.

    It just highlights django/jinja code between the preprocessor directives;
    other data is left untouched by the lexer.
    """

    name = 'Django/Jinja'
    aliases = ['django', 'jinja']
    mimetypes = ['application/x-django-templating', 'application/x-jinja']

    flags = re.M | re.S

    tokens = {
        'root': [
            (r'[^{]+', Other),
            (r'\{\{', Comment.Preproc, 'var'),
            # jinja/django comments
            (r'\{#.*?#\}', Comment),
            # django comments
            (r'(\{%)(-?\s*)(comment)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endcomment)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Comment, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # raw jinja blocks
            (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)'
             r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})',
             bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc,
                      Text, Comment.Preproc, Text, Keyword, Text,
                      Comment.Preproc)),
            # filter blocks
            (r'(\{%)(-?\s*)(filter)(\s+)([a-zA-Z_]\w*)',
             bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function),
             'block'),
            (r'(\{%)(-?\s*)([a-zA-Z_]\w*)',
             bygroups(Comment.Preproc, Text, Keyword), 'block'),
            (r'\{', Other)
        ],
        'varnames': [
            (r'(\|)(\s*)([a-zA-Z_]\w*)',
             bygroups(Operator, Text, Name.Function)),
            (r'(is)(\s+)(not)?(\s+)?([a-zA-Z_]\w*)',
             bygroups(Keyword, Text, Keyword, Text, Name.Function)),
            (r'(_|true|false|none|True|False|None)\b', Keyword.Pseudo),
            (r'(in|as|reversed|recursive|not|and|or|is|if|else|import|'
             r'with(?:(?:out)?\s*context)?|scoped|ignore\s+missing)\b',
             Keyword),
            (r'(loop|block|super|forloop)\b', Name.Builtin),
            (r'[a-zA-Z_][\w-]*', Name.Variable),
            (r'\.\w+', Name.Variable),
            (r':?"(\\\\|\\[^\\]|[^"\\])*"', String.Double),
            (r":?'(\\\\|\\[^\\]|[^'\\])*'", String.Single),
            (r'([{}()\[\]+\-*/%,:~]|[><=]=?|!=)', Operator),
            (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|"
             r"0[xX][0-9a-fA-F]+[Ll]?", Number),
        ],
        'var': [
            (r'\s+', Text),
            (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames')
        ],
        'block': [
            (r'\s+', Text),
            (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'),
            include('varnames'),
            (r'.', Punctuation)
        ]
    }

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\{%\s*(block|extends)', text) is not None:
            rv += 0.4
        if re.search(r'\{%\s*if\s*.*?%\}', text) is not None:
            rv += 0.1
        if re.search(r'\{\{.*?\}\}', text) is not None:
            rv += 0.1
        return rv
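
# A short illustrative sketch (not part of the original module): the
# `analyse_text` scores above are what `pygments.lexers.guess_lexer` compares
# when it has to pick a lexer for unknown text.
#
#     from pygments.lexers import guess_lexer
#
#     lexer = guess_lexer('{% extends "base.html" %}{{ title }}')
#     # DjangoLexer rates this 0.4 + 0.1, so a Django/Jinja-based lexer is a
#     # likely winner for this snippet.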


class MyghtyLexer(RegexLexer):
    """
    Generic myghty templates lexer. Code that isn't Myghty
    markup is yielded as `Token.Other`.

    .. versionadded:: 0.6
    """

    name = 'Myghty'
    url = 'http://www.myghty.org/'
    aliases = ['myghty']
    filenames = ['*.myt', 'autodelegate']
    mimetypes = ['application/x-myghty']

    tokens = {
        'root': [
            (r'\s+', Text),
            (r'(?s)(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Text, Name.Function, Name.Tag,
                      using(this), Name.Tag)),
            (r'(?s)(<%\w+)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Name.Function, Name.Tag,
                      using(PythonLexer), Name.Tag)),
            (r'(<&[^|])(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            (r'(?s)(<&\|)(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PythonLexer), Name.Tag)),
            (r'</&>', Name.Tag),
            (r'(?s)(<%!?)(.*?)(%>)',
             bygroups(Name.Tag, using(PythonLexer), Name.Tag)),
            (r'(?<=^)#[^\n]*(\n|\Z)', Comment),
            (r'(?<=^)(%)([^\n]*)(\n|\Z)',
             bygroups(Name.Tag, using(PythonLexer), Other)),
            (r"""(?sx)
                 (.+?)               # anything, followed by:
                 (?:
                  (?<=\n)(?=[%#]) |  # an eval or comment line
                  (?=</?[%&]) |      # a substitution or block or
                                     # call start or end
                                     # - don't consume
                  (\\\n) |           # an escaped newline
                  \Z                 # end of string
                 )""", bygroups(Other, Operator)),
        ]
    }


class MyghtyHtmlLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `HtmlLexer`.

    .. versionadded:: 0.6
    """

    name = 'HTML+Myghty'
    aliases = ['html+myghty']
    mimetypes = ['text/html+myghty']

    def __init__(self, **options):
        super().__init__(HtmlLexer, MyghtyLexer, **options)


class MyghtyXmlLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `XmlLexer`.

    .. versionadded:: 0.6
    """

    name = 'XML+Myghty'
    aliases = ['xml+myghty']
    mimetypes = ['application/xml+myghty']

    def __init__(self, **options):
        super().__init__(XmlLexer, MyghtyLexer, **options)


class MyghtyJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `JavascriptLexer`.

    .. versionadded:: 0.6
    """

    name = 'JavaScript+Myghty'
    aliases = ['javascript+myghty', 'js+myghty']
    mimetypes = ['application/x-javascript+myghty',
                 'text/x-javascript+myghty',
                 'text/javascript+myghty']

    def __init__(self, **options):
        super().__init__(JavascriptLexer, MyghtyLexer, **options)


class MyghtyCssLexer(DelegatingLexer):
    """
    Subclass of the `MyghtyLexer` that highlights unlexed data
    with the `CssLexer`.

    .. versionadded:: 0.6
    """

    name = 'CSS+Myghty'
    aliases = ['css+myghty']
    mimetypes = ['text/css+myghty']

    def __init__(self, **options):
        super().__init__(CssLexer, MyghtyLexer, **options)


class MasonLexer(RegexLexer):
    """
    Generic mason templates lexer. Stolen from Myghty lexer. Code that isn't
    Mason markup is HTML.

    .. versionadded:: 1.4
    """
    name = 'Mason'
    url = 'http://www.masonhq.com/'
    aliases = ['mason']
    filenames = ['*.m', '*.mhtml', '*.mc', '*.mi', 'autohandler', 'dhandler']
    mimetypes = ['application/x-mason']

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'(?s)(<%doc>)(.*?)(</%doc>)',
             bygroups(Name.Tag, Comment.Multiline, Name.Tag)),
            (r'(?s)(<%(?:def|method))(\s*)(.*?)(>)(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, Whitespace, Name.Function, Name.Tag,
                      using(this), Name.Tag)),
            (r'(?s)(<%(\w+)(.*?)(>))(.*?)(</%\2\s*>)',
             bygroups(Name.Tag, None, None, None, using(PerlLexer), Name.Tag)),
            (r'(?s)(<&[^|])(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
            (r'(?s)(<&\|)(.*?)(,.*?)?(&>)',
             bygroups(Name.Tag, Name.Function, using(PerlLexer), Name.Tag)),
            (r'</&>', Name.Tag),
            (r'(?s)(<%!?)(.*?)(%>)',
             bygroups(Name.Tag, using(PerlLexer), Name.Tag)),
            (r'(?<=^)#[^\n]*(\n|\Z)', Comment),
            (r'(?<=^)(%)([^\n]*)(\n|\Z)',
             bygroups(Name.Tag, using(PerlLexer), Other)),
            (r"""(?sx)
                 (.+?)               # anything, followed by:
                 (?:
                  (?<=\n)(?=[%#]) |  # an eval or comment line
                  (?=</?[%&]) |      # a substitution or block or
                                     # call start or end
                                     # - don't consume
                  (\\\n) |           # an escaped newline
                  \Z                 # end of string
                 )""", bygroups(using(HtmlLexer), Operator)),
        ]
    }

    def analyse_text(text):
        result = 0.0
        if re.search(r'</%(class|doc|init)>', text) is not None:
            result = 1.0
        elif re.search(r'<&.+&>', text, re.DOTALL) is not None:
            result = 0.11
        return result


class MakoLexer(RegexLexer):
    """
    Generic mako templates lexer. Code that isn't Mako
    markup is yielded as `Token.Other`.

    .. versionadded:: 0.7
    """

    name = 'Mako'
    url = 'http://www.makotemplates.org/'
    aliases = ['mako']
    filenames = ['*.mao']
    mimetypes = ['application/x-mako']

    tokens = {
        'root': [
            (r'(\s*)(%)(\s*end(?:\w+))(\n|\Z)',
             bygroups(Text.Whitespace, Comment.Preproc, Keyword, Other)),
            (r'(\s*)(%)([^\n]*)(\n|\Z)',
             bygroups(Text.Whitespace, Comment.Preproc, using(PythonLexer), Other)),
            (r'(\s*)(##[^\n]*)(\n|\Z)',
             bygroups(Text.Whitespace, Comment.Single, Text.Whitespace)),
            (r'(?s)<%doc>.*?</%doc>', Comment.Multiline),
            (r'(<%)([\w.:]+)',
             bygroups(Comment.Preproc, Name.Builtin), 'tag'),
            (r'(</%)([\w.:]+)(>)',
             bygroups(Comment.Preproc, Name.Builtin, Comment.Preproc)),
            (r'<%(?=([\w.:]+))', Comment.Preproc, 'ondeftags'),
            (r'(?s)(<%(?:!?))(.*?)(%>)',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'(\$\{)(.*?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'''(?sx)
                (.+?)                # anything, followed by:
                (?:
                 (?<=\n)(?=%|\#\#) | # an eval or comment line
                 (?=\#\*) |          # multiline comment
                 (?=</?%) |          # a python block
                                     # call start or end
                 (?=\$\{) |          # a substitution
                 (?<=\n)(?=\s*%) |
                                     # - don't consume
                 (\\\n) |            # an escaped newline
                 \Z                  # end of string
                )
            ''', bygroups(Other, Operator)),
            (r'\s+', Text),
        ],
        'ondeftags': [
            (r'<%', Comment.Preproc),
            (r'(?<=<%)(include|inherit|namespace|page)', Name.Builtin),
            include('tag'),
        ],
        'tag': [
            (r'((?:\w+)\s*=)(\s*)(".*?")',
             bygroups(Name.Attribute, Text, String)),
            (r'/?\s*>', Comment.Preproc, '#pop'),
            (r'\s+', Text),
        ],
        'attr': [
            ('".*?"', String, '#pop'),
            ("'.*?'", String, '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
    }


class MakoHtmlLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `HtmlLexer`.

    .. versionadded:: 0.7
    """

    name = 'HTML+Mako'
    aliases = ['html+mako']
    mimetypes = ['text/html+mako']

    def __init__(self, **options):
        super().__init__(HtmlLexer, MakoLexer, **options)


class MakoXmlLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `XmlLexer`.

    .. versionadded:: 0.7
    """

    name = 'XML+Mako'
    aliases = ['xml+mako']
    mimetypes = ['application/xml+mako']

    def __init__(self, **options):
        super().__init__(XmlLexer, MakoLexer, **options)


class MakoJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `JavascriptLexer`.

    .. versionadded:: 0.7
    """

    name = 'JavaScript+Mako'
    aliases = ['javascript+mako', 'js+mako']
    mimetypes = ['application/x-javascript+mako',
                 'text/x-javascript+mako',
                 'text/javascript+mako']

    def __init__(self, **options):
        super().__init__(JavascriptLexer, MakoLexer, **options)


class MakoCssLexer(DelegatingLexer):
    """
    Subclass of the `MakoLexer` that highlights unlexed data
    with the `CssLexer`.

    .. versionadded:: 0.7
    """

    name = 'CSS+Mako'
    aliases = ['css+mako']
    mimetypes = ['text/css+mako']

    def __init__(self, **options):
        super().__init__(CssLexer, MakoLexer, **options)


# Genshi and Cheetah lexers courtesy of Matt Good.

class CheetahPythonLexer(Lexer):
    """
    Lexer for handling Cheetah's special $ tokens in Python syntax.
    """

    def get_tokens_unprocessed(self, text):
        pylexer = PythonLexer(**self.options)
        for pos, type_, value in pylexer.get_tokens_unprocessed(text):
            if type_ == Token.Error and value == '$':
                type_ = Comment.Preproc
            yield pos, type_, value


class CheetahLexer(RegexLexer):
    """
    Generic cheetah templates lexer. Code that isn't Cheetah
    markup is yielded as `Token.Other`. This also works for
    `spitfire templates`_ which use the same syntax.

    .. _spitfire templates: http://code.google.com/p/spitfire/
    """

    name = 'Cheetah'
    url = 'http://www.cheetahtemplate.org/'
    aliases = ['cheetah', 'spitfire']
    filenames = ['*.tmpl', '*.spt']
    mimetypes = ['application/x-cheetah', 'application/x-spitfire']

    tokens = {
        'root': [
            (r'(##[^\n]*)$',
             bygroups(Comment)),
            (r'#[*](.|\n)*?[*]#', Comment),
            (r'#end[^#\n]*(?:#|$)', Comment.Preproc),
            (r'#slurp$', Comment.Preproc),
            (r'(#[a-zA-Z]+)([^#\n]*)(#|$)',
             bygroups(Comment.Preproc, using(CheetahPythonLexer),
                      Comment.Preproc)),
            # TODO support other Python syntax like $foo['bar']
            (r'(\$)([a-zA-Z_][\w.]*\w)',
             bygroups(Comment.Preproc, using(CheetahPythonLexer))),
            (r'(?s)(\$\{!?)(.*?)(\})',
             bygroups(Comment.Preproc, using(CheetahPythonLexer),
                      Comment.Preproc)),
            (r'''(?sx)
                (.+?)               # anything, followed by:
                (?:
                 (?=\#[#a-zA-Z]*) | # an eval comment
                 (?=\$[a-zA-Z_{]) | # a substitution
                 \Z                 # end of string
                )
            ''', Other),
            (r'\s+', Text),
        ],
    }


class CheetahHtmlLexer(DelegatingLexer):
    """
    Subclass of the `CheetahLexer` that highlights unlexed data
    with the `HtmlLexer`.
    """

    name = 'HTML+Cheetah'
    aliases = ['html+cheetah', 'html+spitfire', 'htmlcheetah']
    mimetypes = ['text/html+cheetah', 'text/html+spitfire']

    def __init__(self, **options):
        super().__init__(HtmlLexer, CheetahLexer, **options)


class CheetahXmlLexer(DelegatingLexer):
    """
    Subclass of the `CheetahLexer` that highlights unlexed data
    with the `XmlLexer`.
    """

    name = 'XML+Cheetah'
    aliases = ['xml+cheetah', 'xml+spitfire']
    mimetypes = ['application/xml+cheetah', 'application/xml+spitfire']

    def __init__(self, **options):
        super().__init__(XmlLexer, CheetahLexer, **options)


class CheetahJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `CheetahLexer` that highlights unlexed data
    with the `JavascriptLexer`.
    """

    name = 'JavaScript+Cheetah'
    aliases = ['javascript+cheetah', 'js+cheetah',
               'javascript+spitfire', 'js+spitfire']
    mimetypes = ['application/x-javascript+cheetah',
                 'text/x-javascript+cheetah',
                 'text/javascript+cheetah',
                 'application/x-javascript+spitfire',
                 'text/x-javascript+spitfire',
                 'text/javascript+spitfire']

    def __init__(self, **options):
        super().__init__(JavascriptLexer, CheetahLexer, **options)


class GenshiTextLexer(RegexLexer):
    """
    A lexer that highlights genshi text templates.
    """

    name = 'Genshi Text'
    url = 'http://genshi.edgewall.org/'
    aliases = ['genshitext']
    mimetypes = ['application/x-genshi-text', 'text/x-genshi']

    tokens = {
        'root': [
            (r'[^#$\s]+', Other),
            (r'^(\s*)(##.*)$', bygroups(Text, Comment)),
            (r'^(\s*)(#)', bygroups(Text, Comment.Preproc), 'directive'),
            include('variable'),
            (r'[#$\s]', Other),
        ],
        'directive': [
            (r'\n', Text, '#pop'),
            (r'(?:def|for|if)\s+.*', using(PythonLexer), '#pop'),
            (r'(choose|when|with)([^\S\n]+)(.*)',
             bygroups(Keyword, Text, using(PythonLexer)), '#pop'),
            (r'(choose|otherwise)\b', Keyword, '#pop'),
            (r'(end\w*)([^\S\n]*)(.*)', bygroups(Keyword, Text, Comment), '#pop'),
        ],
        'variable': [
            (r'(?<!\$)(\$\{)(.+?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'(?<!\$)(\$)([a-zA-Z_][\w.]*)',
             Name.Variable),
        ]
    }


class GenshiMarkupLexer(RegexLexer):
    """
    Base lexer for Genshi markup, used by `HtmlGenshiLexer` and
    `GenshiLexer`.
    """

    flags = re.DOTALL

    tokens = {
        'root': [
            (r'[^<$]+', Other),
            (r'(<\?python)(.*?)(\?>)',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            # yield style and script blocks as Other
            (r'<\s*(script|style)\s*.*?>.*?<\s*/\1\s*>', Other),
            (r'<\s*py:[a-zA-Z0-9]+', Name.Tag, 'pytag'),
            (r'<\s*[a-zA-Z0-9:.]+', Name.Tag, 'tag'),
            include('variable'),
            (r'[<$]', Other),
        ],
        'pytag': [
            (r'\s+', Text),
            (r'[\w:-]+\s*=', Name.Attribute, 'pyattr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'pyattr': [
            ('(")(.*?)(")', bygroups(String, using(PythonLexer), String), '#pop'),
            ("(')(.*?)(')", bygroups(String, using(PythonLexer), String), '#pop'),
            (r'[^\s>]+', String, '#pop'),
        ],
        'tag': [
            (r'\s+', Text),
            (r'py:[\w-]+\s*=', Name.Attribute, 'pyattr'),
            (r'[\w:-]+\s*=', Name.Attribute, 'attr'),
            (r'/?\s*>', Name.Tag, '#pop'),
        ],
        'attr': [
            ('"', String, 'attr-dstring'),
            ("'", String, 'attr-sstring'),
            (r'[^\s>]*', String, '#pop')
        ],
        'attr-dstring': [
            ('"', String, '#pop'),
            include('strings'),
            ("'", String)
        ],
        'attr-sstring': [
            ("'", String, '#pop'),
            include('strings'),
            ("'", String)
        ],
        'strings': [
            ('[^"\'$]+', String),
            include('variable')
        ],
        'variable': [
            (r'(?<!\$)(\$\{)(.+?)(\})',
             bygroups(Comment.Preproc, using(PythonLexer), Comment.Preproc)),
            (r'(?<!\$)(\$)([a-zA-Z_][\w\.]*)',
             Name.Variable),
        ]
    }


class HtmlGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ HTML templates.
    """

    name = 'HTML+Genshi'
    aliases = ['html+genshi', 'html+kid']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+genshi']

    def __init__(self, **options):
        super().__init__(HtmlLexer, GenshiMarkupLexer, **options)

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\$\{.*?\}', text) is not None:
            rv += 0.2
        if re.search(r'py:(.*?)=["\']', text) is not None:
            rv += 0.2
        return rv + HtmlLexer.analyse_text(text) - 0.01


class GenshiLexer(DelegatingLexer):
    """
    A lexer that highlights `genshi <http://genshi.edgewall.org/>`_ and
    `kid <http://kid-templating.org/>`_ XML templates.
    """

    name = 'Genshi'
    aliases = ['genshi', 'kid', 'xml+genshi', 'xml+kid']
    filenames = ['*.kid']
    alias_filenames = ['*.xml']
    mimetypes = ['application/x-genshi', 'application/x-kid']

    def __init__(self, **options):
        super().__init__(XmlLexer, GenshiMarkupLexer, **options)

    def analyse_text(text):
        rv = 0.0
        if re.search(r'\$\{.*?\}', text) is not None:
            rv += 0.2
        if re.search(r'py:(.*?)=["\']', text) is not None:
            rv += 0.2
        return rv + XmlLexer.analyse_text(text) - 0.01


class JavascriptGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights javascript code in genshi text templates.
    """

    name = 'JavaScript+Genshi Text'
    aliases = ['js+genshitext', 'js+genshi', 'javascript+genshitext',
               'javascript+genshi']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+genshi',
                 'text/x-javascript+genshi',
                 'text/javascript+genshi']

    def __init__(self, **options):
        super().__init__(JavascriptLexer, GenshiTextLexer, **options)

    def analyse_text(text):
        return GenshiLexer.analyse_text(text) - 0.05


class CssGenshiLexer(DelegatingLexer):
    """
    A lexer that highlights CSS definitions in genshi text templates.
    """

    name = 'CSS+Genshi Text'
    aliases = ['css+genshitext', 'css+genshi']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+genshi']

    def __init__(self, **options):
        super().__init__(CssLexer, GenshiTextLexer, **options)

    def analyse_text(text):
        return GenshiLexer.analyse_text(text) - 0.05


class RhtmlLexer(DelegatingLexer):
    """
    Subclass of the ERB lexer that highlights the unlexed data with the
    html lexer.

    Nested Javascript and CSS are highlighted too.
    """

    name = 'RHTML'
    aliases = ['rhtml', 'html+erb', 'html+ruby']
    filenames = ['*.rhtml']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+ruby']

    def __init__(self, **options):
        super().__init__(HtmlLexer, ErbLexer, **options)

    def analyse_text(text):
        rv = ErbLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            # one more than the XmlErbLexer returns
            rv += 0.5
        return rv


class XmlErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights data outside preprocessor
    directives with the `XmlLexer`.
    """

    name = 'XML+Ruby'
    aliases = ['xml+ruby', 'xml+erb']
    alias_filenames = ['*.xml']
    mimetypes = ['application/xml+ruby']

    def __init__(self, **options):
        super().__init__(XmlLexer, ErbLexer, **options)

    def analyse_text(text):
        rv = ErbLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights unlexed data with the `CssLexer`.
    """

    name = 'CSS+Ruby'
    aliases = ['css+ruby', 'css+erb']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+ruby']

    def __init__(self, **options):
        super().__init__(CssLexer, ErbLexer, **options)

    def analyse_text(text):
        return ErbLexer.analyse_text(text) - 0.05


class JavascriptErbLexer(DelegatingLexer):
    """
    Subclass of `ErbLexer` which highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Ruby'
    aliases = ['javascript+ruby', 'js+ruby', 'javascript+erb', 'js+erb']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+ruby',
                 'text/x-javascript+ruby',
                 'text/javascript+ruby']

    def __init__(self, **options):
        super().__init__(JavascriptLexer, ErbLexer, **options)

    def analyse_text(text):
        return ErbLexer.analyse_text(text) - 0.05


class HtmlPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` that highlights unhandled data with the `HtmlLexer`.

    Nested Javascript and CSS are highlighted too.
    """

    name = 'HTML+PHP'
    aliases = ['html+php']
    filenames = ['*.phtml']
    alias_filenames = ['*.php', '*.html', '*.htm', '*.xhtml',
                       '*.php[345]']
    mimetypes = ['application/x-php',
                 'application/x-httpd-php', 'application/x-httpd-php3',
                 'application/x-httpd-php4', 'application/x-httpd-php5']

    def __init__(self, **options):
        super().__init__(HtmlLexer, PhpLexer, **options)

    def analyse_text(text):
        rv = PhpLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv


class XmlPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` that highlights unhandled data with the `XmlLexer`.
    """

    name = 'XML+PHP'
    aliases = ['xml+php']
    alias_filenames = ['*.xml', '*.php', '*.php[345]']
    mimetypes = ['application/xml+php']

    def __init__(self, **options):
        super().__init__(XmlLexer, PhpLexer, **options)

    def analyse_text(text):
        rv = PhpLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` which highlights unmatched data with the `CssLexer`.
    """

    name = 'CSS+PHP'
    aliases = ['css+php']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+php']

    def __init__(self, **options):
        super().__init__(CssLexer, PhpLexer, **options)

    def analyse_text(text):
        return PhpLexer.analyse_text(text) - 0.05


class JavascriptPhpLexer(DelegatingLexer):
    """
    Subclass of `PhpLexer` which highlights unmatched data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+PHP'
    aliases = ['javascript+php', 'js+php']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+php',
                 'text/x-javascript+php',
                 'text/javascript+php']

    def __init__(self, **options):
        super().__init__(JavascriptLexer, PhpLexer, **options)

    def analyse_text(text):
        return PhpLexer.analyse_text(text)


class HtmlSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `HtmlLexer`.

    Nested Javascript and CSS are highlighted too.
    """

    name = 'HTML+Smarty'
    aliases = ['html+smarty']
    alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.tpl']
    mimetypes = ['text/html+smarty']

    def __init__(self, **options):
        super().__init__(HtmlLexer, SmartyLexer, **options)

    def analyse_text(text):
        rv = SmartyLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv


class XmlSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `XmlLexer`.
    """

    name = 'XML+Smarty'
    aliases = ['xml+smarty']
    alias_filenames = ['*.xml', '*.tpl']
    mimetypes = ['application/xml+smarty']

    def __init__(self, **options):
        super().__init__(XmlLexer, SmartyLexer, **options)

    def analyse_text(text):
        rv = SmartyLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `CssLexer`.
    """

    name = 'CSS+Smarty'
    aliases = ['css+smarty']
    alias_filenames = ['*.css', '*.tpl']
    mimetypes = ['text/css+smarty']

    def __init__(self, **options):
        super().__init__(CssLexer, SmartyLexer, **options)

    def analyse_text(text):
        return SmartyLexer.analyse_text(text) - 0.05


class JavascriptSmartyLexer(DelegatingLexer):
    """
    Subclass of the `SmartyLexer` that highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Smarty'
    aliases = ['javascript+smarty', 'js+smarty']
    alias_filenames = ['*.js', '*.tpl']
    mimetypes = ['application/x-javascript+smarty',
                 'text/x-javascript+smarty',
                 'text/javascript+smarty']

    def __init__(self, **options):
        super().__init__(JavascriptLexer, SmartyLexer, **options)

    def analyse_text(text):
        return SmartyLexer.analyse_text(text) - 0.05


class HtmlDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `HtmlLexer`.

    Nested Javascript and CSS are highlighted too.
    """

    name = 'HTML+Django/Jinja'
    aliases = ['html+django', 'html+jinja', 'htmldjango']
    filenames = ['*.html.j2', '*.htm.j2', '*.xhtml.j2',
                 '*.html.jinja2', '*.htm.jinja2', '*.xhtml.jinja2']
    alias_filenames = ['*.html', '*.htm', '*.xhtml']
    mimetypes = ['text/html+django', 'text/html+jinja']

    def __init__(self, **options):
        super().__init__(HtmlLexer, DjangoLexer, **options)

    def analyse_text(text):
        rv = DjangoLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):
            rv += 0.5
        return rv


class XmlDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `XmlLexer`.
    """

    name = 'XML+Django/Jinja'
    aliases = ['xml+django', 'xml+jinja']
    filenames = ['*.xml.j2', '*.xml.jinja2']
    alias_filenames = ['*.xml']
    mimetypes = ['application/xml+django', 'application/xml+jinja']

    def __init__(self, **options):
        super().__init__(XmlLexer, DjangoLexer, **options)

    def analyse_text(text):
        rv = DjangoLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class CssDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `CssLexer`.
    """

    name = 'CSS+Django/Jinja'
    aliases = ['css+django', 'css+jinja']
    filenames = ['*.css.j2', '*.css.jinja2']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+django', 'text/css+jinja']

    def __init__(self, **options):
        super().__init__(CssLexer, DjangoLexer, **options)

    def analyse_text(text):
        return DjangoLexer.analyse_text(text) - 0.05


class JavascriptDjangoLexer(DelegatingLexer):
    """
    Subclass of the `DjangoLexer` that highlights unlexed data with the
    `JavascriptLexer`.
    """

    name = 'JavaScript+Django/Jinja'
    aliases = ['javascript+django', 'js+django',
               'javascript+jinja', 'js+jinja']
    filenames = ['*.js.j2', '*.js.jinja2']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+django',
                 'application/x-javascript+jinja',
                 'text/x-javascript+django',
                 'text/x-javascript+jinja',
                 'text/javascript+django',
                 'text/javascript+jinja']

    def __init__(self, **options):
        super().__init__(JavascriptLexer, DjangoLexer, **options)

    def analyse_text(text):
        return DjangoLexer.analyse_text(text) - 0.05


class JspRootLexer(RegexLexer):
    """
    Base for the `JspLexer`. Yields `Token.Other` for area outside of
    JSP tags.

    .. versionadded:: 0.7
    """

    tokens = {
        'root': [
            (r'<%\S?', Keyword, 'sec'),
            # FIXME: I want to make these keywords but still parse attributes.
            (r'</?jsp:(forward|getProperty|include|plugin|setProperty|useBean).*?>',
             Keyword),
            (r'[^<]+', Other),
            (r'<', Other),
        ],
        'sec': [
            (r'%>', Keyword, '#pop'),
            # note: '\w\W' != '.' without DOTALL.
            (r'[\w\W]+?(?=%>|\Z)', using(JavaLexer)),
        ],
    }


class JspLexer(DelegatingLexer):
    """
    Lexer for Java Server Pages.

    .. versionadded:: 0.7
    """
    name = 'Java Server Page'
    aliases = ['jsp']
    filenames = ['*.jsp']
    mimetypes = ['application/x-jsp']

    def __init__(self, **options):
        super().__init__(XmlLexer, JspRootLexer, **options)

    def analyse_text(text):
        rv = JavaLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        if '<%' in text and '%>' in text:
            rv += 0.1
        return rv


class EvoqueLexer(RegexLexer):
    """
    For files using the Evoque templating system.

    .. versionadded:: 1.1
    """
    name = 'Evoque'
    aliases = ['evoque']
    filenames = ['*.evoque']
    mimetypes = ['application/x-evoque']

    flags = re.DOTALL

    tokens = {
        'root': [
            (r'[^#$]+', Other),
            (r'#\[', Comment.Multiline, 'comment'),
            (r'\$\$', Other),
            # svn keywords
            (r'\$\w+:[^$\n]*\$', Comment.Multiline),
            # directives: begin, end
            (r'(\$)(begin|end)(\{(%)?)(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      String, Punctuation)),
            # directives: evoque, overlay
            # see doc for handling first name arg: /directives/evoque/
            # + minor inconsistency: the "name" in e.g. $overlay{name=site_base}
            # should be using(PythonLexer), not passed out as String
            (r'(\$)(evoque|overlay)(\{(%)?)(\s*[#\w\-"\'.]+)?'
             r'(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      String, using(PythonLexer), Punctuation)),
            # directives: if, for, prefer, test
            (r'(\$)(\w+)(\{(%)?)(.*?)((?(4)%)\})',
             bygroups(Punctuation, Name.Builtin, Punctuation, None,
                      using(PythonLexer), Punctuation)),
            # directive clauses (no {} expression)
            (r'(\$)(else|rof|fi)', bygroups(Punctuation, Name.Builtin)),
            # expressions
            (r'(\$\{(%)?)(.*?)((!)(.*?))?((?(2)%)\})',
             bygroups(Punctuation, None, using(PythonLexer),
                      Name.Builtin, None, None, Punctuation)),
            (r'#', Other),
        ],
        'comment': [
            (r'[^\]#]', Comment.Multiline),
            (r'#\[', Comment.Multiline, '#push'),
            (r'\]#', Comment.Multiline, '#pop'),
            (r'[\]#]', Comment.Multiline)
        ],
    }

    def analyse_text(text):
        """Evoque templates use $evoque, which is unique."""
        if '$evoque' in text:
            return 1


class EvoqueHtmlLexer(DelegatingLexer):
    """
    Subclass of the `EvoqueLexer` that highlights unlexed data with the
    `HtmlLexer`.

    .. versionadded:: 1.1
    """
    name = 'HTML+Evoque'
    aliases = ['html+evoque']
    filenames = ['*.html']
    mimetypes = ['text/html+evoque']

    def __init__(self, **options):
        super().__init__(HtmlLexer, EvoqueLexer, **options)

    def analyse_text(text):
        return EvoqueLexer.analyse_text(text)


class EvoqueXmlLexer(DelegatingLexer):
    """
    Subclass of the `EvoqueLexer` that highlights unlexed data with the
    `XmlLexer`.

    .. versionadded:: 1.1
    """
    name = 'XML+Evoque'
    aliases = ['xml+evoque']
    filenames = ['*.xml']
    mimetypes = ['application/xml+evoque']

    def __init__(self, **options):
        super().__init__(XmlLexer, EvoqueLexer, **options)

    def analyse_text(text):
        return EvoqueLexer.analyse_text(text)


class ColdfusionLexer(RegexLexer):
    """
    Coldfusion statements
    """
    name = 'cfstatement'
    aliases = ['cfs']
    filenames = []
    mimetypes = []
    flags = re.IGNORECASE

    tokens = {
        'root': [
            (r'//.*?\n', Comment.Single),
            (r'/\*(?:.|\n)*?\*/', Comment.Multiline),
            (r'\+\+|--', Operator),
            (r'[-+*/^&=!]', Operator),
            (r'<=|>=|<|>|==', Operator),
            (r'mod\b', Operator),
            (r'(eq|lt|gt|lte|gte|not|is|and|or)\b', Operator),
            (r'\|\||&&', Operator),
            (r'\?', Operator),
            (r'"', String.Double, 'string'),
            # There is a special rule for allowing html in single quoted
            # strings, evidently.
            (r"'.*?'", String.Single),
            (r'\d+', Number),
            (r'(if|else|len|var|xml|default|break|switch|component|property|function|do|'
             r'try|catch|in|continue|for|return|while|required|any|array|binary|boolean|'
             r'component|date|guid|numeric|query|string|struct|uuid|case)\b', Keyword),
            (r'(true|false|null)\b', Keyword.Constant),
            (r'(application|session|client|cookie|super|this|variables|arguments)\b',
             Name.Constant),
            (r'([a-z_$][\w.]*)(\s*)(\()',
             bygroups(Name.Function, Text, Punctuation)),
            (r'[a-z_$][\w.]*', Name.Variable),
            (r'[()\[\]{};:,.\\]', Punctuation),
            (r'\s+', Text),
        ],
        'string': [
            (r'""', String.Double),
            (r'#.+?#', String.Interp),
            (r'[^"#]+', String.Double),
            (r'#', String.Double),
            (r'"', String.Double, '#pop'),
        ],
    }


class ColdfusionMarkupLexer(RegexLexer):
    """
    Coldfusion markup only
    """
    name = 'Coldfusion'
    aliases = ['cf']
    filenames = []
    mimetypes = []

    tokens = {
        'root': [
            (r'[^<]+', Other),
            include('tags'),
            (r'<[^<>]*', Other),
        ],
        'tags': [
            (r'<!---', Comment.Multiline, 'cfcomment'),
            (r'(?s)<!--.*?-->', Comment),
            (r'<cfoutput.*?>', Name.Builtin, 'cfoutput'),
            (r'(?s)(<cfscript.*?>)(.+?)(</cfscript.*?>)',
             bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
            # negative lookbehind is for strings with embedded >
            (r'(?s)(</?cf(?:component|include|if|else|elseif|loop|return|'
             r'dbinfo|dump|abort|location|invoke|throw|file|savecontent|'
             r'mailpart|mail|header|content|zip|image|lock|argument|try|'
             r'catch|break|directory|http|set|function|param)\b)(.*?)((?<!\\)>)',
             bygroups(Name.Builtin, using(ColdfusionLexer), Name.Builtin)),
        ],
        'cfoutput': [
            (r'[^#<]+', Other),
            (r'(#)(.*?)(#)', bygroups(Punctuation, using(ColdfusionLexer),
                                      Punctuation)),
            # (r'<cfoutput.*?>', Name.Builtin, '#push'),
            (r'</cfoutput.*?>', Name.Builtin, '#pop'),
            include('tags'),
            (r'(?s)<[^<>]*', Other),
            (r'#', Other),
        ],
        'cfcomment': [
            (r'<!---', Comment.Multiline, '#push'),
            (r'--->', Comment.Multiline, '#pop'),
            (r'([^<-]|<(?!!---)|-(?!-->))+', Comment.Multiline),
        ],
    }


class ColdfusionHtmlLexer(DelegatingLexer):
    """
    Coldfusion markup in html
    """
    name = 'Coldfusion HTML'
    aliases = ['cfm']
    filenames = ['*.cfm', '*.cfml']
    mimetypes = ['application/x-coldfusion']

    def __init__(self, **options):
        super().__init__(HtmlLexer, ColdfusionMarkupLexer, **options)


class ColdfusionCFCLexer(DelegatingLexer):
    """
    Coldfusion markup/script components

    .. versionadded:: 2.0
    """
    name = 'Coldfusion CFC'
    aliases = ['cfc']
    filenames = ['*.cfc']
    mimetypes = []

    def __init__(self, **options):
        super().__init__(ColdfusionHtmlLexer, ColdfusionLexer, **options)


class SspLexer(DelegatingLexer):
    """
    Lexer for Scalate Server Pages.

    .. versionadded:: 1.4
    """
    name = 'Scalate Server Page'
    aliases = ['ssp']
    filenames = ['*.ssp']
    mimetypes = ['application/x-ssp']

    def __init__(self, **options):
        super().__init__(XmlLexer, JspRootLexer, **options)

    def analyse_text(text):
        rv = 0.0
        if re.search(r'val \w+\s*:', text):
            rv += 0.6
        if looks_like_xml(text):
            rv += 0.2
        if '<%' in text and '%>' in text:
            rv += 0.1
        return rv


class TeaTemplateRootLexer(RegexLexer):
    """
    Base for the `TeaTemplateLexer`. Yields `Token.Other` for area outside of
    code blocks.

    .. versionadded:: 1.5
    """

    tokens = {
        'root': [
            (r'<%\S?', Keyword, 'sec'),
            (r'[^<]+', Other),
            (r'<', Other),
        ],
        'sec': [
            (r'%>', Keyword, '#pop'),
            # note: '\w\W' != '.' without DOTALL.
            (r'[\w\W]+?(?=%>|\Z)', using(TeaLangLexer)),
        ],
    }


class TeaTemplateLexer(DelegatingLexer):
    """
    Lexer for `Tea Templates <http://teatrove.org/>`_.

    .. versionadded:: 1.5
    """
    name = 'Tea'
    aliases = ['tea']
    filenames = ['*.tea']
    mimetypes = ['text/x-tea']

    def __init__(self, **options):
        super().__init__(XmlLexer, TeaTemplateRootLexer, **options)

    def analyse_text(text):
        rv = TeaLangLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        if '<%' in text and '%>' in text:
            rv += 0.1
        return rv


class LassoHtmlLexer(DelegatingLexer):
    """
    Subclass of the `LassoLexer` which highlights unhandled data with the
    `HtmlLexer`.

    Nested JavaScript and CSS are also highlighted.

    .. versionadded:: 1.6
    """

    name = 'HTML+Lasso'
    aliases = ['html+lasso']
    alias_filenames = ['*.html', '*.htm', '*.xhtml', '*.lasso', '*.lasso[89]',
                       '*.incl', '*.inc', '*.las']
    mimetypes = ['text/html+lasso',
                 'application/x-httpd-lasso',
                 'application/x-httpd-lasso[89]']

    def __init__(self, **options):
        super().__init__(HtmlLexer, LassoLexer, **options)

    def analyse_text(text):
        rv = LassoLexer.analyse_text(text) - 0.01
        if html_doctype_matches(text):  # same as HTML lexer
            rv += 0.5
        return rv


class LassoXmlLexer(DelegatingLexer):
    """
    Subclass of the `LassoLexer` which highlights unhandled data with the
    `XmlLexer`.

    .. versionadded:: 1.6
    """

    name = 'XML+Lasso'
    aliases = ['xml+lasso']
    alias_filenames = ['*.xml', '*.lasso', '*.lasso[89]',
                       '*.incl', '*.inc', '*.las']
    mimetypes = ['application/xml+lasso']

    def __init__(self, **options):
        super().__init__(XmlLexer, LassoLexer, **options)

    def analyse_text(text):
        rv = LassoLexer.analyse_text(text) - 0.01
        if looks_like_xml(text):
            rv += 0.4
        return rv


class LassoCssLexer(DelegatingLexer):
    """
    Subclass of the `LassoLexer` which highlights unhandled data with the
    `CssLexer`.

    .. versionadded:: 1.6
    """

    name = 'CSS+Lasso'
    aliases = ['css+lasso']
    alias_filenames = ['*.css']
    mimetypes = ['text/css+lasso']

    def __init__(self, **options):
        options['requiredelimiters'] = True
        super().__init__(CssLexer, LassoLexer, **options)

    def analyse_text(text):
        rv = LassoLexer.analyse_text(text) - 0.05
        if re.search(r'\w+:[^;]+;', text):
            rv += 0.1
        if 'padding:' in text:
            rv += 0.1
        return rv


class LassoJavascriptLexer(DelegatingLexer):
    """
    Subclass of the `LassoLexer` which highlights unhandled data with the
    `JavascriptLexer`.

    .. versionadded:: 1.6
    """

    name = 'JavaScript+Lasso'
    aliases = ['javascript+lasso', 'js+lasso']
    alias_filenames = ['*.js']
    mimetypes = ['application/x-javascript+lasso',
                 'text/x-javascript+lasso',
                 'text/javascript+lasso']

    def __init__(self, **options):
        options['requiredelimiters'] = True
        super().__init__(JavascriptLexer, LassoLexer, **options)

    def analyse_text(text):
        rv = LassoLexer.analyse_text(text) - 0.05
        return rv

1771 

1772 

1773class HandlebarsLexer(RegexLexer): 

1774 """ 

1775 Generic handlebars template lexer. 

1776 

1777 Highlights only the Handlebars template tags (stuff between `{{` and `}}`). 

1778 Everything else is left for a delegating lexer. 

1779 

1780 .. versionadded:: 2.0 

1781 """ 

1782 

1783 name = "Handlebars" 

1784 url = 'https://handlebarsjs.com/' 

1785 aliases = ['handlebars'] 

1786 

1787 tokens = { 

1788 'root': [ 

1789 (r'[^{]+', Other), 

1790 

1791 # Comment start {{! }} or {{!-- 

1792 (r'\{\{!.*\}\}', Comment), 

1793 

1794 # HTML Escaping open {{{expression 

1795 (r'(\{\{\{)(\s*)', bygroups(Comment.Special, Text), 'tag'), 

1796 

1797 # {{blockOpen {{#blockOpen {{/blockClose with optional tilde ~ 

1798 (r'(\{\{)([#~/]+)([^\s}]*)', 

1799 bygroups(Comment.Preproc, Number.Attribute, Number.Attribute), 'tag'), 

1800 (r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'tag'), 

1801 ], 

1802 

1803 'tag': [ 

1804 (r'\s+', Text), 

1805 # HTML Escaping close }}} 

1806 (r'\}\}\}', Comment.Special, '#pop'), 

1807 # blockClose}}, includes optional tilde ~ 

1808 (r'(~?)(\}\})', bygroups(Number, Comment.Preproc), '#pop'), 

1809 

1810 # {{opt=something}} 

1811 (r'([^\s}]+)(=)', bygroups(Name.Attribute, Operator)), 

1812 

1813 # Partials {{> ...}} 

1814 (r'(>)(\s*)(@partial-block)', bygroups(Keyword, Text, Keyword)), 

1815 (r'(#?>)(\s*)([\w-]+)', bygroups(Keyword, Text, Name.Variable)), 

1816 (r'(>)(\s*)(\()', bygroups(Keyword, Text, Punctuation), 

1817 'dynamic-partial'), 

1818 

1819 include('generic'), 

1820 ], 

1821 'dynamic-partial': [ 

1822 (r'\s+', Text), 

1823 (r'\)', Punctuation, '#pop'), 

1824 

1825 (r'(lookup)(\s+)(\.|this)(\s+)', bygroups(Keyword, Text, 

1826 Name.Variable, Text)), 

1827 (r'(lookup)(\s+)(\S+)', bygroups(Keyword, Text, 

1828 using(this, state='variable'))), 

1829 (r'[\w-]+', Name.Function), 

1830 

1831 include('generic'), 

1832 ], 

1833 'variable': [ 

1834 (r'[()/@a-zA-Z][\w-]*', Name.Variable), 

1835 (r'\.[\w-]+', Name.Variable), 

1836 (r'(this\/|\.\/|(\.\.\/)+)[\w-]+', Name.Variable), 

1837 ], 

1838 'generic': [ 

1839 include('variable'), 

1840 

1841 # borrowed from DjangoLexer 

1842 (r':?"(\\\\|\\[^\\]|[^"\\])*"', String.Double), 

1843 (r":?'(\\\\|\\[^\\]|[^'\\])*'", String.Single), 

1844 (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|" 

1845 r"0[xX][0-9a-fA-F]+[Ll]?", Number), 

1846 ] 

1847 } 

1848 

1849 
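
# Sketch of the state flow above on a made-up fragment (helper name is
# hypothetical): '{{#each ...}}' enters 'tag' via the blockOpen rule and
# '}}' pops back to 'root'; text outside the tags is emitted as Other so a
# delegating lexer can re-lex it.
def _demo_handlebars_tokens():
    lexer = HandlebarsLexer()
    sample = '{{#each items}}{{this}}{{/each}}'
    return list(lexer.get_tokens(sample))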

1850class HandlebarsHtmlLexer(DelegatingLexer): 

1851 """ 

1852 Subclass of the `HandlebarsLexer` that highlights unlexed data with the 

1853 `HtmlLexer`. 

1854 

1855 .. versionadded:: 2.0 

1856 """ 

1857 

1858 name = "HTML+Handlebars" 

1859 aliases = ["html+handlebars"] 

1860 filenames = ['*.handlebars', '*.hbs'] 

1861 mimetypes = ['text/html+handlebars', 'text/x-handlebars-template'] 

1862 

1863 def __init__(self, **options): 

1864 super().__init__(HtmlLexer, HandlebarsLexer, **options) 

1865 

1866 
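
# Usage sketch for the delegating pair (helper name and markup are made
# up): HandlebarsLexer claims the {{...}} spans and leaves the rest as
# Other, which DelegatingLexer re-lexes with HtmlLexer.
def _demo_handlebars_html():
    from pygments import highlight
    from pygments.formatters import NullFormatter
    sample = '<ul>{{#each items}}<li>{{this}}</li>{{/each}}</ul>'
    # NullFormatter writes the token text unchanged, so the result equals
    # the input (modulo a trailing newline) while exercising both lexers.
    return highlight(sample, HandlebarsHtmlLexer(), NullFormatter())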

1867class YamlJinjaLexer(DelegatingLexer): 

1868 """ 

1869 Subclass of the `DjangoLexer` that highlights unlexed data with the 

1870 `YamlLexer`. 

1871 

1872 Commonly used in SaltStack salt states. 

1873 

1874 .. versionadded:: 2.0 

1875 """ 

1876 

1877 name = 'YAML+Jinja' 

1878 aliases = ['yaml+jinja', 'salt', 'sls'] 

1879 filenames = ['*.sls', '*.yaml.j2', '*.yml.j2', '*.yaml.jinja2', '*.yml.jinja2'] 

1880 mimetypes = ['text/x-yaml+jinja', 'text/x-sls'] 

1881 

1882 def __init__(self, **options): 

1883 super().__init__(YamlLexer, DjangoLexer, **options) 

1884 

1885 
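
# Illustrative Salt-state fragment (helper name is made up): DjangoLexer,
# which also covers Jinja syntax, takes the {{ ... }} and {% ... %} spans
# while YamlLexer handles the surrounding YAML.
def _demo_yaml_jinja():
    lexer = YamlJinjaLexer()
    sample = 'name: {{ grains["id"] }}\n'
    return list(lexer.get_tokens(sample))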

1886class LiquidLexer(RegexLexer): 

1887 """ 

1888 Lexer for Liquid templates. 

1889 

1890 .. versionadded:: 2.0 

1891 """ 

1892 name = 'liquid' 

1893 url = 'https://www.rubydoc.info/github/Shopify/liquid' 

1894 aliases = ['liquid'] 

1895 filenames = ['*.liquid'] 

1896 

1897 tokens = { 

1898 'root': [ 

1899 (r'[^{]+', Text), 

1900 # tags and block tags 

1901 (r'(\{%)(\s*)', bygroups(Punctuation, Whitespace), 'tag-or-block'), 

1902 # output tags 

1903 (r'(\{\{)(\s*)([^\s}]+)', 

1904 bygroups(Punctuation, Whitespace, using(this, state='generic')), 

1905 'output'), 

1906 (r'\{', Text) 

1907 ], 

1908 

1909 'tag-or-block': [ 

1910 # builtin logic blocks 

1911 (r'(if|unless|elsif|case)(?=\s+)', Keyword.Reserved, 'condition'), 

1912 (r'(when)(\s+)', bygroups(Keyword.Reserved, Whitespace), 

1913 combined('end-of-block', 'whitespace', 'generic')), 

1914 (r'(else)(\s*)(%\})', 

1915 bygroups(Keyword.Reserved, Whitespace, Punctuation), '#pop'), 

1916 

1917 # other builtin blocks 

1918 (r'(capture)(\s+)([^\s%]+)(\s*)(%\})', 

1919 bygroups(Name.Tag, Whitespace, using(this, state='variable'), 

1920 Whitespace, Punctuation), '#pop'), 

1921 (r'(comment)(\s*)(%\})', 

1922 bygroups(Name.Tag, Whitespace, Punctuation), 'comment'), 

1923 (r'(raw)(\s*)(%\})', 

1924 bygroups(Name.Tag, Whitespace, Punctuation), 'raw'), 

1925 

1926 # end of block 

1927 (r'(end(case|unless|if))(\s*)(%\})', 

1928 bygroups(Keyword.Reserved, None, Whitespace, Punctuation), '#pop'), 

1929 (r'(end([^\s%]+))(\s*)(%\})', 

1930 bygroups(Name.Tag, None, Whitespace, Punctuation), '#pop'), 

1931 

1932 # builtin tags (assign and include are handled together with usual tags) 

1933 (r'(cycle)(\s+)(?:([^\s:]*)(:))?(\s*)', 

1934 bygroups(Name.Tag, Whitespace, 

1935 using(this, state='generic'), Punctuation, Whitespace), 

1936 'variable-tag-markup'), 

1937 

1938 # other tags or blocks 

1939 (r'([^\s%]+)(\s*)', bygroups(Name.Tag, Whitespace), 'tag-markup') 

1940 ], 

1941 

1942 'output': [ 

1943 include('whitespace'), 

1944 (r'\}\}', Punctuation, '#pop'), # end of output 

1945 

1946 (r'\|', Punctuation, 'filters') 

1947 ], 

1948 

1949 'filters': [ 

1950 include('whitespace'), 

1951 (r'\}\}', Punctuation, ('#pop', '#pop')), # end of filters and output 

1952 

1953 (r'([^\s|:]+)(:?)(\s*)', 

1954 bygroups(Name.Function, Punctuation, Whitespace), 'filter-markup') 

1955 ], 

1956 

1957 'filter-markup': [ 

1958 (r'\|', Punctuation, '#pop'), 

1959 include('end-of-tag'), 

1960 include('default-param-markup') 

1961 ], 

1962 

1963 'condition': [ 

1964 include('end-of-block'), 

1965 include('whitespace'), 

1966 

1967 (r'([^\s=!><]+)(\s*)([=!><]=?)(\s*)(\S+)(\s*)(%\})', 

1968 bygroups(using(this, state='generic'), Whitespace, Operator, 

1969 Whitespace, using(this, state='generic'), Whitespace, 

1970 Punctuation)), 

1971 (r'\b!', Operator), 

1972 (r'\bnot\b', Operator.Word), 

1973 (r'([\w.\'"]+)(\s+)(contains)(\s+)([\w.\'"]+)', 

1974 bygroups(using(this, state='generic'), Whitespace, Operator.Word, 

1975 Whitespace, using(this, state='generic'))), 

1976 

1977 include('generic'), 

1978 include('whitespace') 

1979 ], 

1980 

1981 'generic-value': [ 

1982 include('generic'), 

1983 include('end-at-whitespace') 

1984 ], 

1985 

1986 'operator': [ 

1987 (r'(\s*)((=|!|>|<)=?)(\s*)', 

1988 bygroups(Whitespace, Operator, None, Whitespace), '#pop'), 

1989 (r'(\s*)(\bcontains\b)(\s*)', 

1990 bygroups(Whitespace, Operator.Word, Whitespace), '#pop'), 

1991 ], 

1992 

1993 'end-of-tag': [ 

1994 (r'\}\}', Punctuation, '#pop') 

1995 ], 

1996 

1997 'end-of-block': [ 

1998 (r'%\}', Punctuation, ('#pop', '#pop')) 

1999 ], 

2000 

2001 'end-at-whitespace': [ 

2002 (r'\s+', Whitespace, '#pop') 

2003 ], 

2004 

2005 # states for unknown markup 

2006 'param-markup': [ 

2007 include('whitespace'), 

2008 # params with colons or equals 

2009 (r'([^\s=:]+)(\s*)(=|:)', 

2010 bygroups(Name.Attribute, Whitespace, Operator)), 

2011 # explicit variables 

2012 (r'(\{\{)(\s*)([^\s}]+)(\s*)(\}\})', 

2013 bygroups(Punctuation, Whitespace, using(this, state='variable'), 

2014 Whitespace, Punctuation)), 

2015 

2016 include('string'), 

2017 include('number'), 

2018 include('keyword'), 

2019 (r',', Punctuation) 

2020 ], 

2021 

2022 'default-param-markup': [ 

2023 include('param-markup'), 

2024 (r'.', Text) # fallback for switches / variables / un-quoted strings / ... 

2025 ], 

2026 

2027 'variable-param-markup': [ 

2028 include('param-markup'), 

2029 include('variable'), 

2030 (r'.', Text) # fallback 

2031 ], 

2032 

2033 'tag-markup': [ 

2034 (r'%\}', Punctuation, ('#pop', '#pop')), # end of tag 

2035 include('default-param-markup') 

2036 ], 

2037 

2038 'variable-tag-markup': [ 

2039 (r'%\}', Punctuation, ('#pop', '#pop')), # end of tag 

2040 include('variable-param-markup') 

2041 ], 

2042 

2043 # states for different values types 

2044 'keyword': [ 

2045 (r'\b(false|true)\b', Keyword.Constant) 

2046 ], 

2047 

2048 'variable': [ 

2049 (r'[a-zA-Z_]\w*', Name.Variable), 

2050 (r'(?<=\w)\.(?=\w)', Punctuation) 

2051 ], 

2052 

2053 'string': [ 

2054 (r"'[^']*'", String.Single), 

2055 (r'"[^"]*"', String.Double) 

2056 ], 

2057 

2058 'number': [ 

2059 (r'\d+\.\d+', Number.Float), 

2060 (r'\d+', Number.Integer) 

2061 ], 

2062 

2063 'generic': [ # decides between variable, string, keyword and number 

2064 include('keyword'), 

2065 include('string'), 

2066 include('number'), 

2067 include('variable') 

2068 ], 

2069 

2070 'whitespace': [ 

2071 (r'[ \t]+', Whitespace) 

2072 ], 

2073 

2074 # states for builtin blocks 

2075 'comment': [ 

2076 (r'(\{%)(\s*)(endcomment)(\s*)(%\})', 

2077 bygroups(Punctuation, Whitespace, Name.Tag, Whitespace, 

2078 Punctuation), ('#pop', '#pop')), 

2079 (r'.', Comment) 

2080 ], 

2081 

2082 'raw': [ 

2083 (r'[^{]+', Text), 

2084 (r'(\{%)(\s*)(endraw)(\s*)(%\})', 

2085 bygroups(Punctuation, Whitespace, Name.Tag, Whitespace, 

2086 Punctuation), '#pop'), 

2087 (r'\{', Text) 

2088 ], 

2089 } 

2090 

2091 
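
# How the Liquid states above compose, on a made-up template (helper name
# is hypothetical): '{%' pushes 'tag-or-block', 'if' pushes 'condition',
# and '%}' pops both via 'end-of-block'; '{{ ... }}' runs through 'output'.
def _demo_liquid_tokens():
    lexer = LiquidLexer()
    sample = '{% if user %}Hello, {{ user.name }}!{% endif %}'
    return list(lexer.get_tokens(sample))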

2092class TwigLexer(RegexLexer): 

2093 """ 

2094 Twig template lexer. 

2095 

2096 It just highlights Twig code between the preprocessor directives; 

2097 other data is left untouched by the lexer. 

2098 

2099 .. versionadded:: 2.0 

2100 """ 

2101 

2102 name = 'Twig' 

2103 aliases = ['twig'] 

2104 mimetypes = ['application/x-twig'] 

2105 

2106 flags = re.M | re.S 

2107 

2108 # Note that a backslash is included in the following two patterns 

2109 # PHP uses a backslash as a namespace separator 

2110 _ident_char = r'[\\\w-]|[^\x00-\x7f]' 

2111 _ident_begin = r'(?:[\\_a-z]|[^\x00-\x7f])' 

2112 _ident_end = r'(?:' + _ident_char + ')*' 

2113 _ident_inner = _ident_begin + _ident_end 

2114 

2115 tokens = { 

2116 'root': [ 

2117 (r'[^{]+', Other), 

2118 (r'\{\{', Comment.Preproc, 'var'), 

2119 # twig comments 

2120 (r'\{\#.*?\#\}', Comment), 

2121 # raw twig blocks 

2122 (r'(\{%)(-?\s*)(raw)(\s*-?)(%\})(.*?)' 

2123 r'(\{%)(-?\s*)(endraw)(\s*-?)(%\})', 

2124 bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc, 

2125 Other, Comment.Preproc, Text, Keyword, Text, 

2126 Comment.Preproc)), 

2127 (r'(\{%)(-?\s*)(verbatim)(\s*-?)(%\})(.*?)' 

2128 r'(\{%)(-?\s*)(endverbatim)(\s*-?)(%\})', 

2129 bygroups(Comment.Preproc, Text, Keyword, Text, Comment.Preproc, 

2130 Other, Comment.Preproc, Text, Keyword, Text, 

2131 Comment.Preproc)), 

2132 # filter blocks 

2133 (r'(\{%%)(-?\s*)(filter)(\s+)(%s)' % _ident_inner, 

2134 bygroups(Comment.Preproc, Text, Keyword, Text, Name.Function), 

2135 'tag'), 

2136 (r'(\{%)(-?\s*)([a-zA-Z_]\w*)', 

2137 bygroups(Comment.Preproc, Text, Keyword), 'tag'), 

2138 (r'\{', Other), 

2139 ], 

2140 'varnames': [ 

2141 (r'(\|)(\s*)(%s)' % _ident_inner, 

2142 bygroups(Operator, Text, Name.Function)), 

2143 (r'(is)(\s+)(not)?(\s*)(%s)' % _ident_inner, 

2144 bygroups(Keyword, Text, Keyword, Text, Name.Function)), 

2145 (r'(?i)(true|false|none|null)\b', Keyword.Pseudo), 

2146 (r'(in|not|and|b-and|or|b-or|b-xor|is|' 

2147 r'if|elseif|else|import|' 

2148 r'constant|defined|divisibleby|empty|even|iterable|odd|sameas|' 

2149 r'matches|starts\s+with|ends\s+with)\b', 

2150 Keyword), 

2151 (r'(loop|block|parent)\b', Name.Builtin), 

2152 (_ident_inner, Name.Variable), 

2153 (r'\.' + _ident_inner, Name.Variable), 

2154 (r'\.[0-9]+', Number), 

2155 (r':?"(\\\\|\\[^\\]|[^"\\])*"', String.Double), 

2156 (r":?'(\\\\|\\[^\\]|[^'\\])*'", String.Single), 

2157 (r'([{}()\[\]+\-*/,:~%]|\.\.|\?|:|\*\*|\/\/|!=|[><=]=?)', Operator), 

2158 (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|" 

2159 r"0[xX][0-9a-fA-F]+[Ll]?", Number), 

2160 ], 

2161 'var': [ 

2162 (r'\s+', Text), 

2163 (r'(-?)(\}\})', bygroups(Text, Comment.Preproc), '#pop'), 

2164 include('varnames') 

2165 ], 

2166 'tag': [ 

2167 (r'\s+', Text), 

2168 (r'(-?)(%\})', bygroups(Text, Comment.Preproc), '#pop'), 

2169 include('varnames'), 

2170 (r'.', Punctuation), 

2171 ], 

2172 } 

2173 

2174 
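
# Illustrative Twig fragment (helper name is made up): '{{' enters 'var',
# '{%' plus a tag name enters 'tag', and 'varnames' handles the '|upper'
# filter as Name.Function.
def _demo_twig_tokens():
    lexer = TwigLexer()
    sample = '{% for user in users %}{{ user.name|upper }}{% endfor %}'
    return list(lexer.get_tokens(sample))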

2175class TwigHtmlLexer(DelegatingLexer): 

2176 """ 

2177 Subclass of the `TwigLexer` that highlights unlexed data with the 

2178 `HtmlLexer`. 

2179 

2180 .. versionadded:: 2.0 

2181 """ 

2182 

2183 name = "HTML+Twig" 

2184 aliases = ["html+twig"] 

2185 filenames = ['*.twig'] 

2186 mimetypes = ['text/html+twig'] 

2187 

2188 def __init__(self, **options): 

2189 super().__init__(HtmlLexer, TwigLexer, **options) 

2190 

2191 
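
# Usage sketch (helper name and markup are made up): data the TwigLexer
# leaves as Other is re-highlighted as HTML, the useful default for *.twig
# page templates.
def _demo_twig_html():
    from pygments import highlight
    from pygments.formatters import HtmlFormatter
    sample = '<p>{{ title|e }}</p>'
    return highlight(sample, TwigHtmlLexer(), HtmlFormatter())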

2192class Angular2Lexer(RegexLexer): 

2193 """ 

2194 Generic Angular2 template lexer. 

2195 

2196 Highlights only the Angular template tags: interpolations between `{{` and `}}` 

2197 and the special attribute forms '(event)=', '[property]=', '[(twoWayBinding)]='. 

2198 Everything else is left for a delegating lexer. 

2199 

2200 .. versionadded:: 2.1 

2201 """ 

2202 

2203 name = "Angular2" 

2204 url = 'https://angular.io/guide/template-syntax' 

2205 aliases = ['ng2'] 

2206 

2207 tokens = { 

2208 'root': [ 

2209 (r'[^{([*#]+', Other), 

2210 

2211 # {{meal.name}} 

2212 (r'(\{\{)(\s*)', bygroups(Comment.Preproc, Text), 'ngExpression'), 

2213 

2214 # (click)="deleteOrder()"; [value]="test"; [(twoWayTest)]="foo.bar" 

2215 (r'([([]+)([\w:.-]+)([\])]+)(\s*)(=)(\s*)', 

2216 bygroups(Punctuation, Name.Attribute, Punctuation, Text, Operator, Text), 

2217 'attr'), 

2218 (r'([([]+)([\w:.-]+)([\])]+)(\s*)', 

2219 bygroups(Punctuation, Name.Attribute, Punctuation, Text)), 

2220 

2221 # *ngIf="..."; #f="ngForm" 

2222 (r'([*#])([\w:.-]+)(\s*)(=)(\s*)', 

2223 bygroups(Punctuation, Name.Attribute, Text, Operator, Text), 'attr'), 

2224 (r'([*#])([\w:.-]+)(\s*)', 

2225 bygroups(Punctuation, Name.Attribute, Text)), 

2226 ], 

2227 

2228 'ngExpression': [ 

2229 (r'\s+(\|\s+)?', Text), 

2230 (r'\}\}', Comment.Preproc, '#pop'), 

2231 

2232 # Literals 

2233 (r':?(true|false)', String.Boolean), 

2234 (r':?"(\\\\|\\[^\\]|[^"\\])*"', String.Double), 

2235 (r":?'(\\\\|\\[^\\]|[^'\\])*'", String.Single), 

2236 (r"[0-9](\.[0-9]*)?(eE[+-][0-9])?[flFLdD]?|" 

2237 r"0[xX][0-9a-fA-F]+[Ll]?", Number), 

2238 

2239 # Variables and property access 

2240 (r'[a-zA-Z][\w-]*(\(.*\))?', Name.Variable), 

2241 (r'\.[\w-]+(\(.*\))?', Name.Variable), 

2242 

2243 # inline If 

2244 (r'(\?)(\s*)([^}\s]+)(\s*)(:)(\s*)([^}\s]+)(\s*)', 

2245 bygroups(Operator, Text, String, Text, Operator, Text, String, Text)), 

2246 ], 

2247 'attr': [ 

2248 ('".*?"', String, '#pop'), 

2249 ("'.*?'", String, '#pop'), 

2250 (r'[^\s>]+', String, '#pop'), 

2251 ], 

2252 } 

2253 

2254 
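
# Made-up snippet exercising the attribute shapes matched above (helper
# name is hypothetical): (event)=, [property]=, the *directive form, and a
# {{ }} interpolation.
def _demo_angular2_tokens():
    lexer = Angular2Lexer()
    sample = '<b (click)="save()" [disabled]="busy" *ngIf="ready">{{ n }}</b>'
    return list(lexer.get_tokens(sample))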

2255class Angular2HtmlLexer(DelegatingLexer): 

2256 """ 

2257 Subclass of the `Angular2Lexer` that highlights unlexed data with the 

2258 `HtmlLexer`. 

2259 

2260 .. versionadded:: 2.1 

2261 """ 

2262 

2263 name = "HTML + Angular2" 

2264 aliases = ["html+ng2"] 

2265 filenames = ['*.ng2'] 

2266 

2267 def __init__(self, **options): 

2268 super().__init__(HtmlLexer, Angular2Lexer, **options) 

2269 

2270 

2271class SqlJinjaLexer(DelegatingLexer): 

2272 """ 

2273 Templated SQL lexer. 

2274 

2275 .. versionadded:: 2.13 

2276 """ 

2277 

2278 name = 'SQL+Jinja' 

2279 aliases = ['sql+jinja'] 

2280 filenames = ['*.sql', '*.sql.j2', '*.sql.jinja2'] 

2281 

2282 def __init__(self, **options): 

2283 super().__init__(SqlLexer, DjangoLexer, **options) 

2284 

2285 def analyse_text(text): 

2286 rv = 0.0 

2287 # dbt's ref function 

2288 if re.search(r'\{\{\s*ref\(.*\)\s*\}\}', text): 

2289 rv += 0.4 

2290 # dbt's source function 

2291 if re.search(r'\{\{\s*source\(.*\)\s*\}\}', text): 

2292 rv += 0.25 

2293 # Jinja macro 

2294 if re.search(r'\{%-?\s*macro \w+\(.*\)\s*-?%\}', text): 

2295 rv += 0.15 

2296 return rv
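

# Illustrative dbt-style model (helper name is made up): the ref() call
# alone scores 0.4 here, which should make guess_lexer favour SQL+Jinja
# over plain SQL for such files, though the outcome also depends on the
# competing lexers' own heuristics.
def _demo_sql_jinja_score():
    sample = "select * from {{ ref('orders') }}"
    return SqlJinjaLexer.analyse_text(sample)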