1""" 

2 pygments.lexers.configs 

3 ~~~~~~~~~~~~~~~~~~~~~~~ 

4 

5 Lexers for configuration file formats. 

6 

7 :copyright: Copyright 2006-2022 by the Pygments team, see AUTHORS. 

8 :license: BSD, see LICENSE for details. 

9""" 

10 

11import re 

12 

13from pygments.lexer import ExtendedRegexLexer, RegexLexer, default, words, \ 

14 bygroups, include, using, line_re 

15from pygments.token import Text, Comment, Operator, Keyword, Name, String, \ 

16 Number, Punctuation, Whitespace, Literal, Error, Generic 

17from pygments.lexers.shell import BashLexer 

18from pygments.lexers.data import JsonLexer 

19 

20__all__ = ['IniLexer', 'RegeditLexer', 'PropertiesLexer', 'KconfigLexer', 

21 'Cfengine3Lexer', 'ApacheConfLexer', 'SquidConfLexer', 

22 'NginxConfLexer', 'LighttpdConfLexer', 'DockerLexer', 

23 'TerraformLexer', 'TermcapLexer', 'TerminfoLexer', 

24 'PkgConfigLexer', 'PacmanConfLexer', 'AugeasLexer', 'TOMLLexer', 

25 'NestedTextLexer', 'SingularityLexer', 'UnixConfigLexer'] 

26 

27 

28class IniLexer(RegexLexer): 

29 """ 

30 Lexer for configuration files in INI style. 

31 """ 

32 

33 name = 'INI' 

34 aliases = ['ini', 'cfg', 'dosini'] 

35 filenames = [ 

36 '*.ini', '*.cfg', '*.inf', '.editorconfig', 

37 # systemd unit files 

38 # https://www.freedesktop.org/software/systemd/man/systemd.unit.html 

39 '*.service', '*.socket', '*.device', '*.mount', '*.automount', 

40 '*.swap', '*.target', '*.path', '*.timer', '*.slice', '*.scope', 

41 ] 

42 mimetypes = ['text/x-ini', 'text/inf'] 

43 

44 tokens = { 

45 'root': [ 

46 (r'\s+', Whitespace), 

47 (r'[;#].*', Comment.Single), 

48 (r'(\[.*?\])([ \t]*)$', bygroups(Keyword, Whitespace)), 

49 (r'(.*?)([  \t]*)([=:])([ \t]*)([^;#\n]*)(\\)(\s+)', 

50 bygroups(Name.Attribute, Whitespace, Operator, Whitespace, String, 

51 Text, Whitespace), 

52 "value"), 

53 (r'(.*?)([ \t]*)([=:])([  \t]*)([^ ;#\n]*(?: +[^ ;#\n]+)*)', 

54 bygroups(Name.Attribute, Whitespace, Operator, Whitespace, String)), 

55 # standalone option, supported by some INI parsers 

56 (r'(.+?)$', Name.Attribute), 

57 ], 

58 'value': [ # line continuation 

59 (r'\s+', Whitespace), 

60 (r'(\s*)(.*)(\\)([ \t]*)', 

61 bygroups(Whitespace, String, Text, Whitespace)), 

62 (r'.*$', String, "#pop"), 

63 ], 

64 } 

65 

66 def analyse_text(text): 

67 npos = text.find('\n') 

68 if npos < 3: 

69 return False 

70 return text[0] == '[' and text[npos-1] == ']' 
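

def _demo_ini_lexer():
    # Illustrative sketch, not part of the original module: run the lexer
    # above over a small INI snippet. `highlight` and `NullFormatter` are
    # standard pygments APIs; the sample and this helper's name are invented.
    from pygments import highlight
    from pygments.formatters import NullFormatter
    sample = '[server]\nhost = localhost\n; a comment\n'
    return highlight(sample, IniLexer(), NullFormatter())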


class RegeditLexer(RegexLexer):
    """
    Lexer for Windows Registry files produced by regedit.

    .. versionadded:: 1.6
    """

    name = 'reg'
    url = 'http://en.wikipedia.org/wiki/Windows_Registry#.REG_files'
    aliases = ['registry']
    filenames = ['*.reg']
    mimetypes = ['text/x-windows-registry']

    tokens = {
        'root': [
            (r'Windows Registry Editor.*', Text),
            (r'\s+', Whitespace),
            (r'[;#].*', Comment.Single),
            (r'(\[)(-?)(HKEY_[A-Z_]+)(.*?\])$',
             bygroups(Keyword, Operator, Name.Builtin, Keyword)),
            # String keys, which obey somewhat normal escaping
            (r'("(?:\\"|\\\\|[^"])+")([ \t]*)(=)([ \t]*)',
             bygroups(Name.Attribute, Whitespace, Operator, Whitespace),
             'value'),
            # Bare keys (includes @)
            (r'(.*?)([ \t]*)(=)([ \t]*)',
             bygroups(Name.Attribute, Whitespace, Operator, Whitespace),
             'value'),
        ],
        'value': [
            (r'-', Operator, '#pop'),  # delete value
            (r'(dword|hex(?:\([0-9a-fA-F]\))?)(:)([0-9a-fA-F,]+)',
             bygroups(Name.Variable, Punctuation, Number), '#pop'),
            # As far as I know, .reg files do not support line continuation.
            (r'.+', String, '#pop'),
            default('#pop'),
        ]
    }

    def analyse_text(text):
        return text.startswith('Windows Registry Editor')


class PropertiesLexer(RegexLexer):
    """
    Lexer for configuration files in Java's properties format.

    Note: trailing whitespace counts as part of the value as per spec.

    .. versionadded:: 1.4
    """

    name = 'Properties'
    aliases = ['properties', 'jproperties']
    filenames = ['*.properties']
    mimetypes = ['text/x-java-properties']

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'[!#].*|/{2}.*', Comment.Single),
            # search for first separator
            (r'([^\\\n]|\\.)*?(?=[ \f\t=:])', Name.Attribute, "separator"),
            # empty key
            (r'.+?$', Name.Attribute),
        ],
        'separator': [
            # search for line continuation escape
            (r'([ \f\t]*)([=:]*)([ \f\t]*)(.*(?<!\\)(?:\\{2})*)(\\)(?!\\)$',
             bygroups(Whitespace, Operator, Whitespace, String, Text),
             ("#pop", "value")),
            (r'([ \f\t]*)([=:]*)([ \f\t]*)(.*)',
             bygroups(Whitespace, Operator, Whitespace, String), "#pop"),
        ],
        'value': [  # line continuation
            (r'\s+', Whitespace),
            # search for line continuation escape
            (r'(\s*)(.*(?<!\\)(?:\\{2})*)(\\)(?!\\)([ \t]*)',
             bygroups(Whitespace, String, Text, Whitespace)),
            (r'.*$', String, "#pop"),
        ],
    }


def _rx_indent(level):
    # Kconfig *always* interprets a tab as 8 spaces, so this is the default.
    # Edit this if you are in an environment where KconfigLexer gets expanded
    # input (tabs expanded to spaces) and the expansion tab width is != 8,
    # e.g. in connection with Trac (trac.ini, [mimeviewer], tab_width).
    # Value range here is 2 <= {tab_width} <= 8.
    tab_width = 8
    # Regex matching a given indentation {level}, assuming that indentation is
    # a multiple of {tab_width}. In other cases there might be problems.
    if tab_width == 2:
        space_repeat = '+'
    else:
        space_repeat = '{1,%d}' % (tab_width - 1)
    if level == 1:
        level_repeat = ''
    else:
        level_repeat = '{%s}' % level
    return r'(?:\t| %s\t| {%s})%s.*\n' % (space_repeat, tab_width, level_repeat)
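
# For illustration (added comment, derived from the code above): with the
# default tab_width of 8, _rx_indent(2) evaluates to
#     (?:\t| {1,7}\t| {8}){2}.*\n
# i.e. two repetitions of "a tab, spaces followed by a tab, or eight
# spaces", then the rest of the line.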


class KconfigLexer(RegexLexer):
    """
    For Linux-style Kconfig files.

    .. versionadded:: 1.6
    """

    name = 'Kconfig'
    aliases = ['kconfig', 'menuconfig', 'linux-config', 'kernel-config']
    # Adjust this if new kconfig file names appear in your environment
    filenames = ['Kconfig*', '*Config.in*', 'external.in*',
                 'standard-modules.in']
    mimetypes = ['text/x-kconfig']
    # No re.MULTILINE, indentation-aware help text needs line-by-line handling
    flags = 0

    def call_indent(level):
        # If indentation >= {level} is detected, enter state 'indent{level}'
        return (_rx_indent(level), String.Doc, 'indent%s' % level)

    def do_indent(level):
        # Print paragraphs of indentation level >= {level} as String.Doc,
        # ignoring blank lines. Then return to 'root' state.
        return [
            (_rx_indent(level), String.Doc),
            (r'\s*\n', Text),
            default('#pop:2')
        ]

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'#.*?\n', Comment.Single),
            (words((
                'mainmenu', 'config', 'menuconfig', 'choice', 'endchoice',
                'comment', 'menu', 'endmenu', 'visible if', 'if', 'endif',
                'source', 'prompt', 'select', 'depends on', 'default',
                'range', 'option'), suffix=r'\b'),
             Keyword),
            (r'(---help---|help)[\t ]*\n', Keyword, 'help'),
            (r'(bool|tristate|string|hex|int|defconfig_list|modules|env)\b',
             Name.Builtin),
            (r'[!=&|]', Operator),
            (r'[()]', Punctuation),
            (r'[0-9]+', Number.Integer),
            (r"'(''|[^'])*'", String.Single),
            (r'"(""|[^"])*"', String.Double),
            (r'\S+', Text),
        ],
        # Help text is indented, multi-line and ends when a lower indentation
        # level is detected.
        'help': [
            # Skip blank lines after help token, if any
            (r'\s*\n', Text),
            # Determine the first help line's indentation level heuristically(!).
            # Attention: this is not perfect, but works for 99% of "normal"
            # indentation schemes up to a max. indentation level of 7.
            call_indent(7),
            call_indent(6),
            call_indent(5),
            call_indent(4),
            call_indent(3),
            call_indent(2),
            call_indent(1),
            default('#pop'),  # for incomplete help sections without text
        ],
        # Handle text for indentation levels 7 to 1
        'indent7': do_indent(7),
        'indent6': do_indent(6),
        'indent5': do_indent(5),
        'indent4': do_indent(4),
        'indent3': do_indent(3),
        'indent2': do_indent(2),
        'indent1': do_indent(1),
    }


class Cfengine3Lexer(RegexLexer):
    """
    Lexer for CFEngine3 policy files.

    .. versionadded:: 1.5
    """

    name = 'CFEngine3'
    url = 'http://cfengine.org'
    aliases = ['cfengine3', 'cf3']
    filenames = ['*.cf']
    mimetypes = []

    tokens = {
        'root': [
            (r'#.*?\n', Comment),
            (r'(body)(\s+)(\S+)(\s+)(control)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Keyword)),
            (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)(\()',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Function,
                      Punctuation),
             'arglist'),
            (r'(body|bundle)(\s+)(\S+)(\s+)(\w+)',
             bygroups(Keyword, Whitespace, Keyword, Whitespace, Name.Function)),
            (r'(")([^"]+)(")(\s+)(string|slist|int|real)(\s*)(=>)(\s*)',
             bygroups(Punctuation, Name.Variable, Punctuation,
                      Whitespace, Keyword.Type, Whitespace, Operator,
                      Whitespace)),
            (r'(\S+)(\s*)(=>)(\s*)',
             bygroups(Keyword.Reserved, Whitespace, Operator, Text)),
            (r'"', String, 'string'),
            (r'(\w+)(\()', bygroups(Name.Function, Punctuation)),
            (r'([\w.!&|()]+)(::)', bygroups(Name.Class, Punctuation)),
            (r'(\w+)(:)', bygroups(Keyword.Declaration, Punctuation)),
            (r'@[{(][^)}]+[})]', Name.Variable),
            (r'[(){},;]', Punctuation),
            (r'=>', Operator),
            (r'->', Operator),
            (r'\d+\.\d+', Number.Float),
            (r'\d+', Number.Integer),
            (r'\w+', Name.Function),
            (r'\s+', Whitespace),
        ],
        'string': [
            (r'\$[{(]', String.Interpol, 'interpol'),
            (r'\\.', String.Escape),
            (r'"', String, '#pop'),
            (r'\n', String),
            (r'.', String),
        ],
        'interpol': [
            (r'\$[{(]', String.Interpol, '#push'),
            (r'[})]', String.Interpol, '#pop'),
            (r'[^${()}]+', String.Interpol),
        ],
        'arglist': [
            (r'\)', Punctuation, '#pop'),
            (r',', Punctuation),
            (r'\w+', Name.Variable),
            (r'\s+', Whitespace),
        ],
    }


class ApacheConfLexer(RegexLexer):
    """
    Lexer for configuration files following the Apache config file
    format.

    .. versionadded:: 0.6
    """

    name = 'ApacheConf'
    aliases = ['apacheconf', 'aconf', 'apache']
    filenames = ['.htaccess', 'apache.conf', 'apache2.conf']
    mimetypes = ['text/x-apacheconf']
    flags = re.MULTILINE | re.IGNORECASE

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'#(.*\\\n)+.*$|(#.*?)$', Comment),
            (r'(<[^\s>/][^\s>]*)(?:(\s+)(.*))?(>)',
             bygroups(Name.Tag, Whitespace, String, Name.Tag)),
            (r'(</[^\s>]+)(>)',
             bygroups(Name.Tag, Name.Tag)),
            (r'[a-z]\w*', Name.Builtin, 'value'),
            (r'\.+', Text),
        ],
        'value': [
            (r'\\\n', Text),
            (r'\n+', Whitespace, '#pop'),
            (r'\\', Text),
            (r'[^\S\n]+', Whitespace),
            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
            (r'\d+', Number),
            (r'/([*a-z0-9][*\w./-]+)', String.Other),
            (r'(on|off|none|any|all|double|email|dns|min|minimal|'
             r'os|productonly|full|emerg|alert|crit|error|warn|'
             r'notice|info|debug|registry|script|inetd|standalone|'
             r'user|group)\b', Keyword),
            (r'"([^"\\]*(?:\\(.|\n)[^"\\]*)*)"', String.Double),
            (r'[^\s"\\]+', Text)
        ],
    }


class SquidConfLexer(RegexLexer):
    """
    Lexer for squid configuration files.

    .. versionadded:: 0.9
    """

    name = 'SquidConf'
    url = 'http://www.squid-cache.org/'
    aliases = ['squidconf', 'squid.conf', 'squid']
    filenames = ['squid.conf']
    mimetypes = ['text/x-squidconf']
    flags = re.IGNORECASE

    keywords = (
        "access_log", "acl", "always_direct", "announce_host",
        "announce_period", "announce_port", "announce_to", "anonymize_headers",
        "append_domain", "as_whois_server", "auth_param_basic",
        "authenticate_children", "authenticate_program", "authenticate_ttl",
        "broken_posts", "buffered_logs", "cache_access_log", "cache_announce",
        "cache_dir", "cache_dns_program", "cache_effective_group",
        "cache_effective_user", "cache_host", "cache_host_acl",
        "cache_host_domain", "cache_log", "cache_mem", "cache_mem_high",
        "cache_mem_low", "cache_mgr", "cachemgr_passwd", "cache_peer",
        "cache_peer_access", "cache_replacement_policy", "cache_stoplist",
        "cache_stoplist_pattern", "cache_store_log", "cache_swap",
        "cache_swap_high", "cache_swap_log", "cache_swap_low", "client_db",
        "client_lifetime", "client_netmask", "connect_timeout", "coredump_dir",
        "dead_peer_timeout", "debug_options", "delay_access", "delay_class",
        "delay_initial_bucket_level", "delay_parameters", "delay_pools",
        "deny_info", "dns_children", "dns_defnames", "dns_nameservers",
        "dns_testnames", "emulate_httpd_log", "err_html_text",
        "fake_user_agent", "firewall_ip", "forwarded_for", "forward_snmpd_port",
        "fqdncache_size", "ftpget_options", "ftpget_program", "ftp_list_width",
        "ftp_passive", "ftp_user", "half_closed_clients", "header_access",
        "header_replace", "hierarchy_stoplist", "high_response_time_warning",
        "high_page_fault_warning", "hosts_file", "htcp_port", "http_access",
        "http_anonymizer", "httpd_accel", "httpd_accel_host",
        "httpd_accel_port", "httpd_accel_uses_host_header",
        "httpd_accel_with_proxy", "http_port", "http_reply_access",
        "icp_access", "icp_hit_stale", "icp_port", "icp_query_timeout",
        "ident_lookup", "ident_lookup_access", "ident_timeout",
        "incoming_http_average", "incoming_icp_average", "inside_firewall",
        "ipcache_high", "ipcache_low", "ipcache_size", "local_domain",
        "local_ip", "logfile_rotate", "log_fqdn", "log_icp_queries",
        "log_mime_hdrs", "maximum_object_size", "maximum_single_addr_tries",
        "mcast_groups", "mcast_icp_query_timeout", "mcast_miss_addr",
        "mcast_miss_encode_key", "mcast_miss_port", "memory_pools",
        "memory_pools_limit", "memory_replacement_policy", "mime_table",
        "min_http_poll_cnt", "min_icp_poll_cnt", "minimum_direct_hops",
        "minimum_object_size", "minimum_retry_timeout", "miss_access",
        "negative_dns_ttl", "negative_ttl", "neighbor_timeout",
        "neighbor_type_domain", "netdb_high", "netdb_low", "netdb_ping_period",
        "netdb_ping_rate", "never_direct", "no_cache", "passthrough_proxy",
        "pconn_timeout", "pid_filename", "pinger_program", "positive_dns_ttl",
        "prefer_direct", "proxy_auth", "proxy_auth_realm", "query_icmp",
        "quick_abort", "quick_abort_max", "quick_abort_min",
        "quick_abort_pct", "range_offset_limit", "read_timeout",
        "redirect_children", "redirect_program",
        "redirect_rewrites_host_header", "reference_age",
        "refresh_pattern", "reload_into_ims", "request_body_max_size",
        "request_size", "request_timeout", "shutdown_lifetime",
        "single_parent_bypass", "siteselect_timeout", "snmp_access",
        "snmp_incoming_address", "snmp_port", "source_ping", "ssl_proxy",
        "store_avg_object_size", "store_objects_per_bucket",
        "strip_query_terms", "swap_level1_dirs", "swap_level2_dirs",
        "tcp_incoming_address", "tcp_outgoing_address", "tcp_recv_bufsize",
        "test_reachability", "udp_hit_obj", "udp_hit_obj_size",
        "udp_incoming_address", "udp_outgoing_address", "unique_hostname",
        "unlinkd_program", "uri_whitespace", "useragent_log",
        "visible_hostname", "wais_relay", "wais_relay_host", "wais_relay_port",
    )

    opts = (
        "proxy-only", "weight", "ttl", "no-query", "default", "round-robin",
        "multicast-responder", "on", "off", "all", "deny", "allow", "via",
        "parent", "no-digest", "heap", "lru", "realm", "children", "q1", "q2",
        "credentialsttl", "none", "disable", "offline_toggle", "diskd",
    )

    actions = (
        "shutdown", "info", "parameter", "server_list", "client_list",
        r'squid.conf',
    )

    actions_stats = (
        "objects", "vm_objects", "utilization", "ipcache", "fqdncache", "dns",
        "redirector", "io", "reply_headers", "filedescriptors", "netdb",
    )

    actions_log = ("status", "enable", "disable", "clear")

    acls = (
        "url_regex", "urlpath_regex", "referer_regex", "port", "proto",
        "req_mime_type", "rep_mime_type", "method", "browser", "user", "src",
        "dst", "time", "dstdomain", "ident", "snmp_community",
    )

    ip_re = (
        r'(?:(?:(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|0x0*[0-9a-f]{1,2}|'
        r'0+[1-3]?[0-7]{0,2})(?:\.(?:[3-9]\d?|2(?:5[0-5]|[0-4]?\d)?|1\d{0,2}|'
        r'0x0*[0-9a-f]{1,2}|0+[1-3]?[0-7]{0,2})){3})|(?!.*::.*::)(?:(?!:)|'
        r':(?=:))(?:[0-9a-f]{0,4}(?:(?<=::)|(?<!::):)){6}(?:[0-9a-f]{0,4}'
        r'(?:(?<=::)|(?<!::):)[0-9a-f]{0,4}(?:(?<=::)|(?<!:)|(?<=:)(?<!::):)|'
        r'(?:25[0-4]|2[0-4]\d|1\d\d|[1-9]?\d)(?:\.(?:25[0-4]|2[0-4]\d|1\d\d|'
        r'[1-9]?\d)){3}))'
    )

    tokens = {
        'root': [
            (r'\s+', Whitespace),
            (r'#', Comment, 'comment'),
            (words(keywords, prefix=r'\b', suffix=r'\b'), Keyword),
            (words(opts, prefix=r'\b', suffix=r'\b'), Name.Constant),
            # Actions
            (words(actions, prefix=r'\b', suffix=r'\b'), String),
            (words(actions_stats, prefix=r'stats/', suffix=r'\b'), String),
            (words(actions_log, prefix=r'log/', suffix=r'='), String),
            (words(acls, prefix=r'\b', suffix=r'\b'), Keyword),
            (ip_re + r'(?:/(?:' + ip_re + r'|\b\d+\b))?', Number.Float),
            (r'(?:\b\d+\b(?:-\b\d+|%)?)', Number),
            (r'\S+', Text),
        ],
        'comment': [
            (r'\s*TAG:.*', String.Escape, '#pop'),
            (r'.+', Comment, '#pop'),
            default('#pop'),
        ],
    }


class NginxConfLexer(RegexLexer):
    """
    Lexer for Nginx configuration files.

    .. versionadded:: 0.11
    """
    name = 'Nginx configuration file'
    url = 'http://nginx.net/'
    aliases = ['nginx']
    filenames = ['nginx.conf']
    mimetypes = ['text/x-nginx-conf']

    tokens = {
        'root': [
            (r'(include)(\s+)([^\s;]+)', bygroups(Keyword, Whitespace, Name)),
            (r'[^\s;#]+', Keyword, 'stmt'),
            include('base'),
        ],
        'block': [
            (r'\}', Punctuation, '#pop:2'),
            (r'[^\s;#]+', Keyword.Namespace, 'stmt'),
            include('base'),
        ],
        'stmt': [
            (r'\{', Punctuation, 'block'),
            (r';', Punctuation, '#pop'),
            include('base'),
        ],
        'base': [
            (r'#.*\n', Comment.Single),
            (r'on|off', Name.Constant),
            (r'\$[^\s;#()]+', Name.Variable),
            (r'([a-z0-9.-]+)(:)([0-9]+)',
             bygroups(Name, Punctuation, Number.Integer)),
            (r'[a-z-]+/[a-z-+]+', String),  # mimetype
            # (r'[a-zA-Z._-]+', Keyword),
            (r'[0-9]+[km]?\b', Number.Integer),
            (r'(~)(\s*)([^\s{]+)',
             bygroups(Punctuation, Whitespace, String.Regex)),
            (r'[:=~]', Punctuation),
            (r'[^\s;#{}$]+', String),  # catch all
            (r'/[^\s;#]*', Name),  # pathname
            (r'\s+', Whitespace),
            (r'[$;]', Text),  # leftover characters
        ],
    }


class LighttpdConfLexer(RegexLexer):
    """
    Lexer for Lighttpd configuration files.

    .. versionadded:: 0.11
    """
    name = 'Lighttpd configuration file'
    url = 'http://lighttpd.net/'
    aliases = ['lighttpd', 'lighty']
    filenames = ['lighttpd.conf']
    mimetypes = ['text/x-lighttpd-conf']

    tokens = {
        'root': [
            (r'#.*\n', Comment.Single),
            (r'/\S*', Name),  # pathname
            (r'[a-zA-Z._-]+', Keyword),
            (r'\d+\.\d+\.\d+\.\d+(?:/\d+)?', Number),
            (r'[0-9]+', Number),
            (r'=>|=~|\+=|==|=|\+', Operator),
            (r'\$[A-Z]+', Name.Builtin),
            (r'[(){}\[\],]', Punctuation),
            (r'"([^"\\]*(?:\\.[^"\\]*)*)"', String.Double),
            (r'\s+', Whitespace),
        ],
    }


class DockerLexer(RegexLexer):
    """
    Lexer for Docker configuration files.

    .. versionadded:: 2.0
    """
    name = 'Docker'
    url = 'http://docker.io'
    aliases = ['docker', 'dockerfile']
    filenames = ['Dockerfile', '*.docker']
    mimetypes = ['text/x-dockerfile-config']

    _keywords = (r'(?:MAINTAINER|EXPOSE|WORKDIR|USER|STOPSIGNAL)')
    _bash_keywords = (r'(?:RUN|CMD|ENTRYPOINT|ENV|ARG|LABEL|ADD|COPY)')
    _lb = r'(?:\s*\\?\s*)'  # dockerfile line break regex
    flags = re.IGNORECASE | re.MULTILINE

    tokens = {
        'root': [
            (r'#.*', Comment),
            (r'(FROM)([ \t]*)(\S*)([ \t]*)(?:(AS)([ \t]*)(\S*))?',
             bygroups(Keyword, Whitespace, String, Whitespace, Keyword,
                      Whitespace, String)),
            (r'(ONBUILD)(\s+)(%s)' % (_lb,),
             bygroups(Keyword, Whitespace, using(BashLexer))),
            (r'(HEALTHCHECK)(\s+)((%s--\w+=\w+%s)*)' % (_lb, _lb),
             bygroups(Keyword, Whitespace, using(BashLexer))),
            (r'(VOLUME|ENTRYPOINT|CMD|SHELL)(\s+)(%s)(\[.*?\])' % (_lb,),
             bygroups(Keyword, Whitespace, using(BashLexer), using(JsonLexer))),
            (r'(LABEL|ENV|ARG)(\s+)((%s\w+=\w+%s)*)' % (_lb, _lb),
             bygroups(Keyword, Whitespace, using(BashLexer))),
            (r'(%s|VOLUME)\b(\s+)(.*)' % (_keywords,),
             bygroups(Keyword, Whitespace, String)),
            (r'(%s)(\s+)' % (_bash_keywords,),
             bygroups(Keyword, Whitespace)),
            (r'(.*\\\n)*.+', using(BashLexer)),
        ]
    }
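

def _demo_docker_lexer():
    # Illustrative sketch, not part of the original module: tokenize a tiny
    # Dockerfile with the lexer above. `get_tokens` is the standard pygments
    # Lexer API; the sample and this helper's name are invented.
    sample = 'FROM python:3.8\nRUN pip install pygments\n'
    return list(DockerLexer().get_tokens(sample))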


class TerraformLexer(ExtendedRegexLexer):
    """
    Lexer for Terraform ``.tf`` files.

    .. versionadded:: 2.1
    """

    name = 'Terraform'
    url = 'https://www.terraform.io/'
    aliases = ['terraform', 'tf']
    filenames = ['*.tf']
    mimetypes = ['application/x-tf', 'application/x-terraform']

    classes = ('backend', 'data', 'module', 'output', 'provider',
               'provisioner', 'resource', 'variable')
    classes_re = "({})".format(('|').join(classes))

    types = ('string', 'number', 'bool', 'list', 'tuple', 'map', 'set',
             'object', 'null')

    numeric_functions = ('abs', 'ceil', 'floor', 'log', 'max',
                         'min', 'parseint', 'pow', 'signum')

    string_functions = ('chomp', 'format', 'formatlist', 'indent',
                        'join', 'lower', 'regex', 'regexall', 'replace',
                        'split', 'strrev', 'substr', 'title', 'trim',
                        'trimprefix', 'trimsuffix', 'trimspace', 'upper')

    collection_functions = ('alltrue', 'anytrue', 'chunklist', 'coalesce',
                            'coalescelist', 'compact', 'concat', 'contains',
                            'distinct', 'element', 'flatten', 'index', 'keys',
                            'length', 'list', 'lookup', 'map', 'matchkeys',
                            'merge', 'range', 'reverse', 'setintersection',
                            'setproduct', 'setsubtract', 'setunion', 'slice',
                            'sort', 'sum', 'transpose', 'values', 'zipmap')

    encoding_functions = ('base64decode', 'base64encode', 'base64gzip',
                          'csvdecode', 'jsondecode', 'jsonencode',
                          'textdecodebase64', 'textencodebase64', 'urlencode',
                          'yamldecode', 'yamlencode')

    filesystem_functions = ('abspath', 'dirname', 'pathexpand', 'basename',
                            'file', 'fileexists', 'fileset', 'filebase64',
                            'templatefile')

    date_time_functions = ('formatdate', 'timeadd', 'timestamp')

    hash_crypto_functions = ('base64sha256', 'base64sha512', 'bcrypt',
                             'filebase64sha256', 'filebase64sha512', 'filemd5',
                             'filesha1', 'filesha256', 'filesha512', 'md5',
                             'rsadecrypt', 'sha1', 'sha256', 'sha512', 'uuid',
                             'uuidv5')

    ip_network_functions = ('cidrhost', 'cidrnetmask', 'cidrsubnet',
                            'cidrsubnets')

    type_conversion_functions = ('can', 'defaults', 'tobool', 'tolist',
                                 'tomap', 'tonumber', 'toset', 'tostring',
                                 'try')

    builtins = numeric_functions + string_functions + collection_functions + \
        encoding_functions + filesystem_functions + date_time_functions + \
        hash_crypto_functions + ip_network_functions + type_conversion_functions
    builtins_re = "({})".format(('|').join(builtins))

    def heredoc_callback(self, match, ctx):
        # Parse a terraform heredoc
        # match: 1 = <<[-]?, 2 = name, 3 = rest of line

        start = match.start(1)
        yield start, Operator, match.group(1)  # <<[-]?
        yield match.start(2), String.Delimiter, match.group(2)  # heredoc name

        ctx.pos = match.start(3)
        ctx.end = match.end(3)
        yield ctx.pos, String.Heredoc, match.group(3)
        ctx.pos = match.end()

        hdname = match.group(2)
        tolerant = True  # leading whitespace is always accepted

        lines = []

        for match in line_re.finditer(ctx.text, ctx.pos):
            if tolerant:
                check = match.group().strip()
            else:
                check = match.group().rstrip()
            if check == hdname:
                for amatch in lines:
                    yield amatch.start(), String.Heredoc, amatch.group()
                yield match.start(), String.Delimiter, match.group()
                ctx.pos = match.end()
                break
            else:
                lines.append(match)
        else:
            # end of heredoc not found -- error!
            for amatch in lines:
                yield amatch.start(), Error, amatch.group()
            ctx.end = len(ctx.text)
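
    # For illustration (added comment): the callback above handles heredocs
    # of this shape, where EOT is an arbitrary delimiter name and the
    # optional '-' allows the closing delimiter to be indented:
    #
    #     user_data = <<-EOT
    #       echo "hello"
    #     EOT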

    tokens = {
        'root': [
            include('basic'),
            include('whitespace'),

            # Strings
            (r'(".*")', bygroups(String.Double)),

            # Constants
            (words(('true', 'false'), prefix=r'\b', suffix=r'\b'),
             Name.Constant),

            # Types
            (words(types, prefix=r'\b', suffix=r'\b'), Keyword.Type),

            include('identifier'),
            include('punctuation'),
            (r'[0-9]+', Number),
        ],
        'basic': [
            (r'\s*/\*', Comment.Multiline, 'comment'),
            (r'\s*(#|//).*\n', Comment.Single),
            include('whitespace'),

            # e.g. terraform {
            # e.g. egress {
            (r'(\s*)([0-9a-zA-Z-_]+)(\s*)(=?)(\s*)(\{)',
             bygroups(Whitespace, Name.Builtin, Whitespace, Operator,
                      Whitespace, Punctuation)),

            # Assignment with attributes, e.g. something = ...
            (r'(\s*)([0-9a-zA-Z-_]+)(\s*)(=)(\s*)',
             bygroups(Whitespace, Name.Attribute, Whitespace, Operator,
                      Whitespace)),

            # Assignment with environment variables and similar,
            # e.g. "something" = ...
            # or key value assignment, e.g. "SlotName" : ...
            (r'(\s*)("\S+")(\s*)([=:])(\s*)',
             bygroups(Whitespace, Literal.String.Double, Whitespace, Operator,
                      Whitespace)),

            # Functions, e.g. jsonencode(element("value"))
            (builtins_re + r'(\()', bygroups(Name.Function, Punctuation)),

            # List of attributes, e.g. ignore_changes = [last_modified, filename]
            (r'(\[)([a-z_,\s]+)(\])',
             bygroups(Punctuation, Name.Builtin, Punctuation)),

            # e.g. resource "aws_security_group" "allow_tls" {
            # e.g. backend "consul" {
            (classes_re + r'(\s+)("[0-9a-zA-Z-_]+")?(\s*)("[0-9a-zA-Z-_]+")(\s+)(\{)',
             bygroups(Keyword.Reserved, Whitespace, Name.Class, Whitespace,
                      Name.Variable, Whitespace, Punctuation)),

            # here-doc style delimited strings
            (r'(<<-?)\s*([a-zA-Z_]\w*)(.*?\n)', heredoc_callback),
        ],
        'identifier': [
            (r'\b(var\.[0-9a-zA-Z-_\.\[\]]+)\b', bygroups(Name.Variable)),
            (r'\b([0-9a-zA-Z-_\[\]]+\.[0-9a-zA-Z-_\.\[\]]+)\b',
             bygroups(Name.Variable)),
        ],
        'punctuation': [
            (r'[\[\]()\{\},.?:!=]', Punctuation),
        ],
        'comment': [
            (r'[^*/]', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline)
        ],
        'whitespace': [
            (r'\n', Whitespace),
            (r'\s+', Whitespace),
            (r'(\\)(\n)', bygroups(Text, Whitespace)),
        ],
    }


class TermcapLexer(RegexLexer):
    """
    Lexer for termcap database source.

    This is very simple and minimal.

    .. versionadded:: 2.1
    """
    name = 'Termcap'
    aliases = ['termcap']
    filenames = ['termcap', 'termcap.src']
    mimetypes = []

    # NOTE:
    #   * multiline with trailing backslash
    #   * separator is ':'
    #   * to embed colon as data, we must use \072
    #   * space after separator is not allowed (maybe)
    tokens = {
        'root': [
            (r'^#.*', Comment),
            (r'^[^\s#:|]+', Name.Tag, 'names'),
            (r'\s+', Whitespace),
        ],
        'names': [
            (r'\n', Whitespace, '#pop'),
            (r':', Punctuation, 'defs'),
            (r'\|', Punctuation),
            (r'[^:|]+', Name.Attribute),
        ],
        'defs': [
            (r'(\\)(\n[ \t]*)', bygroups(Text, Whitespace)),
            (r'\n[ \t]*', Whitespace, '#pop:2'),
            (r'(#)([0-9]+)', bygroups(Operator, Number)),
            (r'=', Operator, 'data'),
            (r':', Punctuation),
            (r'[^\s:=#]+', Name.Class),
        ],
        'data': [
            (r'\\072', Literal),
            (r':', Punctuation, '#pop'),
            (r'[^:\\]+', Literal),  # for performance
            (r'.', Literal),
        ],
    }
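

# For illustration (added comment): a termcap entry of the shape handled
# above, with '|'-separated names, ':'-separated capabilities, and trailing
# backslashes for continuation. The entry itself is invented:
#
#     du|dummy|an example entry:\
#         :co#80:li#24: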


class TerminfoLexer(RegexLexer):
    """
    Lexer for terminfo database source.

    This is very simple and minimal.

    .. versionadded:: 2.1
    """
    name = 'Terminfo'
    aliases = ['terminfo']
    filenames = ['terminfo', 'terminfo.src']
    mimetypes = []

    # NOTE:
    #   * multiline with leading whitespace
    #   * separator is ','
    #   * to embed comma as data, we can use \,
    #   * space after separator is allowed
    tokens = {
        'root': [
            (r'^#.*$', Comment),
            (r'^[^\s#,|]+', Name.Tag, 'names'),
            (r'\s+', Whitespace),
        ],
        'names': [
            (r'\n', Whitespace, '#pop'),
            (r'(,)([ \t]*)', bygroups(Punctuation, Whitespace), 'defs'),
            (r'\|', Punctuation),
            (r'[^,|]+', Name.Attribute),
        ],
        'defs': [
            (r'\n[ \t]+', Whitespace),
            (r'\n', Whitespace, '#pop:2'),
            (r'(#)([0-9]+)', bygroups(Operator, Number)),
            (r'=', Operator, 'data'),
            (r'(,)([ \t]*)', bygroups(Punctuation, Whitespace)),
            (r'[^\s,=#]+', Name.Class),
        ],
        'data': [
            (r'\\[,\\]', Literal),
            (r'(,)([ \t]*)', bygroups(Punctuation, Whitespace), '#pop'),
            (r'[^\\,]+', Literal),  # for performance
            (r'.', Literal),
        ],
    }
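

# For illustration (added comment): a terminfo source entry of the shape
# handled above, with ','-separated capabilities and continuation lines
# marked by leading whitespace. The entry itself is invented:
#
#     dummy|an example entry,
#         cols#80, lines#24,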


class PkgConfigLexer(RegexLexer):
    """
    Lexer for pkg-config
    (see also `manual page <http://linux.die.net/man/1/pkg-config>`_).

    .. versionadded:: 2.1
    """

    name = 'PkgConfig'
    url = 'http://www.freedesktop.org/wiki/Software/pkg-config/'
    aliases = ['pkgconfig']
    filenames = ['*.pc']
    mimetypes = []

    tokens = {
        'root': [
            (r'#.*$', Comment.Single),

            # variable definitions
            (r'^(\w+)(=)', bygroups(Name.Attribute, Operator)),

            # keyword lines
            (r'^([\w.]+)(:)',
             bygroups(Name.Tag, Punctuation), 'spvalue'),

            # variable references
            include('interp'),

            # fallback
            (r'\s+', Whitespace),
            (r'[^${}#=:\n.]+', Text),
            (r'.', Text),
        ],
        'interp': [
            # you can escape literal "$" as "$$"
            (r'\$\$', Text),

            # variable references
            (r'\$\{', String.Interpol, 'curly'),
        ],
        'curly': [
            (r'\}', String.Interpol, '#pop'),
            (r'\w+', Name.Attribute),
        ],
        'spvalue': [
            include('interp'),

            (r'#.*$', Comment.Single, '#pop'),
            (r'\n', Whitespace, '#pop'),

            # fallback
            (r'\s+', Whitespace),
            (r'[^${}#\n\s]+', Text),
            (r'.', Text),
        ],
    }


class PacmanConfLexer(RegexLexer):
    """
    Lexer for pacman.conf.

    Actually, IniLexer works almost fine for this format,
    but it yields error tokens, because pacman.conf has
    a form without assignment like:

        UseSyslog
        Color
        TotalDownload
        CheckSpace
        VerbosePkgLists

    These are flags to switch on.

    .. versionadded:: 2.1
    """

    name = 'PacmanConf'
    url = 'https://www.archlinux.org/pacman/pacman.conf.5.html'
    aliases = ['pacmanconf']
    filenames = ['pacman.conf']
    mimetypes = []

    tokens = {
        'root': [
            # comment
            (r'#.*$', Comment.Single),

            # section header
            (r'^(\s*)(\[.*?\])(\s*)$',
             bygroups(Whitespace, Keyword, Whitespace)),

            # variable definitions
            # (Leading space is allowed...)
            (r'(\w+)(\s*)(=)',
             bygroups(Name.Attribute, Whitespace, Operator)),

            # flags to on
            (r'^(\s*)(\w+)(\s*)$',
             bygroups(Whitespace, Name.Attribute, Whitespace)),

            # built-in special values
            (words((
                '$repo',  # repository
                '$arch',  # architecture
                '%o',     # outfile
                '%u',     # url
                ), suffix=r'\b'),
             Name.Variable),

            # fallback
            (r'\s+', Whitespace),
            (r'.', Text),
        ],
    }


class AugeasLexer(RegexLexer):
    """
    Lexer for Augeas.

    .. versionadded:: 2.4
    """
    name = 'Augeas'
    url = 'http://augeas.net'
    aliases = ['augeas']
    filenames = ['*.aug']

    tokens = {
        'root': [
            (r'(module)(\s*)([^\s=]+)',
             bygroups(Keyword.Namespace, Whitespace, Name.Namespace)),
            (r'(let)(\s*)([^\s=]+)',
             bygroups(Keyword.Declaration, Whitespace, Name.Variable)),
            (r'(del|store|value|counter|seq|key|label|autoload|incl|excl|transform|test|get|put)(\s+)',
             bygroups(Name.Builtin, Whitespace)),
            (r'(\()([^:]+)(\:)(unit|string|regexp|lens|tree|filter)(\))',
             bygroups(Punctuation, Name.Variable, Punctuation, Keyword.Type,
                      Punctuation)),
            (r'\(\*', Comment.Multiline, 'comment'),
            (r'[*+\-.;=?|]', Operator),
            (r'[()\[\]{}]', Operator),
            (r'"', String.Double, 'string'),
            (r'\/', String.Regex, 'regex'),
            (r'([A-Z]\w*)(\.)(\w+)',
             bygroups(Name.Namespace, Punctuation, Name.Variable)),
            (r'.', Name.Variable),
            (r'\s+', Whitespace),
        ],
        'string': [
            (r'\\.', String.Escape),
            (r'[^"]', String.Double),
            (r'"', String.Double, '#pop'),
        ],
        'regex': [
            (r'\\.', String.Escape),
            (r'[^/]', String.Regex),
            (r'\/', String.Regex, '#pop'),
        ],
        'comment': [
            (r'[^*)]', Comment.Multiline),
            (r'\(\*', Comment.Multiline, '#push'),
            (r'\*\)', Comment.Multiline, '#pop'),
            (r'[)*]', Comment.Multiline)
        ],
    }


class TOMLLexer(RegexLexer):
    """
    Lexer for TOML, a simple language for config files.

    .. versionadded:: 2.4
    """

    name = 'TOML'
    url = 'https://github.com/toml-lang/toml'
    aliases = ['toml']
    filenames = ['*.toml', 'Pipfile', 'poetry.lock']

    tokens = {
        'root': [
            # Table
            (r'^(\s*)(\[.*?\])$', bygroups(Whitespace, Keyword)),

            # Basics, comments, strings
            (r'[ \t]+', Whitespace),
            (r'\n', Whitespace),
            (r'#.*?$', Comment.Single),
            # Basic string
            (r'"(\\\\|\\[^\\]|[^"\\])*"', String),
            # Literal string
            (r'\'\'\'(.*)\'\'\'', String),
            (r'\'[^\']*\'', String),
            (r'(true|false)$', Keyword.Constant),
            (r'[a-zA-Z_][\w\-]*', Name),

            # Datetime
            # TODO this needs to be expanded, as TOML is rather flexible:
            # https://github.com/toml-lang/toml#offset-date-time
            (r'\d{4}-\d{2}-\d{2}(?:T| )\d{2}:\d{2}:\d{2}(?:Z|[-+]\d{2}:\d{2})',
             Number.Integer),

            # Numbers
            (r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?j?', Number.Float),
            (r'\d+[eE][+-]?[0-9]+j?', Number.Float),
            # Handle +-inf, +-infinity, +-nan
            (r'[+-]?(?:(inf(?:inity)?)|nan)', Number.Float),
            (r'[+-]?\d+', Number.Integer),

            # Punctuation
            (r'[]{}:(),;[]', Punctuation),
            (r'\.', Punctuation),

            # Operators
            (r'=', Operator)
        ]
    }


class NestedTextLexer(RegexLexer):
    """
    Lexer for NestedText, a human-friendly data format.

    .. versionadded:: 2.9
    """

    name = 'NestedText'
    url = 'https://nestedtext.org'
    aliases = ['nestedtext', 'nt']
    filenames = ['*.nt']

    _quoted_dict_item = r'^(\s*)({0})(.*?)({0}: ?)(.*?)(\s*)$'
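
    # For illustration (added comment, derived from the template above):
    # _quoted_dict_item.format("'") expands to
    #     ^(\s*)(')(.*?)(': ?)(.*?)(\s*)$
    # i.e. a dictionary key wrapped in single quotes, the quote-colon-space
    # separator, and the value.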

    tokens = {
        'root': [
            (r'^(\s*)(#.*?)$', bygroups(Whitespace, Comment)),
            (r'^(\s*)(>)( ?)(.*?)(\s*)$',
             bygroups(Whitespace, Punctuation, Whitespace, String, Whitespace)),
            (r'^(\s*)(-)( ?)(.*?)(\s*)$',
             bygroups(Whitespace, Punctuation, Whitespace, String, Whitespace)),
            (_quoted_dict_item.format("'"),
             bygroups(Whitespace, Punctuation, Name, Punctuation, String,
                      Whitespace)),
            (_quoted_dict_item.format('"'),
             bygroups(Whitespace, Punctuation, Name, Punctuation, String,
                      Whitespace)),
            (r'^(\s*)(.*?)(:)( ?)(.*?)(\s*)$',
             bygroups(Whitespace, Name, Punctuation, Whitespace, String,
                      Whitespace)),
        ],
    }


class SingularityLexer(RegexLexer):
    """
    Lexer for Singularity definition files.

    .. versionadded:: 2.6
    """

    name = 'Singularity'
    url = 'https://www.sylabs.io/guides/3.0/user-guide/definition_files.html'
    aliases = ['singularity']
    filenames = ['*.def', 'Singularity']
    flags = re.IGNORECASE | re.MULTILINE | re.DOTALL

    _headers = r'^(\s*)(bootstrap|from|osversion|mirrorurl|include|registry|namespace|includecmd)(:)'
    _section = r'^(%(?:pre|post|setup|environment|help|labels|test|runscript|files|startscript))(\s*)'
    _appsect = r'^(%app(?:install|help|run|labels|env|test|files))(\s*)'

    tokens = {
        'root': [
            (_section, bygroups(Generic.Heading, Whitespace), 'script'),
            (_appsect, bygroups(Generic.Heading, Whitespace), 'script'),
            (_headers, bygroups(Whitespace, Keyword, Text)),
            (r'\s*#.*?\n', Comment),
            (r'\b(([0-9]+\.?[0-9]*)|(\.[0-9]+))\b', Number),
            (r'[ \t]+', Whitespace),
            (r'(?!^\s*%).', Text),
        ],
        'script': [
            (r'(.+?(?=^\s*%))|(.*)', using(BashLexer), '#pop'),
        ],
    }

    def analyse_text(text):
        """This is a quite simple script file, but there are a few keywords
        which seem unique to this language."""
        result = 0
        if re.search(r'\b(?:osversion|includecmd|mirrorurl)\b', text,
                     re.IGNORECASE):
            result += 0.5

        if re.search(SingularityLexer._section[1:], text):
            result += 0.49

        return result


class UnixConfigLexer(RegexLexer):
    """
    Lexer for Unix/Linux config files using colon-separated values, e.g.

    * ``/etc/group``
    * ``/etc/passwd``
    * ``/etc/shadow``

    .. versionadded:: 2.12
    """

    name = 'Unix/Linux config files'
    aliases = ['unixconfig', 'linuxconfig']
    filenames = []

    tokens = {
        'root': [
            (r'^#.*', Comment),
            (r'\n', Whitespace),
            (r':', Punctuation),
            (r'[0-9]+', Number),
            (r'((?!\n)[a-zA-Z0-9\_\-\s\(\),]){2,}', Text),
            (r'[^:\n]+', String),
        ],
    }
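

def _demo_unix_config_lexer():
    # Illustrative sketch, not part of the original module: render a
    # /etc/passwd-style line with the lexer above. `highlight` and
    # `NullFormatter` are standard pygments APIs; the sample line and this
    # helper's name are invented for the example.
    from pygments import highlight
    from pygments.formatters import NullFormatter
    sample = 'root:x:0:0:root:/root:/bin/bash\n'
    return highlight(sample, UnixConfigLexer(), NullFormatter())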