1"""
2 pygments.lexers.nix
3 ~~~~~~~~~~~~~~~~~~~
4
5 Lexers for the NixOS Nix language.
6
7 :copyright: Copyright 2006-2025 by the Pygments team, see AUTHORS.
8 :license: BSD, see LICENSE for details.
9"""
10
11import re
12
13from pygments.lexer import RegexLexer, include
14from pygments.token import Text, Comment, Operator, Keyword, Name, String, \
15 Number, Punctuation, Literal
16
17__all__ = ['NixLexer']
18
19
20class NixLexer(RegexLexer):
21 """
22 For the Nix language.
23 """
24
25 name = 'Nix'
26 url = 'http://nixos.org/nix/'
27 aliases = ['nixos', 'nix']
28 filenames = ['*.nix']
29 mimetypes = ['text/x-nix']
30 version_added = '2.0'
31
32 keywords = ['rec', 'with', 'let', 'in', 'inherit', 'assert', 'if',
33 'else', 'then', '...']
34 builtins = ['import', 'abort', 'baseNameOf', 'dirOf', 'isNull', 'builtins',
35 'map', 'removeAttrs', 'throw', 'toString', 'derivation']
36 operators = ['++', '+', '?', '.', '!', '//', '==', '/',
37 '!=', '&&', '||', '->', '=', '<', '>', '*', '-']
38
39 punctuations = ["(", ")", "[", "]", ";", "{", "}", ":", ",", "@"]
40
41 tokens = {
42 'root': [
43 # comments starting with #
44 (r'#.*$', Comment.Single),
45
46 # multiline comments
47 (r'/\*', Comment.Multiline, 'comment'),
48
49 # whitespace
50 (r'\s+', Text),
51
52 # keywords
53 ('({})'.format('|'.join(re.escape(entry) + '\\b' for entry in keywords)), Keyword),
54
55 # highlight the builtins
56 ('({})'.format('|'.join(re.escape(entry) + '\\b' for entry in builtins)),
57 Name.Builtin),
58
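            # boolean and null constants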
            (r'\b(true|false|null)\b', Name.Constant),

            # floats
            (r'-?(\d+\.\d*|\.\d+)([eE][-+]?\d+)?', Number.Float),

            # integers
            (r'-?[0-9]+', Number.Integer),

            # paths
            (r'[\w.+-]*(\/[\w.+-]+)+', Literal),
            (r'~(\/[\w.+-]+)+', Literal),
            (r'\<[\w.+-]+(\/[\w.+-]+)*\>', Literal),

            # operators
            ('({})'.format('|'.join(re.escape(entry) for entry in operators)),
             Operator),

            # word operators
            (r'\b(or|and)\b', Operator.Word),

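            # an opening brace enters the 'block' state; the matching '}' pops back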
            (r'\{', Punctuation, 'block'),

            # punctuations
            ('({})'.format('|'.join(re.escape(entry) for entry in punctuations)), Punctuation),

            # strings
            (r'"', String.Double, 'doublequote'),
            (r"''", String.Multiline, 'multiline'),

            # urls
            (r'[a-zA-Z][a-zA-Z0-9\+\-\.]*\:[\w%/?:@&=+$,\\.!~*\'-]+', Literal),

            # names of variables
            (r'[\w-]+(?=\s*=)', String.Symbol),
            (r'[a-zA-Z_][\w\'-]*', Text),

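            # ${ ... } antiquotation (interpolation) outside of strings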
            (r"\$\{", String.Interpol, 'antiquote'),
        ],
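        # /* ... */ comments; '#push'/'#pop' keep track of nested '/*' openers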
        'comment': [
            (r'[^/*]+', Comment.Multiline),
            (r'/\*', Comment.Multiline, '#push'),
            (r'\*/', Comment.Multiline, '#pop'),
            (r'[*/]', Comment.Multiline),
        ],
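        # '' ... '' indented strings; ''-prefixed escapes are matched first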
        'multiline': [
            (r"''(\$|'|\\n|\\r|\\t|\\)", String.Escape),
            (r"''", String.Multiline, '#pop'),
            (r'\$\{', String.Interpol, 'antiquote'),
            (r"[^'\$]+", String.Multiline),
            (r"\$[^\{']", String.Multiline),
            (r"'[^']", String.Multiline),
            (r"\$(?=')", String.Multiline),
        ],
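        # ordinary "..." strings with backslash escapes and ${...} interpolation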
        'doublequote': [
            (r'\\(\\|"|\$|n)', String.Escape),
            (r'"', String.Double, '#pop'),
            (r'\$\{', String.Interpol, 'antiquote'),
            (r'[^"\\\$]+', String.Double),
            (r'\$[^\{"]', String.Double),
            (r'\$(?=")', String.Double),
            (r'\\', String.Double),
        ],
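        # inside ${ ... }: any Nix expression may appear, hence include('root')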
        'antiquote': [
            (r"\}", String.Interpol, '#pop'),
            # TODO: we should probably escape also here ''${ \${
            (r"\$\{", String.Interpol, '#push'),
            include('root'),
        ],
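        # inside { ... }: behaves like 'root' until the matching '}' pops the state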
        'block': [
            (r"\}", Punctuation, '#pop'),
            include('root'),
        ],
    }

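    # Content-sniffing heuristic used by guess_lexer(): bump the score for
    # constructs that look like Nix, e.g. <...> imports and mkDerivation calls.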
    def analyse_text(text):
        rv = 0.0
        # TODO: let/in
        if re.search(r'import.+?<[^>]+>', text):
            rv += 0.4
        if re.search(r'mkDerivation\s+(\(|\{|rec)', text):
            rv += 0.4
        if re.search(r'=\s+mkIf\s+', text):
            rv += 0.4
        if re.search(r'\{[a-zA-Z,\s]+\}:', text):
            rv += 0.1
        return rv
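

# Minimal usage sketch (not part of the upstream Pygments module): running this
# file directly tokenizes a small, made-up Nix expression with NixLexer and
# prints it through a terminal formatter. The sample expression is illustrative
# only; highlight() and TerminalFormatter are standard Pygments entry points.
if __name__ == '__main__':
    from pygments import highlight
    from pygments.formatters import TerminalFormatter

    _sample = '''
    { pkgs ? import <nixpkgs> {} }:
    pkgs.stdenv.mkDerivation rec {
      pname = "example";
      version = "1.0";
      src = ./.;
    }
    '''
    print(highlight(_sample, NixLexer(), TerminalFormatter()))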