Coverage for /pythoncovmergedfiles/medio/medio/usr/local/lib/python3.8/site-packages/pygments/lexers/usd.py: 100%
16 statements
coverage.py v7.2.7, created at 2023-07-01 06:54 +0000
1"""
2 pygments.lexers.usd
3 ~~~~~~~~~~~~~~~~~~~
5 The module that parses Pixar's Universal Scene Description file format.
7 :copyright: Copyright 2006-2023 by the Pygments team, see AUTHORS.
8 :license: BSD, see LICENSE for details.
9"""
from pygments.lexer import RegexLexer, bygroups
from pygments.lexer import words as words_
from pygments.lexers._usd_builtins import COMMON_ATTRIBUTES, KEYWORDS, \
    OPERATORS, SPECIAL_NAMES, TYPES
from pygments.token import Comment, Keyword, Name, Number, Operator, \
    Punctuation, String, Text, Whitespace

__all__ = ["UsdLexer"]
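
# Helper: build a single lexer rule that matches any of the given words as
# whole words (\b-delimited) and emits them with the given token type.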
def _keywords(words, type_):
    return [(words_(words, prefix=r"\b", suffix=r"\b"), type_)]
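
# Regex fragments reused by the attribute-declaration rules below: a type name
# with an optional "[]" array suffix, a possibly namespaced attribute name with
# an optional ".timeSamples" suffix, and a run of spaces/tabs.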
_TYPE = r"(\w+(?:\[\])?)"
_BASE_ATTRIBUTE = r"(\w+(?:\:\w+)*)(?:(\.)(timeSamples))?"
_WHITESPACE = r"([ \t]+)"


class UsdLexer(RegexLexer):
    """
    A lexer that parses Pixar's Universal Scene Description file format.

    .. versionadded:: 2.6
    """

    name = "USD"
    url = 'https://graphics.pixar.com/usd/release/index.html'
    aliases = ["usd", "usda"]
    filenames = ["*.usd", "*.usda"]
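
    # Attribute declarations ("custom uniform <type> <name> = ...", etc.) are
    # matched first, then the word lists for keywords, special names, common
    # attributes, operators and types, and finally generic punctuation,
    # comment, number, string and path rules.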
    tokens = {
        "root": [
            (r"(custom){_WHITESPACE}(uniform)(\s+){}(\s+){}(\s*)(=)".format(
                _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE),
             bygroups(Keyword.Token, Whitespace, Keyword.Token, Whitespace,
                      Keyword.Type, Whitespace, Name.Attribute, Text,
                      Name.Keyword.Tokens, Whitespace, Operator)),
            (r"(custom){_WHITESPACE}{}(\s+){}(\s*)(=)".format(
                _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE),
             bygroups(Keyword.Token, Whitespace, Keyword.Type, Whitespace,
                      Name.Attribute, Text, Name.Keyword.Tokens, Whitespace,
                      Operator)),
            (r"(uniform){_WHITESPACE}{}(\s+){}(\s*)(=)".format(
                _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE),
             bygroups(Keyword.Token, Whitespace, Keyword.Type, Whitespace,
                      Name.Attribute, Text, Name.Keyword.Tokens, Whitespace,
                      Operator)),
            (r"{}{_WHITESPACE}{}(\s*)(=)".format(
                _TYPE, _BASE_ATTRIBUTE, _WHITESPACE=_WHITESPACE),
             bygroups(Keyword.Type, Whitespace, Name.Attribute, Text,
                      Name.Keyword.Tokens, Whitespace, Operator)),
        ] +
        _keywords(KEYWORDS, Keyword.Tokens) +
        _keywords(SPECIAL_NAMES, Name.Builtins) +
        _keywords(COMMON_ATTRIBUTES, Name.Attribute) +
        [(r"\b\w+:[\w:]+\b", Name.Attribute)] +
        _keywords(OPERATORS, Operator) +  # more attributes
        [(type_ + r"\[\]", Keyword.Type) for type_ in TYPES] +
        _keywords(TYPES, Keyword.Type) +
        [
            (r"[(){}\[\]]", Punctuation),
            ("#.*?$", Comment.Single),
            (",", Punctuation),
            (";", Punctuation),  # ";"s are allowed to combine separate metadata lines
            ("=", Operator),
            (r"[-]*([0-9]*[.])?[0-9]+(?:e[+-]*\d+)?", Number),
            (r"'''(?:.|\n)*?'''", String),
            (r'"""(?:.|\n)*?"""', String),
            (r"'.*?'", String),
            (r'".*?"', String),
            (r"<(\.\./)*([\w/]+|[\w/]+\.\w+[\w:]*)>", Name.Namespace),
            (r"@.*?@", String.Interpol),
            (r'\(.*"[.\\n]*".*\)', String.Doc),
            (r"\A#usda .+$", Comment.Hashbang),
            (r"\s+", Whitespace),
            (r"\w+", Text),
            (r"[_:.]+", Punctuation),
        ],
    }
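
A minimal usage sketch, not part of the module above: assuming a standard Pygments installation, the lexer can be looked up by its "usda" alias (or imported as pygments.lexers.UsdLexer) and passed to pygments.highlight; the USD snippet below is illustrative only.

from pygments import highlight
from pygments.formatters import TerminalFormatter
from pygments.lexers import get_lexer_by_name

sample = '''#usda 1.0
def Xform "hello"
{
    custom double3 xformOp:translate = (0, 0, 0)
}
'''

# Look the lexer up by alias and render the snippet with ANSI colors.
print(highlight(sample, get_lexer_by_name("usda"), TerminalFormatter()))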