// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import "fmt"
// EnumNode represents an enum declaration. Example:
//
// enum Foo { BAR = 0; BAZ = 1 }
type EnumNode struct {
	compositeNode
	Keyword    *KeywordNode  // the "enum" keyword
	Name       *IdentNode    // the enum's name
	OpenBrace  *RuneNode     // the "{" rune that opens the body
	Decls      []EnumElement // all declarations inside the enum body
	CloseBrace *RuneNode     // the "}" rune that closes the body
}

// fileElement marks EnumNode as valid as a top-level file declaration.
func (*EnumNode) fileElement() {}

// msgElement marks EnumNode as valid as a declaration nested in a message.
func (*EnumNode) msgElement() {}
// NewEnumNode creates a new *EnumNode. All arguments must be non-nil. While
// it is technically allowed for decls to be nil or empty, the resulting node
// will not be a valid enum, which must have at least one value.
//   - keyword: The token corresponding to the "enum" keyword.
//   - name: The token corresponding to the enum's name.
//   - openBrace: The token corresponding to the "{" rune that starts the body.
//   - decls: All declarations inside the enum body.
//   - closeBrace: The token corresponding to the "}" rune that ends the body.
//
// This function panics if any element of decls has a concrete type that is
// not a valid EnumElement.
func NewEnumNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []EnumElement, closeBrace *RuneNode) *EnumNode {
	switch {
	case keyword == nil:
		panic("keyword is nil")
	case name == nil:
		panic("name is nil")
	case openBrace == nil:
		panic("openBrace is nil")
	case closeBrace == nil:
		panic("closeBrace is nil")
	}
	// Children are: keyword, name, open brace, each decl, close brace.
	kids := make([]Node, 0, 4+len(decls))
	kids = append(kids, keyword, name, openBrace)
	for _, d := range decls {
		switch d.(type) {
		case *OptionNode, *EnumValueNode, *ReservedNode, *EmptyDeclNode:
			kids = append(kids, d)
		default:
			panic(fmt.Sprintf("invalid EnumElement type: %T", d))
		}
	}
	kids = append(kids, closeBrace)
	return &EnumNode{
		compositeNode: compositeNode{
			children: kids,
		},
		Keyword:    keyword,
		Name:       name,
		OpenBrace:  openBrace,
		Decls:      decls,
		CloseBrace: closeBrace,
	}
}
// RangeOptions invokes fn once for each *OptionNode among the enum's
// declarations, in source order, stopping early if fn returns false.
func (n *EnumNode) RangeOptions(fn func(*OptionNode) bool) {
	for _, decl := range n.Decls {
		opt, ok := decl.(*OptionNode)
		if !ok {
			continue
		}
		if !fn(opt) {
			return
		}
	}
}
// EnumElement is an interface implemented by all AST nodes that can
// appear in the body of an enum declaration.
type EnumElement interface {
	Node
	// enumElement is an unexported marker method restricting implementations
	// to this package.
	enumElement()
}

// Compile-time checks that all expected node types satisfy EnumElement.
var _ EnumElement = (*OptionNode)(nil)
var _ EnumElement = (*EnumValueNode)(nil)
var _ EnumElement = (*ReservedNode)(nil)
var _ EnumElement = (*EmptyDeclNode)(nil)
// EnumValueDeclNode is a placeholder interface for AST nodes that represent
// enum values. This allows NoSourceNode to be used in place of *EnumValueNode
// for some usages.
type EnumValueDeclNode interface {
	NodeWithOptions
	// GetName returns the node for the enum value's name.
	GetName() Node
	// GetNumber returns the node for the enum value's numeric constant.
	GetNumber() Node
}

// Compile-time checks that the expected types satisfy EnumValueDeclNode.
var _ EnumValueDeclNode = (*EnumValueNode)(nil)
var _ EnumValueDeclNode = (*NoSourceNode)(nil)
// EnumValueNode represents an enum value declaration. Example:
//
// UNSET = 0 [deprecated = true];
type EnumValueNode struct {
	compositeNode
	Name      *IdentNode          // the value's name
	Equals    *RuneNode           // the "=" rune after the name
	Number    IntValueNode        // the value's numeric constant
	Options   *CompactOptionsNode // nil when the declaration has no compact options
	Semicolon *RuneNode           // the trailing ";" rune; may be nil
}

// enumElement marks EnumValueNode as valid inside an enum body.
func (*EnumValueNode) enumElement() {}
// NewEnumValueNode creates a new *EnumValueNode. All arguments must be non-nil
// except opts which is only non-nil if the declaration included options.
//   - name: The token corresponding to the enum value's name.
//   - equals: The token corresponding to the '=' rune after the name.
//   - number: The token corresponding to the enum value's number.
//   - opts: Optional set of enum value options.
//   - semicolon: The token corresponding to the ";" rune that ends the declaration.
func NewEnumValueNode(name *IdentNode, equals *RuneNode, number IntValueNode, opts *CompactOptionsNode, semicolon *RuneNode) *EnumValueNode {
	switch {
	case name == nil:
		panic("name is nil")
	case equals == nil:
		panic("equals is nil")
	case number == nil:
		panic("number is nil")
	}
	// At most five children: name, equals, number, options, semicolon.
	kids := make([]Node, 0, 5)
	kids = append(kids, name, equals, number)
	if opts != nil {
		kids = append(kids, opts)
	}
	if semicolon != nil {
		kids = append(kids, semicolon)
	}
	return &EnumValueNode{
		compositeNode: compositeNode{
			children: kids,
		},
		Name:      name,
		Equals:    equals,
		Number:    number,
		Options:   opts,
		Semicolon: semicolon,
	}
}
// GetName returns the node for the enum value's name.
// It implements EnumValueDeclNode.
func (e *EnumValueNode) GetName() Node {
	return e.Name
}

// GetNumber returns the node for the enum value's numeric constant.
// It implements EnumValueDeclNode.
func (e *EnumValueNode) GetNumber() Node {
	return e.Number
}
// RangeOptions invokes fn once for each option in the value's compact options,
// stopping early if fn returns false. It is a no-op when the value was
// declared without compact options.
func (e *EnumValueNode) RangeOptions(fn func(*OptionNode) bool) {
	// NewEnumValueNode explicitly allows opts to be nil, so guard against a
	// nil pointer dereference here (previously e.Options.Options panicked
	// for values declared without options).
	if e.Options == nil {
		return
	}
	for _, opt := range e.Options.Options {
		if !fn(opt) {
			return
		}
	}
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import "fmt"
// FieldDeclNode is a node in the AST that defines a field. This includes
// normal message fields as well as extensions. There are multiple types
// of AST nodes that declare fields:
//   - *FieldNode
//   - *GroupNode
//   - *MapFieldNode
//   - *SyntheticMapField
//
// This also allows NoSourceNode and SyntheticMapField to be used in place of
// one of the above for some usages.
type FieldDeclNode interface {
	NodeWithOptions
	// FieldLabel returns the label keyword node ("optional", "required",
	// "repeated"), or nil when no label is present.
	FieldLabel() Node
	// FieldName returns the node for the field's name.
	FieldName() Node
	// FieldType returns the node for the field's type.
	FieldType() Node
	// FieldTag returns the node for the field's tag number.
	FieldTag() Node
	// FieldExtendee returns the extendee node when the field is declared in
	// an "extend" block, or nil otherwise.
	FieldExtendee() Node
	// GetGroupKeyword returns the "group" keyword node for group fields,
	// nil for all other field kinds.
	GetGroupKeyword() Node
	// GetOptions returns the field's compact options, if any.
	GetOptions() *CompactOptionsNode
}

// Compile-time checks that all expected node types satisfy FieldDeclNode.
var _ FieldDeclNode = (*FieldNode)(nil)
var _ FieldDeclNode = (*GroupNode)(nil)
var _ FieldDeclNode = (*MapFieldNode)(nil)
var _ FieldDeclNode = (*SyntheticMapField)(nil)
var _ FieldDeclNode = (*NoSourceNode)(nil)
// FieldNode represents a normal field declaration (not groups or maps). It
// can represent extension fields as well as non-extension fields (both inside
// of messages and inside of one-ofs). Example:
//
// optional string foo = 1;
type FieldNode struct {
	compositeNode
	Label     FieldLabel          // the optional label keyword; see FieldLabel.IsPresent
	FldType   IdentValueNode      // the field's type
	Name      *IdentNode          // the field's name
	Equals    *RuneNode           // the "=" rune; may be nil
	Tag       *UintLiteralNode    // the field's tag number; may be nil
	Options   *CompactOptionsNode // nil when the declaration has no compact options
	Semicolon *RuneNode           // the trailing ";" rune; may be nil
	// This is an up-link to the containing *ExtendNode for fields
	// that are defined inside of "extend" blocks.
	Extendee *ExtendNode
}

// msgElement, oneofElement, and extendElement mark FieldNode as valid inside
// message, oneof, and extend bodies respectively.
func (*FieldNode) msgElement() {}

func (*FieldNode) oneofElement() {}

func (*FieldNode) extendElement() {}
// NewFieldNode creates a new *FieldNode. The label and options arguments may be
// nil but the others must be non-nil.
//   - label: The token corresponding to the label keyword if present ("optional",
//     "required", or "repeated").
//   - fieldType: The token corresponding to the field's type.
//   - name: The token corresponding to the field's name.
//   - equals: The token corresponding to the '=' rune after the name.
//   - tag: The token corresponding to the field's tag number.
//   - opts: Optional set of field options.
//   - semicolon: The token corresponding to the ";" rune that ends the declaration.
func NewFieldNode(label *KeywordNode, fieldType IdentValueNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, semicolon *RuneNode) *FieldNode {
	if fieldType == nil {
		panic("fieldType is nil")
	}
	if name == nil {
		panic("name is nil")
	}
	// At most seven children: label, type, name, equals, tag, options,
	// and semicolon; all but type and name are optional.
	kids := make([]Node, 0, 7)
	if label != nil {
		kids = append(kids, label)
	}
	kids = append(kids, fieldType, name)
	if equals != nil {
		kids = append(kids, equals)
	}
	if tag != nil {
		kids = append(kids, tag)
	}
	if opts != nil {
		kids = append(kids, opts)
	}
	if semicolon != nil {
		kids = append(kids, semicolon)
	}
	return &FieldNode{
		compositeNode: compositeNode{
			children: kids,
		},
		Label:     newFieldLabel(label),
		FldType:   fieldType,
		Name:      name,
		Equals:    equals,
		Tag:       tag,
		Options:   opts,
		Semicolon: semicolon,
	}
}
// FieldLabel returns the label keyword node, or nil when no label was present.
// It implements FieldDeclNode.
func (n *FieldNode) FieldLabel() Node {
	// proto3 fields and fields inside one-ofs will not have a label and we need
	// this check in order to return a nil interface -- otherwise we'd return a
	// non-nil interface that wraps a nil pointer.
	if n.Label.KeywordNode == nil {
		return nil
	}
	return n.Label.KeywordNode
}

// FieldName returns the node for the field's name.
func (n *FieldNode) FieldName() Node {
	return n.Name
}

// FieldType returns the node for the field's type.
func (n *FieldNode) FieldType() Node {
	return n.FldType
}

// FieldTag returns the node for the field's tag number. When the declaration
// had no tag, the field node itself is returned so the result is never nil.
func (n *FieldNode) FieldTag() Node {
	if n.Tag == nil {
		return n
	}
	return n.Tag
}

// FieldExtendee returns the extendee of the enclosing "extend" block, or nil
// when this field is not declared inside one.
func (n *FieldNode) FieldExtendee() Node {
	if n.Extendee != nil {
		return n.Extendee.Extendee
	}
	return nil
}

// GetGroupKeyword always returns nil: a normal field is not a group.
func (n *FieldNode) GetGroupKeyword() Node {
	return nil
}

// GetOptions returns the field's compact options; nil when none were declared.
func (n *FieldNode) GetOptions() *CompactOptionsNode {
	return n.Options
}
// RangeOptions invokes fn once for each option in the field's compact options,
// stopping early if fn returns false. It is a no-op when the field was
// declared without compact options.
func (n *FieldNode) RangeOptions(fn func(*OptionNode) bool) {
	// NewFieldNode explicitly allows opts to be nil, so guard against a nil
	// pointer dereference here (previously n.Options.Options panicked for
	// fields declared without options).
	if n.Options == nil {
		return
	}
	for _, opt := range n.Options.Options {
		if !fn(opt) {
			return
		}
	}
}
// FieldLabel represents the label of a field, which indicates its cardinality
// (i.e. whether it is optional, required, or repeated).
type FieldLabel struct {
	*KeywordNode      // the label keyword token; nil when no label was written
	Repeated     bool // true when the label keyword was "repeated"
	Required     bool // true when the label keyword was "required"
}
// newFieldLabel builds a FieldLabel from the given (possibly nil) label
// keyword, deriving the Repeated/Required flags from the keyword's text.
func newFieldLabel(lbl *KeywordNode) FieldLabel {
	fl := FieldLabel{KeywordNode: lbl}
	if lbl != nil {
		switch lbl.Val {
		case "repeated":
			fl.Repeated = true
		case "required":
			fl.Required = true
		}
	}
	return fl
}
// IsPresent returns true if a label keyword was present in the declaration
// and false if it was absent.
func (f *FieldLabel) IsPresent() bool {
	return f.KeywordNode != nil
}
// GroupNode represents a group declaration, which doubles as a field and inline
// message declaration. It can represent extension fields as well as
// non-extension fields (both inside of messages and inside of one-ofs).
// Example:
//
// optional group Key = 4 {
// optional uint64 id = 1;
// optional string name = 2;
// }
type GroupNode struct {
	compositeNode
	Label   FieldLabel          // the optional label keyword; see FieldLabel.IsPresent
	Keyword *KeywordNode        // the "group" keyword
	Name    *IdentNode          // the group's name (doubles as the implicit message's name)
	Equals  *RuneNode           // the "=" rune; may be nil
	Tag     *UintLiteralNode    // the group field's tag number; may be nil
	Options *CompactOptionsNode // nil when the declaration has no compact options
	// MessageBody holds the group's body: open brace, declarations, close brace.
	MessageBody
	// This is an up-link to the containing *ExtendNode for groups
	// that are defined inside of "extend" blocks.
	Extendee *ExtendNode
}

// msgElement, oneofElement, and extendElement mark GroupNode as valid inside
// message, oneof, and extend bodies respectively.
func (*GroupNode) msgElement() {}

func (*GroupNode) oneofElement() {}

func (*GroupNode) extendElement() {}
// NewGroupNode creates a new *GroupNode. The label and options arguments may be
// nil but the others must be non-nil.
//   - label: The token corresponding to the label keyword if present ("optional",
//     "required", or "repeated").
//   - keyword: The token corresponding to the "group" keyword.
//   - name: The token corresponding to the field's name.
//   - equals: The token corresponding to the '=' rune after the name.
//   - tag: The token corresponding to the field's tag number.
//   - opts: Optional set of field options.
//   - openBrace: The token corresponding to the "{" rune that starts the body.
//   - decls: All declarations inside the group body.
//   - closeBrace: The token corresponding to the "}" rune that ends the body.
func NewGroupNode(label *KeywordNode, keyword *KeywordNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) *GroupNode {
	if keyword == nil {
		// Fixed: this previously panicked with "fieldType is nil" (copied
		// from NewFieldNode), but the nil argument being reported is keyword.
		panic("keyword is nil")
	}
	if name == nil {
		panic("name is nil")
	}
	if openBrace == nil {
		panic("openBrace is nil")
	}
	if closeBrace == nil {
		panic("closeBrace is nil")
	}
	// Fixed children: keyword, name, open brace, close brace, plus each decl;
	// label, equals, tag, and options are optional.
	numChildren := 4 + len(decls)
	if label != nil {
		numChildren++
	}
	if equals != nil {
		numChildren++
	}
	if tag != nil {
		numChildren++
	}
	if opts != nil {
		numChildren++
	}
	children := make([]Node, 0, numChildren)
	if label != nil {
		children = append(children, label)
	}
	children = append(children, keyword, name)
	if equals != nil {
		children = append(children, equals)
	}
	if tag != nil {
		children = append(children, tag)
	}
	if opts != nil {
		children = append(children, opts)
	}
	children = append(children, openBrace)
	for _, decl := range decls {
		children = append(children, decl)
	}
	children = append(children, closeBrace)
	ret := &GroupNode{
		compositeNode: compositeNode{
			children: children,
		},
		Label:   newFieldLabel(label),
		Keyword: keyword,
		Name:    name,
		Equals:  equals,
		Tag:     tag,
		Options: opts,
	}
	// The body fields (OpenBrace, Decls, CloseBrace) live in the embedded
	// MessageBody and are populated by this shared helper.
	populateMessageBody(&ret.MessageBody, openBrace, decls, closeBrace)
	return ret
}
// FieldLabel returns the label keyword node, or nil when no label was present.
// It implements FieldDeclNode.
func (n *GroupNode) FieldLabel() Node {
	if n.Label.KeywordNode == nil {
		// return nil interface to indicate absence, not a typed nil
		return nil
	}
	return n.Label.KeywordNode
}

// FieldName returns the node for the group's name.
func (n *GroupNode) FieldName() Node {
	return n.Name
}

// FieldType returns the "group" keyword node, which serves as the field's type.
func (n *GroupNode) FieldType() Node {
	return n.Keyword
}

// FieldTag returns the node for the group field's tag number. When the
// declaration had no tag, the group node itself is returned so the result is
// never nil.
func (n *GroupNode) FieldTag() Node {
	if n.Tag == nil {
		return n
	}
	return n.Tag
}

// FieldExtendee returns the extendee of the enclosing "extend" block, or nil
// when this group is not declared inside one.
func (n *GroupNode) FieldExtendee() Node {
	if n.Extendee != nil {
		return n.Extendee.Extendee
	}
	return nil
}

// GetGroupKeyword returns the "group" keyword node.
func (n *GroupNode) GetGroupKeyword() Node {
	return n.Keyword
}

// GetOptions returns the group field's compact options; nil when none were
// declared.
func (n *GroupNode) GetOptions() *CompactOptionsNode {
	return n.Options
}
// RangeOptions invokes fn once for each option in the group field's compact
// options, stopping early if fn returns false. It is a no-op when the group
// was declared without compact options.
func (n *GroupNode) RangeOptions(fn func(*OptionNode) bool) {
	// NewGroupNode explicitly allows opts to be nil, so guard against a nil
	// pointer dereference here (previously n.Options.Options panicked for
	// groups declared without options).
	if n.Options == nil {
		return
	}
	for _, opt := range n.Options.Options {
		if !fn(opt) {
			return
		}
	}
}
// AsMessage returns a view of this group as the message type the group
// implicitly defines.
func (n *GroupNode) AsMessage() *SyntheticGroupMessageNode {
	return (*SyntheticGroupMessageNode)(n)
}

// SyntheticGroupMessageNode is a view of a GroupNode that implements MessageDeclNode.
// Since a group field implicitly defines a message type, this node represents
// that message type while the corresponding GroupNode represents the field.
//
// This type is considered synthetic since it never appears in a file's AST, but
// is only returned from other accessors (e.g. GroupNode.AsMessage).
type SyntheticGroupMessageNode GroupNode

// MessageName returns the node for the implicit message's name, which is the
// group's name.
func (n *SyntheticGroupMessageNode) MessageName() Node {
	return n.Name
}

// RangeOptions invokes fn once for each *OptionNode among the group body's
// declarations, stopping early if fn returns false.
func (n *SyntheticGroupMessageNode) RangeOptions(fn func(*OptionNode) bool) {
	for _, decl := range n.Decls {
		if opt, ok := decl.(*OptionNode); ok {
			if !fn(opt) {
				return
			}
		}
	}
}
// OneofDeclNode is a node in the AST that defines a oneof. There are
// multiple types of AST nodes that declare oneofs:
//   - *OneofNode
//   - *SyntheticOneof
//
// This also allows NoSourceNode to be used in place of one of the above
// for some usages.
type OneofDeclNode interface {
	NodeWithOptions
	// OneofName returns the node for the oneof's name.
	OneofName() Node
}

// Compile-time checks that all expected node types satisfy OneofDeclNode.
var _ OneofDeclNode = (*OneofNode)(nil)
var _ OneofDeclNode = (*SyntheticOneof)(nil)
var _ OneofDeclNode = (*NoSourceNode)(nil)
// OneofNode represents a one-of declaration. Example:
//
// oneof query {
// string by_name = 2;
// Type by_type = 3;
// Address by_address = 4;
// Labels by_label = 5;
// }
type OneofNode struct {
	compositeNode
	Keyword    *KeywordNode   // the "oneof" keyword
	Name       *IdentNode     // the oneof's name
	OpenBrace  *RuneNode      // the "{" rune that opens the body
	Decls      []OneofElement // all declarations inside the oneof body
	CloseBrace *RuneNode      // the "}" rune that closes the body
}

// msgElement marks OneofNode as valid inside a message body.
func (*OneofNode) msgElement() {}
// NewOneofNode creates a new *OneofNode. All arguments must be non-nil. While
// it is technically allowed for decls to be nil or empty, the resulting node
// will not be a valid oneof, which must have at least one field.
//   - keyword: The token corresponding to the "oneof" keyword.
//   - name: The token corresponding to the oneof's name.
//   - openBrace: The token corresponding to the "{" rune that starts the body.
//   - decls: All declarations inside the oneof body.
//   - closeBrace: The token corresponding to the "}" rune that ends the body.
//
// This function panics if any element of decls has a concrete type that is
// not a valid OneofElement.
func NewOneofNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []OneofElement, closeBrace *RuneNode) *OneofNode {
	switch {
	case keyword == nil:
		panic("keyword is nil")
	case name == nil:
		panic("name is nil")
	case openBrace == nil:
		panic("openBrace is nil")
	case closeBrace == nil:
		panic("closeBrace is nil")
	}
	// Children are: keyword, name, open brace, each decl, close brace.
	// Each decl is validated as it is added.
	kids := make([]Node, 0, 4+len(decls))
	kids = append(kids, keyword, name, openBrace)
	for _, d := range decls {
		switch d.(type) {
		case *OptionNode, *FieldNode, *GroupNode, *EmptyDeclNode:
			kids = append(kids, d)
		default:
			panic(fmt.Sprintf("invalid OneofElement type: %T", d))
		}
	}
	kids = append(kids, closeBrace)
	return &OneofNode{
		compositeNode: compositeNode{
			children: kids,
		},
		Keyword:    keyword,
		Name:       name,
		OpenBrace:  openBrace,
		Decls:      decls,
		CloseBrace: closeBrace,
	}
}
// OneofName returns the node for the oneof's name.
// It implements OneofDeclNode.
func (n *OneofNode) OneofName() Node {
	return n.Name
}
// RangeOptions invokes fn once for each *OptionNode among the oneof's
// declarations, in source order, stopping early if fn returns false.
func (n *OneofNode) RangeOptions(fn func(*OptionNode) bool) {
	for _, decl := range n.Decls {
		opt, ok := decl.(*OptionNode)
		if !ok {
			continue
		}
		if !fn(opt) {
			return
		}
	}
}
// OneofElement is an interface implemented by all AST nodes that can
// appear in the body of a oneof declaration.
type OneofElement interface {
	Node
	// oneofElement is an unexported marker method restricting implementations
	// to this package.
	oneofElement()
}

// Compile-time checks that all expected node types satisfy OneofElement.
var _ OneofElement = (*OptionNode)(nil)
var _ OneofElement = (*FieldNode)(nil)
var _ OneofElement = (*GroupNode)(nil)
var _ OneofElement = (*EmptyDeclNode)(nil)
// SyntheticOneof is not an actual node in the AST but a synthetic node
// that represents the oneof implied by a proto3 optional field.
//
// This type is considered synthetic since it never appears in a file's AST,
// but is only returned from other functions (e.g. NewSyntheticOneof).
type SyntheticOneof struct {
	// The proto3 optional field that implies the presence of this oneof.
	Field *FieldNode
}

// Compile-time check that SyntheticOneof satisfies Node.
var _ Node = (*SyntheticOneof)(nil)

// NewSyntheticOneof creates a new *SyntheticOneof that corresponds to the
// given proto3 optional field.
func NewSyntheticOneof(field *FieldNode) *SyntheticOneof {
	return &SyntheticOneof{Field: field}
}
// Start returns the start token of the underlying field.
func (n *SyntheticOneof) Start() Token {
	return n.Field.Start()
}

// End returns the end token of the underlying field.
func (n *SyntheticOneof) End() Token {
	return n.Field.End()
}

// LeadingComments always returns nil: a synthetic oneof has no source text.
func (n *SyntheticOneof) LeadingComments() []Comment {
	return nil
}

// TrailingComments always returns nil: a synthetic oneof has no source text.
func (n *SyntheticOneof) TrailingComments() []Comment {
	return nil
}

// OneofName returns the underlying field's name node.
func (n *SyntheticOneof) OneofName() Node {
	return n.Field.FieldName()
}

// RangeOptions is a no-op: a synthetic oneof has no option declarations.
func (n *SyntheticOneof) RangeOptions(_ func(*OptionNode) bool) {
}
// MapTypeNode represents the type declaration for a map field. It defines
// both the key and value types for the map. Example:
//
// map<string, Values>
type MapTypeNode struct {
	compositeNode
	Keyword    *KeywordNode   // the "map" keyword
	OpenAngle  *RuneNode      // the "<" rune
	KeyType    *IdentNode     // the map's key type
	Comma      *RuneNode      // the "," rune between key and value types
	ValueType  IdentValueNode // the map's value type
	CloseAngle *RuneNode      // the ">" rune
}
// NewMapTypeNode creates a new *MapTypeNode. All arguments must be non-nil.
//   - keyword: The token corresponding to the "map" keyword.
//   - openAngle: The token corresponding to the "<" rune after the keyword.
//   - keyType: The token corresponding to the key type for the map.
//   - comma: The token corresponding to the "," rune between key and value types.
//   - valType: The token corresponding to the value type for the map.
//   - closeAngle: The token corresponding to the ">" rune that ends the declaration.
func NewMapTypeNode(keyword *KeywordNode, openAngle *RuneNode, keyType *IdentNode, comma *RuneNode, valType IdentValueNode, closeAngle *RuneNode) *MapTypeNode {
	switch {
	case keyword == nil:
		panic("keyword is nil")
	case openAngle == nil:
		panic("openAngle is nil")
	case keyType == nil:
		panic("keyType is nil")
	case comma == nil:
		panic("comma is nil")
	case valType == nil:
		panic("valType is nil")
	case closeAngle == nil:
		panic("closeAngle is nil")
	}
	return &MapTypeNode{
		compositeNode: compositeNode{
			children: []Node{keyword, openAngle, keyType, comma, valType, closeAngle},
		},
		Keyword:    keyword,
		OpenAngle:  openAngle,
		KeyType:    keyType,
		Comma:      comma,
		ValueType:  valType,
		CloseAngle: closeAngle,
	}
}
// MapFieldNode represents a map field declaration. Example:
//
// map<string,string> replacements = 3 [deprecated = true];
type MapFieldNode struct {
	compositeNode
	MapType   *MapTypeNode        // the map type, e.g. map<string, string>
	Name      *IdentNode          // the field's name
	Equals    *RuneNode           // the "=" rune; may be nil
	Tag       *UintLiteralNode    // the field's tag number; may be nil
	Options   *CompactOptionsNode // nil when the declaration has no compact options
	Semicolon *RuneNode           // the trailing ";" rune; may be nil
}

// msgElement marks MapFieldNode as valid inside a message body.
func (*MapFieldNode) msgElement() {}
// NewMapFieldNode creates a new *MapFieldNode. All arguments must be non-nil
// except opts, which may be nil.
//   - mapType: The token corresponding to the map type.
//   - name: The token corresponding to the field's name.
//   - equals: The token corresponding to the '=' rune after the name.
//   - tag: The token corresponding to the field's tag number.
//   - opts: Optional set of field options.
//   - semicolon: The token corresponding to the ";" rune that ends the declaration.
func NewMapFieldNode(mapType *MapTypeNode, name *IdentNode, equals *RuneNode, tag *UintLiteralNode, opts *CompactOptionsNode, semicolon *RuneNode) *MapFieldNode {
	if mapType == nil {
		panic("mapType is nil")
	}
	if name == nil {
		panic("name is nil")
	}
	// At most six children: type, name, equals, tag, options, semicolon.
	kids := make([]Node, 0, 6)
	kids = append(kids, mapType, name)
	if equals != nil {
		kids = append(kids, equals)
	}
	if tag != nil {
		kids = append(kids, tag)
	}
	if opts != nil {
		kids = append(kids, opts)
	}
	if semicolon != nil {
		kids = append(kids, semicolon)
	}
	return &MapFieldNode{
		compositeNode: compositeNode{
			children: kids,
		},
		MapType:   mapType,
		Name:      name,
		Equals:    equals,
		Tag:       tag,
		Options:   opts,
		Semicolon: semicolon,
	}
}
// FieldLabel always returns nil: map fields never have a label keyword.
func (n *MapFieldNode) FieldLabel() Node {
	return nil
}

// FieldName returns the node for the field's name.
func (n *MapFieldNode) FieldName() Node {
	return n.Name
}

// FieldType returns the map type node (e.g. map<string, string>).
func (n *MapFieldNode) FieldType() Node {
	return n.MapType
}

// FieldTag returns the node for the field's tag number. When the declaration
// had no tag, the map field node itself is returned so the result is never nil.
func (n *MapFieldNode) FieldTag() Node {
	if n.Tag == nil {
		return n
	}
	return n.Tag
}

// FieldExtendee always returns nil: map fields cannot appear in extend blocks.
func (n *MapFieldNode) FieldExtendee() Node {
	return nil
}

// GetGroupKeyword always returns nil: a map field is not a group.
func (n *MapFieldNode) GetGroupKeyword() Node {
	return nil
}

// GetOptions returns the field's compact options; nil when none were declared.
func (n *MapFieldNode) GetOptions() *CompactOptionsNode {
	return n.Options
}
// RangeOptions invokes fn once for each option in the field's compact options,
// stopping early if fn returns false. It is a no-op when the field was
// declared without compact options.
func (n *MapFieldNode) RangeOptions(fn func(*OptionNode) bool) {
	// NewMapFieldNode explicitly allows opts to be nil, so guard against a
	// nil pointer dereference here (previously n.Options.Options panicked
	// for map fields declared without options).
	if n.Options == nil {
		return
	}
	for _, opt := range n.Options.Options {
		if !fn(opt) {
			return
		}
	}
}
// AsMessage returns a view of this map field as the map-entry message type it
// implicitly defines.
func (n *MapFieldNode) AsMessage() *SyntheticMapEntryNode {
	return (*SyntheticMapEntryNode)(n)
}

// KeyField returns a synthetic field declaration for the map entry's "key"
// field (tag 1).
func (n *MapFieldNode) KeyField() *SyntheticMapField {
	return NewSyntheticMapField(n.MapType.KeyType, 1)
}

// ValueField returns a synthetic field declaration for the map entry's
// "value" field (tag 2).
func (n *MapFieldNode) ValueField() *SyntheticMapField {
	return NewSyntheticMapField(n.MapType.ValueType, 2)
}
// SyntheticMapEntryNode is a view of a MapFieldNode that implements MessageDeclNode.
// Since a map field implicitly defines a message type for the map entry,
// this node represents that message type.
//
// This type is considered synthetic since it never appears in a file's AST, but
// is only returned from other accessors (e.g. MapFieldNode.AsMessage).
type SyntheticMapEntryNode MapFieldNode

// MessageName returns the node for the entry message's name, which is the
// map field's name.
func (n *SyntheticMapEntryNode) MessageName() Node {
	return n.Name
}

// RangeOptions is a no-op: the implicit map-entry message declares no options.
func (n *SyntheticMapEntryNode) RangeOptions(_ func(*OptionNode) bool) {
}
// SyntheticMapField is not an actual node in the AST but a synthetic node
// that implements FieldDeclNode. These are used to represent the implicit
// field declarations of the "key" and "value" fields in a map entry.
//
// This type is considered synthetic since it never appears in a file's AST,
// but is only returned from other accessors and functions (e.g.
// MapFieldNode.KeyField, MapFieldNode.ValueField, and NewSyntheticMapField).
type SyntheticMapField struct {
	Ident IdentValueNode   // the key or value type from the map declaration
	Tag   *UintLiteralNode // synthesized tag: 1 for key, 2 for value
}

// NewSyntheticMapField creates a new *SyntheticMapField for the given
// identifier (either a key or value type in a map declaration) and tag
// number (1 for key, 2 for value). The ident argument must be non-nil
// (its start token seeds the synthetic tag's position).
func NewSyntheticMapField(ident IdentValueNode, tagNum uint64) *SyntheticMapField {
	tag := &UintLiteralNode{
		terminalNode: ident.Start().asTerminalNode(),
		Val:          tagNum,
	}
	return &SyntheticMapField{Ident: ident, Tag: tag}
}
// Start returns the start token of the underlying type identifier.
func (n *SyntheticMapField) Start() Token {
	return n.Ident.Start()
}

// End returns the end token of the underlying type identifier.
func (n *SyntheticMapField) End() Token {
	return n.Ident.End()
}

// LeadingComments always returns nil: a synthetic field has no source text.
func (n *SyntheticMapField) LeadingComments() []Comment {
	return nil
}

// TrailingComments always returns nil: a synthetic field has no source text.
func (n *SyntheticMapField) TrailingComments() []Comment {
	return nil
}

// FieldLabel returns the type identifier; a synthetic map field has no
// distinct label token.
func (n *SyntheticMapField) FieldLabel() Node {
	return n.Ident
}

// FieldName returns the type identifier; a synthetic map field has no
// distinct name token.
func (n *SyntheticMapField) FieldName() Node {
	return n.Ident
}

// FieldType returns the node for the field's type.
func (n *SyntheticMapField) FieldType() Node {
	return n.Ident
}

// FieldTag returns the synthesized tag node (1 for key, 2 for value). When
// the tag is nil, the synthetic field itself is returned so the result is
// never nil.
func (n *SyntheticMapField) FieldTag() Node {
	if n.Tag == nil {
		return n
	}
	return n.Tag
}

// FieldExtendee always returns nil: synthetic map fields are never extensions.
func (n *SyntheticMapField) FieldExtendee() Node {
	return nil
}

// GetGroupKeyword always returns nil: a synthetic map field is not a group.
func (n *SyntheticMapField) GetGroupKeyword() Node {
	return nil
}

// GetOptions always returns nil: synthetic map fields carry no options.
func (n *SyntheticMapField) GetOptions() *CompactOptionsNode {
	return nil
}

// RangeOptions is a no-op: synthetic map fields carry no options.
func (n *SyntheticMapField) RangeOptions(_ func(*OptionNode) bool) {
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import "fmt"
// FileDeclNode is a placeholder interface for AST nodes that represent files.
// This allows NoSourceNode to be used in place of *FileNode for some usages.
type FileDeclNode interface {
	NodeWithOptions
	// Name returns the file's name.
	Name() string
	// NodeInfo returns position and comment information for the given node.
	NodeInfo(n Node) NodeInfo
}

// Compile-time checks that the expected types satisfy FileDeclNode.
var _ FileDeclNode = (*FileNode)(nil)
var _ FileDeclNode = (*NoSourceNode)(nil)
// FileNode is the root of the AST hierarchy. It represents an entire
// protobuf source file.
type FileNode struct {
	compositeNode
	fileInfo *FileInfo // positions, comments, and token data for the file
	// A file has either a Syntax or Edition node, never both.
	// If both are nil, neither declaration is present and the
	// file is assumed to use "proto2" syntax.
	Syntax  *SyntaxNode
	Edition *EditionNode
	Decls   []FileElement // all top-level declarations in the file
	// This synthetic node allows access to final comments and whitespace
	EOF *RuneNode
}
// NewFileNode creates a new *FileNode. The syntax parameter is optional. If it
// is absent, it means the file had no syntax declaration.
//
// This function panics if the concrete type of any element of decls is not
// from this package.
func NewFileNode(info *FileInfo, syntax *SyntaxNode, decls []FileElement, eof Token) *FileNode {
	// Delegates to the shared implementation with no edition node.
	return newFileNode(info, syntax, nil, decls, eof)
}
// NewFileNodeWithEdition creates a new *FileNode. The edition parameter is required. If a file
// has no edition declaration, use NewFileNode instead.
//
// This function panics if the concrete type of any element of decls is not
// from this package.
func NewFileNodeWithEdition(info *FileInfo, edition *EditionNode, decls []FileElement, eof Token) *FileNode {
	if edition == nil {
		panic("edition is nil")
	}
	// Delegates to the shared implementation with no syntax node.
	return newFileNode(info, nil, edition, decls, eof)
}
// newFileNode is the shared implementation behind NewFileNode and
// NewFileNodeWithEdition; at most one of syntax and edition is non-nil.
// It panics if any element of decls has a concrete type that is not a
// valid FileElement.
func newFileNode(info *FileInfo, syntax *SyntaxNode, edition *EditionNode, decls []FileElement, eof Token) *FileNode {
	// Children: optional syntax/edition declaration, each decl, and a
	// synthesized EOF node.
	kids := make([]Node, 0, len(decls)+2)
	switch {
	case syntax != nil:
		kids = append(kids, syntax)
	case edition != nil:
		kids = append(kids, edition)
	}
	for _, d := range decls {
		switch d.(type) {
		case *PackageNode, *ImportNode, *OptionNode, *MessageNode,
			*EnumNode, *ExtendNode, *ServiceNode, *EmptyDeclNode:
		default:
			panic(fmt.Sprintf("invalid FileElement type: %T", d))
		}
		kids = append(kids, d)
	}
	// Wrap the EOF token in a rune node so trailing comments and whitespace
	// remain reachable from the AST.
	eofNode := NewRuneNode(0, eof)
	kids = append(kids, eofNode)
	return &FileNode{
		compositeNode: compositeNode{
			children: kids,
		},
		fileInfo: info,
		Syntax:   syntax,
		Edition:  edition,
		Decls:    decls,
		EOF:      eofNode,
	}
}
// NewEmptyFileNode returns an empty AST for a file with the given name.
func NewEmptyFileNode(filename string) *FileNode {
	fileInfo := NewFileInfo(filename, []byte{})
	// An empty file has no declarations; its only token is a zero-length EOF.
	return NewFileNode(fileInfo, nil, nil, fileInfo.AddToken(0, 0))
}
// Name returns the file's name. It implements FileDeclNode.
func (f *FileNode) Name() string {
	return f.fileInfo.Name()
}

// NodeInfo returns position and comment information for the given node.
// It implements FileDeclNode.
func (f *FileNode) NodeInfo(n Node) NodeInfo {
	return f.fileInfo.NodeInfo(n)
}

// TokenInfo returns position and comment information for the given token.
func (f *FileNode) TokenInfo(t Token) NodeInfo {
	return f.fileInfo.TokenInfo(t)
}

// ItemInfo returns position information for the given item.
func (f *FileNode) ItemInfo(i Item) ItemInfo {
	return f.fileInfo.ItemInfo(i)
}

// GetItem resolves an item to the token or comment it represents.
func (f *FileNode) GetItem(i Item) (Token, Comment) {
	return f.fileInfo.GetItem(i)
}

// Items returns the sequence of all items (tokens and comments) in the file.
func (f *FileNode) Items() Sequence[Item] {
	return f.fileInfo.Items()
}

// Tokens returns the sequence of all tokens in the file.
func (f *FileNode) Tokens() Sequence[Token] {
	return f.fileInfo.Tokens()
}

// RangeOptions invokes fn once for each top-level *OptionNode in the file,
// in source order, stopping early if fn returns false.
func (f *FileNode) RangeOptions(fn func(*OptionNode) bool) {
	for _, decl := range f.Decls {
		if opt, ok := decl.(*OptionNode); ok {
			if !fn(opt) {
				return
			}
		}
	}
}
// FileElement is an interface implemented by all AST nodes that are
// allowed as top-level declarations in the file.
type FileElement interface {
	Node
	// fileElement is an unexported marker method restricting implementations
	// to this package.
	fileElement()
}

// Compile-time checks that all expected node types satisfy FileElement.
var _ FileElement = (*ImportNode)(nil)
var _ FileElement = (*PackageNode)(nil)
var _ FileElement = (*OptionNode)(nil)
var _ FileElement = (*MessageNode)(nil)
var _ FileElement = (*EnumNode)(nil)
var _ FileElement = (*ExtendNode)(nil)
var _ FileElement = (*ServiceNode)(nil)
var _ FileElement = (*EmptyDeclNode)(nil)
// SyntaxNode represents a syntax declaration, which if present must be
// the first non-comment content. Example:
//
// syntax = "proto2";
//
// Files that don't have a syntax node are assumed to use proto2 syntax.
type SyntaxNode struct {
	compositeNode
	Keyword   *KeywordNode    // the "syntax" keyword
	Equals    *RuneNode       // the "=" rune
	Syntax    StringValueNode // the syntax value, e.g. "proto2" or "proto3"
	Semicolon *RuneNode       // the trailing ";" rune; may be nil
}
// NewSyntaxNode creates a new *SyntaxNode. The keyword, equals, and syntax
// arguments must be non-nil; semicolon may be nil.
//   - keyword: The token corresponding to the "syntax" keyword.
//   - equals: The token corresponding to the "=" rune.
//   - syntax: The actual syntax value, e.g. "proto2" or "proto3".
//   - semicolon: The token corresponding to the ";" rune that ends the declaration.
func NewSyntaxNode(keyword *KeywordNode, equals *RuneNode, syntax StringValueNode, semicolon *RuneNode) *SyntaxNode {
	switch {
	case keyword == nil:
		panic("keyword is nil")
	case equals == nil:
		panic("equals is nil")
	case syntax == nil:
		panic("syntax is nil")
	}
	kids := make([]Node, 0, 4)
	kids = append(kids, keyword, equals, syntax)
	if semicolon != nil {
		kids = append(kids, semicolon)
	}
	return &SyntaxNode{
		compositeNode: compositeNode{
			children: kids,
		},
		Keyword:   keyword,
		Equals:    equals,
		Syntax:    syntax,
		Semicolon: semicolon,
	}
}
// EditionNode represents an edition declaration, which if present must be
// the first non-comment content. Example:
//
// edition = "2023";
//
// Files may include either an edition node or a syntax node, but not both.
// If neither are present, the file is assumed to use proto2 syntax.
type EditionNode struct {
	compositeNode
	Keyword   *KeywordNode    // the "edition" keyword
	Equals    *RuneNode       // the "=" rune
	Edition   StringValueNode // the edition value, e.g. "2023"
	Semicolon *RuneNode       // the trailing ";" rune
}
// NewEditionNode creates a new *EditionNode. All four arguments must be non-nil:
//   - keyword: The token corresponding to the "edition" keyword.
//   - equals: The token corresponding to the "=" rune.
//   - edition: The actual edition value, e.g. "2023".
//   - semicolon: The token corresponding to the ";" rune that ends the declaration.
func NewEditionNode(keyword *KeywordNode, equals *RuneNode, edition StringValueNode, semicolon *RuneNode) *EditionNode {
	switch {
	case keyword == nil:
		panic("keyword is nil")
	case equals == nil:
		panic("equals is nil")
	case edition == nil:
		panic("edition is nil")
	case semicolon == nil:
		panic("semicolon is nil")
	}
	return &EditionNode{
		compositeNode: compositeNode{
			children: []Node{keyword, equals, edition, semicolon},
		},
		Keyword:   keyword,
		Equals:    equals,
		Edition:   edition,
		Semicolon: semicolon,
	}
}
// ImportNode represents an import statement. Example:
//
// import "google/protobuf/empty.proto";
type ImportNode struct {
	compositeNode
	Keyword *KeywordNode // the "import" keyword
	// Optional; if present indicates this is a public import
	Public *KeywordNode
	// Optional; if present indicates this is a weak import
	Weak      *KeywordNode
	Name      StringValueNode // the imported file name
	Semicolon *RuneNode       // the trailing ";" rune; may be nil
}
// NewImportNode creates a new *ImportNode. The public and weak arguments are optional
// and only one or the other (or neither) may be specified, not both. When public is
// non-nil, it indicates the "public" keyword in the import statement and means this is
// a public import. When weak is non-nil, it indicates the "weak" keyword in the import
// statement and means this is a weak import. When both are nil, this is a normal import.
// If both are non-nil, public takes precedence in the node's children. The keyword and
// name arguments must be non-nil; semicolon may be nil (e.g. for an incomplete
// declaration):
//   - keyword: The token corresponding to the "import" keyword.
//   - public: The token corresponding to the optional "public" keyword.
//   - weak: The token corresponding to the optional "weak" keyword.
//   - name: The actual imported file name.
//   - semicolon: The token corresponding to the ";" rune that ends the declaration.
func NewImportNode(keyword *KeywordNode, public *KeywordNode, weak *KeywordNode, name StringValueNode, semicolon *RuneNode) *ImportNode {
	if keyword == nil {
		panic("keyword is nil")
	}
	if name == nil {
		panic("name is nil")
	}
	// Pre-size the children slice: keyword and name are always present,
	// plus one slot for a present semicolon and one for a public/weak
	// modifier. (The condition was previously inverted, counting a slot
	// when semicolon was absent instead of present.)
	numChildren := 2
	if semicolon != nil {
		numChildren++
	}
	if public != nil || weak != nil {
		numChildren++
	}
	children := make([]Node, 0, numChildren)
	children = append(children, keyword)
	if public != nil {
		children = append(children, public)
	} else if weak != nil {
		children = append(children, weak)
	}
	children = append(children, name)
	if semicolon != nil {
		children = append(children, semicolon)
	}
	return &ImportNode{
		compositeNode: compositeNode{
			children: children,
		},
		Keyword:   keyword,
		Public:    public,
		Weak:      weak,
		Name:      name,
		Semicolon: semicolon,
	}
}

// fileElement marks *ImportNode as a valid element of a file body.
func (*ImportNode) fileElement() {}
// PackageNode represents a package declaration. Example:
//
//	package foobar.com;
type PackageNode struct {
	compositeNode
	// Keyword is the "package" keyword token.
	Keyword *KeywordNode
	// Name is the declared package name (possibly a compound identifier).
	Name IdentValueNode
	// Semicolon is the ";" rune token that terminates the declaration.
	Semicolon *RuneNode
}

// fileElement marks *PackageNode as a valid element of a file body.
func (*PackageNode) fileElement() {}
// NewPackageNode creates a new *PackageNode. The keyword and name arguments
// must be non-nil or this function panics; semicolon may be nil (e.g. for an
// incomplete declaration):
//   - keyword: The token corresponding to the "package" keyword.
//   - name: The package name declared for the file.
//   - semicolon: The token corresponding to the ";" rune that ends the declaration.
func NewPackageNode(keyword *KeywordNode, name IdentValueNode, semicolon *RuneNode) *PackageNode {
	switch {
	case keyword == nil:
		panic("keyword is nil")
	case name == nil:
		panic("name is nil")
	}
	children := make([]Node, 0, 3)
	children = append(children, keyword, name)
	if semicolon != nil {
		children = append(children, semicolon)
	}
	return &PackageNode{
		compositeNode: compositeNode{children: children},
		Keyword:       keyword,
		Name:          name,
		Semicolon:     semicolon,
	}
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import (
"fmt"
"sort"
"unicode/utf8"
)
// FileInfo contains information about the contents of a source file, including
// details about comments and items. A lexer accumulates these details as it
// scans the file contents. This allows efficient representation of things like
// source positions.
type FileInfo struct {
	// The name of the source file.
	name string
	// The raw contents of the source file.
	data []byte
	// The offsets for each line in the file. The value is the zero-based byte
	// offset for a given line. The line is given by its index. So the value at
	// index 0 is the offset for the first line (which is always zero). The
	// value at index 1 is the offset at which the second line begins. Etc.
	lines []int
	// The info for every comment in the file. This is empty if the file has no
	// comments. The first entry corresponds to the first comment in the file,
	// and so on.
	comments []commentInfo
	// The info for every lexed item in the file. The last item in the slice
	// corresponds to the EOF, so every file (even an empty one) should have at
	// least one entry. This includes all terminal symbols (tokens) in the AST
	// as well as all comments.
	items []itemSpan
}

// commentInfo records where a comment lives in the file's items slice and
// which token the comment is attributed to.
type commentInfo struct {
	// the index of the item, in the file's items slice, that represents this
	// comment
	index int
	// the index of the token to which this comment is attributed.
	attributedToIndex int
}

// itemSpan records the location of a single lexed item as a byte offset and
// length within the file's raw data.
type itemSpan struct {
	// the offset into the file of the first character of an item.
	offset int
	// the length of the item
	length int
}
// NewFileInfo creates a new instance for the given file.
func NewFileInfo(filename string, contents []byte) *FileInfo {
	return &FileInfo{
		name: filename,
		data: contents,
		// The first line always starts at offset zero.
		lines: []int{0},
	}
}

// Name returns the name of the source file.
func (f *FileInfo) Name() string {
	return f.name
}
// AddLine adds the offset representing the beginning of the "next" line in the file.
// The first line always starts at offset 0, the second line starts at offset-of-newline-char+1.
// Offsets must be added in strictly increasing order and must not exceed the
// file size; violations panic.
func (f *FileInfo) AddLine(offset int) {
	switch {
	case offset < 0:
		panic(fmt.Sprintf("invalid offset: %d must not be negative", offset))
	case offset > len(f.data):
		panic(fmt.Sprintf("invalid offset: %d is greater than file size %d", offset, len(f.data)))
	}
	if n := len(f.lines); n > 0 {
		if prev := f.lines[n-1]; offset <= prev {
			panic(fmt.Sprintf("invalid offset: %d is not greater than previously observed line offset %d", offset, prev))
		}
	}
	f.lines = append(f.lines, offset)
}
// AddToken adds info about a token at the given location to this file. It
// returns a value that allows access to all of the token's details. Tokens
// must be added in source order (each strictly after the previous token's
// end) and must lie within the file; violations panic.
func (f *FileInfo) AddToken(offset, length int) Token {
	switch {
	case offset < 0:
		panic(fmt.Sprintf("invalid offset: %d must not be negative", offset))
	case length < 0:
		panic(fmt.Sprintf("invalid length: %d must not be negative", length))
	case offset+length > len(f.data):
		panic(fmt.Sprintf("invalid offset+length: %d is greater than file size %d", offset+length, len(f.data)))
	}
	id := len(f.items)
	if id > 0 {
		prev := f.items[id-1]
		if prevEnd := prev.offset + prev.length - 1; offset <= prevEnd {
			panic(fmt.Sprintf("invalid offset: %d is not greater than previously observed token end %d", offset, prevEnd))
		}
	}
	f.items = append(f.items, itemSpan{offset: offset, length: length})
	return Token(id)
}
// AddComment adds info about a comment to this file. Comments must first be
// added as items via f.AddToken(). The given comment argument is the Token
// from that step. The given attributedTo argument indicates another token in the
// file with which the comment is associated. If comment's offset is before that
// of attributedTo, then this is a leading comment. Otherwise, it is a trailing
// comment. Comments must be added in order; violations panic.
func (f *FileInfo) AddComment(comment, attributedTo Token) Comment {
	if n := len(f.comments); n > 0 {
		prev := f.comments[n-1]
		if int(comment) <= prev.index {
			panic(fmt.Sprintf("invalid index: %d is not greater than previously observed comment index %d", comment, prev.index))
		}
		if int(attributedTo) < prev.attributedToIndex {
			panic(fmt.Sprintf("invalid attribution: %d is not greater than previously observed comment attribution index %d", attributedTo, prev.attributedToIndex))
		}
	}
	f.comments = append(f.comments, commentInfo{index: int(comment), attributedToIndex: int(attributedTo)})
	return Comment{fileInfo: f, index: len(f.comments) - 1}
}
// NodeInfo returns details from the original source for the given AST node.
//
// If the given n is out of range, this returns an invalid NodeInfo (i.e.
// nodeInfo.IsValid() returns false). If the given n is not out of range but
// also from a different file than f, then the result is undefined.
func (f *FileInfo) NodeInfo(n Node) NodeInfo {
	// A node's Start and End tokens bound the range of items it spans.
	return f.nodeInfo(int(n.Start()), int(n.End()))
}

// TokenInfo returns details from the original source for the given token.
//
// If the given t is out of range, this returns an invalid NodeInfo (i.e.
// nodeInfo.IsValid() returns false). If the given t is not out of range but
// also from a different file than f, then the result is undefined.
func (f *FileInfo) TokenInfo(t Token) NodeInfo {
	// A single token is a degenerate range that starts and ends at itself.
	return f.nodeInfo(int(t), int(t))
}
// nodeInfo builds a NodeInfo for the item range [start, end]. Out-of-range
// indices yield an invalid NodeInfo (fileInfo set, indices zero) rather
// than panicking.
func (f *FileInfo) nodeInfo(start, end int) NodeInfo {
	inRange := func(i int) bool { return i >= 0 && i < len(f.items) }
	if !inRange(start) || !inRange(end) {
		return NodeInfo{fileInfo: f}
	}
	return NodeInfo{fileInfo: f, startIndex: start, endIndex: end}
}
// ItemInfo returns details from the original source for the given item.
//
// If the given i is out of range, this returns nil. If the given i is not
// out of range but also from a different file than f, then the result is
// undefined.
func (f *FileInfo) ItemInfo(i Item) ItemInfo {
	token, comment := f.GetItem(i)
	switch {
	case token != TokenError:
		return f.TokenInfo(token)
	case comment.IsValid():
		return comment
	default:
		return nil
	}
}
// GetItem returns the token or comment represented by the given item. Only one
// of the return values will be valid. If the item is a token then the returned
// comment will be a zero value and thus invalid (i.e. comment.IsValid() returns
// false). If the item is a comment then the returned token will be TokenError.
//
// If the given i is out of range, this returns (TokenError, Comment{}). If the
// given i is not out of range but also from a different file than f, then
// the result is undefined.
func (f *FileInfo) GetItem(i Item) (Token, Comment) {
	if i < 0 || int(i) >= len(f.items) {
		return TokenError, Comment{}
	}
	if !f.isComment(i) {
		return Token(i), Comment{}
	}
	// Binary-search f.comments for the entry whose item index matches i;
	// comments are stored in increasing item order.
	pos := sort.Search(len(f.comments), func(idx int) bool {
		return f.comments[idx].index >= int(i)
	})
	if pos < len(f.comments) && f.comments[pos].index == int(i) {
		return TokenError, Comment{fileInfo: f, index: pos}
	}
	// Unreachable for a consistent FileInfo: isComment said i is a comment,
	// yet no entry for it exists in f.comments.
	return TokenError, Comment{}
}
// isDummyFile reports whether f is a placeholder carrying no real source
// details. Such instances have no recorded line offsets (lines is nil),
// which distinguishes them from files created via NewFileInfo, which always
// records offset zero for the first line.
func (f *FileInfo) isDummyFile() bool {
	return f == nil || f.lines == nil
}
// Sequence represents a navigable sequence of elements.
type Sequence[T any] interface {
	// First returns the first element in the sequence. The bool return
	// is false if this sequence contains no elements. For example, an
	// empty file has no items or tokens.
	First() (T, bool)
	// Next returns the next element in the sequence that comes after
	// the given element. The bool returns is false if there is no next
	// item (i.e. the given element is the last one). It also returns
	// false if the given element is invalid.
	Next(T) (T, bool)
	// Last returns the last element in the sequence. The bool return
	// is false if this sequence contains no elements. For example, an
	// empty file has no items or tokens.
	Last() (T, bool)
	// Previous returns the previous element in the sequence that comes
	// before the given element. The bool returns is false if there is no
	// previous item (i.e. the given element is the first one). It also
	// returns false if the given element is invalid.
	Previous(T) (T, bool)
}

// Items returns a sequence over all items in the file, including comments.
func (f *FileInfo) Items() Sequence[Item] {
	return items{fileInfo: f}
}

// Tokens returns a sequence over only the tokens in the file, skipping
// comments.
func (f *FileInfo) Tokens() Sequence[Token] {
	return tokens{fileInfo: f}
}
// items implements Sequence[Item] over all of a file's items, including
// comments.
type items struct {
	fileInfo *FileInfo
}

// First returns item 0, or false if the file has no items.
func (i items) First() (Item, bool) {
	return 0, len(i.fileInfo.items) > 0
}

// Next returns the item after the given one, or false at the end or for an
// invalid item.
func (i items) Next(item Item) (Item, bool) {
	if item < 0 || int(item) >= len(i.fileInfo.items)-1 {
		return 0, false
	}
	return i.fileInfo.itemForward(item+1, true)
}

// Last returns the final item, or false if the file has no items.
func (i items) Last() (Item, bool) {
	n := len(i.fileInfo.items)
	if n == 0 {
		return 0, false
	}
	return Item(n - 1), true
}

// Previous returns the item before the given one, or false at the start or
// for an invalid item.
func (i items) Previous(item Item) (Item, bool) {
	if item <= 0 || int(item) >= len(i.fileInfo.items) {
		return 0, false
	}
	return i.fileInfo.itemBackward(item-1, true)
}
// tokens implements Sequence[Token] over a file's tokens, skipping comments.
type tokens struct {
	fileInfo *FileInfo
}

// First returns the first non-comment item as a token.
func (t tokens) First() (Token, bool) {
	item, ok := t.fileInfo.itemForward(0, false)
	return Token(item), ok
}

// Next returns the next token after the given one, or false at the end or
// for an invalid token.
func (t tokens) Next(tok Token) (Token, bool) {
	if tok < 0 || int(tok) >= len(t.fileInfo.items)-1 {
		return 0, false
	}
	item, ok := t.fileInfo.itemForward(Item(tok)+1, false)
	return Token(item), ok
}

// Last returns the last non-comment item as a token.
func (t tokens) Last() (Token, bool) {
	item, ok := t.fileInfo.itemBackward(Item(len(t.fileInfo.items)-1), false)
	return Token(item), ok
}

// Previous returns the token before the given one, or false at the start or
// for an invalid token.
func (t tokens) Previous(tok Token) (Token, bool) {
	if tok <= 0 || int(tok) >= len(t.fileInfo.items) {
		return 0, false
	}
	item, ok := t.fileInfo.itemBackward(Item(tok)-1, false)
	return Token(item), ok
}
// itemForward returns the first item at or after i, skipping comments unless
// allowComment is true. The bool result is false if no such item exists.
func (f *FileInfo) itemForward(i Item, allowComment bool) (Item, bool) {
	for ; i < Item(len(f.items)); i++ {
		if allowComment || !f.isComment(i) {
			return i, true
		}
	}
	return 0, false
}

// itemBackward returns the first item at or before i, skipping comments
// unless allowComment is true. The bool result is false if no such item
// exists.
func (f *FileInfo) itemBackward(i Item, allowComment bool) (Item, bool) {
	for ; i >= 0; i-- {
		if allowComment || !f.isComment(i) {
			return i, true
		}
	}
	return 0, false
}
// isComment reports whether i refers to a comment.
// (If it returns false, i refers to a token.)
func (f *FileInfo) isComment(i Item) bool {
	item := f.items[i]
	// A comment is at least two characters ("//" or "/*").
	if item.length < 2 {
		return false
	}
	// see if item text starts with "//" or "/*"
	if f.data[item.offset] != '/' {
		return false
	}
	c := f.data[item.offset+1]
	return c == '/' || c == '*'
}
// SourcePos computes the position (line and column) for the given byte
// offset in the file. Columns count runes, not bytes, and a tab advances
// the column to the next multiple-of-8 tab stop.
func (f *FileInfo) SourcePos(offset int) SourcePos {
	// Find the first line that starts *after* offset; the line containing
	// offset is the one before it. Since lines[0] is always 0, any
	// non-negative offset yields lineNumber >= 1.
	lineNumber := sort.Search(len(f.lines), func(n int) bool {
		return f.lines[n] > offset
	})
	// If it weren't for tabs and multibyte unicode characters, we
	// could trivially compute the column just based on offset and the
	// starting offset of lineNumber :(
	// Wish this were more efficient... that would require also storing
	// computed line+column information, which would triple the size of
	// f's items slice...
	col := 0
	for i := f.lines[lineNumber-1]; i < offset; i++ {
		if f.data[i] == '\t' {
			nextTabStop := 8 - (col % 8)
			col += nextTabStop
		} else if utf8.RuneStart(f.data[i]) {
			// Count only the first byte of each rune so the column is a
			// rune count rather than a byte count.
			col++
		}
	}
	return SourcePos{
		Filename: f.name,
		Offset:   offset,
		Line:     lineNumber,
		// Columns are 1-indexed in this AST
		Col: col + 1,
	}
}
// Token represents a single lexed token.
type Token int

// TokenError indicates an invalid token. It is returned from query
// functions when no valid token satisfies the request.
const TokenError = Token(-1)

// AsItem returns the Item that corresponds to t. Tokens and items share
// the same index space, so this is a direct conversion.
func (t Token) AsItem() Item {
	return Item(t)
}

// asTerminalNode converts the token to the terminalNode representation
// embedded in leaf AST nodes.
func (t Token) asTerminalNode() terminalNode {
	return terminalNode(t)
}

// Item represents an item lexed from source. It represents either
// a Token or a Comment.
type Item int

// ItemInfo provides details about an item's location in the source file and
// its contents.
type ItemInfo interface {
	SourceSpan
	// LeadingWhitespace returns the whitespace between this item and the
	// one preceding it in the file.
	LeadingWhitespace() string
	// RawText returns the item's text exactly as it appears in the source.
	RawText() string
}
// NodeInfo represents the details for a node or token in the source file's AST.
// It provides access to information about the node's location in the source
// file. It also provides access to the original text in the source file (with
// all the original formatting intact) and also provides access to surrounding
// comments.
type NodeInfo struct {
	fileInfo *FileInfo
	// startIndex and endIndex are indices into fileInfo.items for the first
	// and last items the node spans.
	startIndex, endIndex int
}

var _ ItemInfo = NodeInfo{}

// IsValid returns true if this node info is valid. If n is a zero-value struct,
// it is not valid.
func (n NodeInfo) IsValid() bool {
	return n.fileInfo != nil
}
// Start returns the starting position of the element. This is the first
// character of the node or token.
func (n NodeInfo) Start() SourcePos {
	if n.fileInfo.isDummyFile() {
		return UnknownPos(n.fileInfo.name)
	}
	tok := n.fileInfo.items[n.startIndex]
	return n.fileInfo.SourcePos(tok.offset)
}

// End returns the ending position of the element, exclusive. This is the
// location after the last character of the node or token. If n returns
// the same position for Start() and End(), the element in source had a
// length of zero (which should only happen for the special EOF token
// that designates the end of the file).
func (n NodeInfo) End() SourcePos {
	if n.fileInfo.isDummyFile() {
		return UnknownPos(n.fileInfo.name)
	}
	tok := n.fileInfo.items[n.endIndex]
	// find offset of last character in the span
	offset := tok.offset
	if tok.length > 0 {
		offset += tok.length - 1
	}
	pos := n.fileInfo.SourcePos(offset)
	if tok.length > 0 {
		// We return "open range", so end is the position *after* the
		// last character in the span. So we adjust
		pos.Col++
	}
	// A zero-length item (EOF) yields the same position as Start.
	return pos
}
// LeadingWhitespace returns any whitespace prior to the element. If there
// were comments in between this element and the previous one, this will
// return the whitespace between the last such comment in the element. If
// there were no such comments, this returns the whitespace between the
// previous element and the current one.
func (n NodeInfo) LeadingWhitespace() string {
	if n.fileInfo.isDummyFile() {
		return ""
	}
	span := n.fileInfo.items[n.startIndex]
	// The whitespace runs from the end of the previous item (or the start
	// of the file, if this is the first item) to this item's offset.
	prevEnd := 0
	if n.startIndex > 0 {
		prev := n.fileInfo.items[n.startIndex-1]
		prevEnd = prev.offset + prev.length
	}
	return string(n.fileInfo.data[prevEnd:span.offset])
}
// LeadingComments returns all comments in the source that exist between the
// element and the previous element, except for any trailing comment on the
// previous element.
func (n NodeInfo) LeadingComments() Comments {
	if n.fileInfo.isDummyFile() {
		return EmptyComments
	}
	// Binary-search for the first comment attributed to this element's
	// start token; comments are stored in attribution order.
	start := sort.Search(len(n.fileInfo.comments), func(i int) bool {
		return n.fileInfo.comments[i].attributedToIndex >= n.startIndex
	})
	if start == len(n.fileInfo.comments) || n.fileInfo.comments[start].attributedToIndex != n.startIndex {
		// no comments associated with this token
		return EmptyComments
	}
	numComments := 0
	for i := start; i < len(n.fileInfo.comments); i++ {
		comment := n.fileInfo.comments[i]
		// Only count comments that appear *before* the token in the file;
		// one at or after it would be a trailing comment of this token.
		if comment.attributedToIndex == n.startIndex &&
			comment.index < n.startIndex {
			numComments++
		} else {
			break
		}
	}
	return Comments{
		fileInfo: n.fileInfo,
		first:    start,
		num:      numComments,
	}
}

// TrailingComments returns the trailing comment for the element, if any.
// An element will have a trailing comment only if it is the last token
// on a line and is followed by a comment on the same line. Typically, the
// following comment is a line-style comment (starting with "//").
//
// If the following comment is a block-style comment that spans multiple
// lines, and the next token is on the same line as the end of the comment,
// the comment is NOT considered a trailing comment.
//
// Examples:
//
//	foo // this is a trailing comment for foo
//
//	bar /* this is a trailing comment for bar */
//
//	baz /* this is a trailing
//	       comment for baz */
//
//	fizz /* this is NOT a trailing
//	        comment for fizz because
//	        its on the same line as the
//	        following token buzz */ buzz
func (n NodeInfo) TrailingComments() Comments {
	if n.fileInfo.isDummyFile() {
		return EmptyComments
	}
	// Binary-search for the first comment attributed to this element's end
	// token that appears *after* it in the file.
	start := sort.Search(len(n.fileInfo.comments), func(i int) bool {
		comment := n.fileInfo.comments[i]
		return comment.attributedToIndex >= n.endIndex &&
			comment.index > n.endIndex
	})
	if start == len(n.fileInfo.comments) || n.fileInfo.comments[start].attributedToIndex != n.endIndex {
		// no comments associated with this token
		return EmptyComments
	}
	numComments := 0
	for i := start; i < len(n.fileInfo.comments); i++ {
		comment := n.fileInfo.comments[i]
		if comment.attributedToIndex == n.endIndex {
			numComments++
		} else {
			break
		}
	}
	return Comments{
		fileInfo: n.fileInfo,
		first:    start,
		num:      numComments,
	}
}
// RawText returns the actual text in the source file that corresponds to the
// element. If the element is a node in the AST that encompasses multiple
// items (like an entire declaration), the full text of all items is returned
// including any interior whitespace and comments.
func (n NodeInfo) RawText() string {
	first := n.fileInfo.items[n.startIndex]
	last := n.fileInfo.items[n.endIndex]
	end := last.offset + last.length
	return string(n.fileInfo.data[first.offset:end])
}
// SourcePos identifies a location in a proto source file.
type SourcePos struct {
	// Filename is the name of the source file.
	Filename string
	// The line and column numbers for this position. These are
	// one-based, so the first line and column is 1 (not zero). If
	// either is zero, then the line and column are unknown and
	// only the file name is known.
	Line, Col int
	// The offset, in bytes, from the beginning of the file. This
	// is zero-based: the first character in the file is offset zero.
	Offset int
}
// String renders the position as "file:line:col", or just the file name
// when the line or column is unknown.
func (pos SourcePos) String() string {
	if pos.Line > 0 && pos.Col > 0 {
		return fmt.Sprintf("%s:%d:%d", pos.Filename, pos.Line, pos.Col)
	}
	return pos.Filename
}
// SourceSpan represents a range of source positions.
type SourceSpan interface {
	// Start returns the first position of the span.
	Start() SourcePos
	// End returns the end position of the span.
	End() SourcePos
}

// NewSourceSpan creates a new span that covers the given range.
func NewSourceSpan(start SourcePos, end SourcePos) SourceSpan {
	return sourceSpan{StartPos: start, EndPos: end}
}

// sourceSpan is a simple value type implementing SourceSpan.
type sourceSpan struct {
	StartPos SourcePos
	EndPos   SourcePos
}

// Start implements SourceSpan.
func (p sourceSpan) Start() SourcePos {
	return p.StartPos
}

// End implements SourceSpan.
func (p sourceSpan) End() SourcePos {
	return p.EndPos
}

var _ SourceSpan = sourceSpan{}
// Comments represents a range of sequential comments in a source file
// (e.g. no interleaving items or AST nodes).
type Comments struct {
	fileInfo *FileInfo
	// first is the index in fileInfo.comments of the first comment in the
	// range; num is how many consecutive comments the range covers.
	first, num int
}

// EmptyComments is an empty set of comments.
var EmptyComments = Comments{}

// Len returns the number of comments in c.
func (c Comments) Len() int {
	return c.num
}
// Index returns the i-th comment in the range. It panics if i is out of
// range (i.e. negative or >= c.Len()).
func (c Comments) Index(i int) Comment {
	if i < 0 || i >= c.num {
		panic(fmt.Sprintf("index %d out of range (len = %d)", i, c.num))
	}
	return Comment{fileInfo: c.fileInfo, index: c.first + i}
}
// Comment represents a single comment in a source file. It indicates
// the position of the comment and its contents. A single comment means
// one line-style comment ("//" to end of line) or one block comment
// ("/*" through "*/"). If a longer comment uses multiple line comments,
// each line is considered to be a separate comment. For example:
//
//	// This is a single comment, and
//	// this is a separate comment.
type Comment struct {
	fileInfo *FileInfo
	// index is this comment's position in fileInfo.comments.
	index int
}

var _ ItemInfo = Comment{}

// IsValid returns true if this comment is valid. If this comment is
// a zero-value struct, it is not valid.
func (c Comment) IsValid() bool {
	return c.fileInfo != nil && c.index >= 0
}
// AsItem returns the Item that corresponds to c.
func (c Comment) AsItem() Item {
	return Item(c.fileInfo.comments[c.index].index)
}

// Start returns the position of the first character of the comment.
func (c Comment) Start() SourcePos {
	span := c.fileInfo.items[c.AsItem()]
	return c.fileInfo.SourcePos(span.offset)
}

// End returns the position of the final character of the comment. Note
// that, unlike NodeInfo.End, this is the position of the last character
// itself, not the position after it.
func (c Comment) End() SourcePos {
	span := c.fileInfo.items[c.AsItem()]
	return c.fileInfo.SourcePos(span.offset + span.length - 1)
}
// LeadingWhitespace returns the whitespace between this comment and the
// item preceding it in the file (or from the start of the file when the
// comment is the first item).
func (c Comment) LeadingWhitespace() string {
	item := c.AsItem()
	span := c.fileInfo.items[item]
	start := 0
	if item > 0 {
		prev := c.fileInfo.items[item-1]
		start = prev.offset + prev.length
	}
	return string(c.fileInfo.data[start:span.offset])
}

// RawText returns the comment's full text exactly as it appears in the
// source file.
func (c Comment) RawText() string {
	span := c.fileInfo.items[c.AsItem()]
	end := span.offset + span.length
	return string(c.fileInfo.data[span.offset:end])
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import (
"fmt"
"strings"
)
// Identifier is a possibly-qualified name. This is used to distinguish
// ValueNode values that are references/identifiers vs. those that are
// string literals.
type Identifier string

// IdentValueNode is an AST node that represents an identifier.
type IdentValueNode interface {
	ValueNode
	// AsIdentifier returns the identifier's full text.
	AsIdentifier() Identifier
}

var _ IdentValueNode = (*IdentNode)(nil)
var _ IdentValueNode = (*CompoundIdentNode)(nil)

// IdentNode represents a simple, unqualified identifier. These are used to name
// elements declared in a protobuf file or to refer to elements. Example:
//
//	foobar
type IdentNode struct {
	terminalNode
	// Val is the identifier's text.
	Val string
}
// NewIdentNode creates a new *IdentNode. The given val is the identifier
// text and tok is the lexed token it came from.
func NewIdentNode(val string, tok Token) *IdentNode {
	node := &IdentNode{Val: val}
	node.terminalNode = tok.asTerminalNode()
	return node
}
// Value implements ValueNode, returning the identifier as an Identifier.
func (n *IdentNode) Value() any {
	return n.AsIdentifier()
}

// AsIdentifier returns the identifier's text as an Identifier.
func (n *IdentNode) AsIdentifier() Identifier {
	return Identifier(n.Val)
}

// ToKeyword is used to convert identifiers to keywords. Since keywords are not
// reserved in the protobuf language, they are initially lexed as identifiers
// and then converted to keywords based on context.
func (n *IdentNode) ToKeyword() *KeywordNode {
	// KeywordNode is declared as "type KeywordNode IdentNode", so a direct
	// pointer conversion suffices.
	return (*KeywordNode)(n)
}
// CompoundIdentNode represents a qualified identifier. A qualified identifier
// has at least one dot and possibly multiple identifier names (all separated by
// dots). If the identifier has a leading dot, then it is a *fully* qualified
// identifier. Example:
//
//	.com.foobar.Baz
type CompoundIdentNode struct {
	compositeNode
	// Optional leading dot, indicating that the identifier is fully qualified.
	LeadingDot *RuneNode
	Components []*IdentNode
	// Dots[0] is the dot after Components[0]. The length of Dots is usually
	// one less than the length of Components, but may be equal to it when
	// the identifier ends with a trailing dot (see NewCompoundIdentNode).
	Dots []*RuneNode
	// The text value of the identifier, with all components and dots
	// concatenated.
	Val string
}
// NewCompoundIdentNode creates a *CompoundIdentNode. The leadingDot may be nil.
// The dots arg must have a length that is one less than the length of
// components, or exactly equal to it when the identifier ends with a trailing
// dot (not valid syntax, but representable in the AST). The components arg
// must not be empty.
func NewCompoundIdentNode(leadingDot *RuneNode, components []*IdentNode, dots []*RuneNode) *CompoundIdentNode {
	if len(components) == 0 {
		panic("must have at least one component")
	}
	// len(dots) == len(components) is permitted for a trailing dot.
	if len(dots) != len(components)-1 && len(dots) != len(components) {
		panic(fmt.Sprintf("%d components requires %d dots, not %d", len(components), len(components)-1, len(dots)))
	}
	numChildren := len(components) + len(dots)
	if leadingDot != nil {
		numChildren++
	}
	children := make([]Node, 0, numChildren)
	// b accumulates the full concatenated text of the identifier.
	var b strings.Builder
	if leadingDot != nil {
		children = append(children, leadingDot)
		b.WriteRune(leadingDot.Rune)
	}
	for i, comp := range components {
		if i > 0 {
			// dots[i-1] separates components[i-1] and components[i].
			dot := dots[i-1]
			children = append(children, dot)
			b.WriteRune(dot.Rune)
		}
		children = append(children, comp)
		b.WriteString(comp.Val)
	}
	// An extra dot, if present, trails the final component.
	if len(dots) == len(components) {
		dot := dots[len(dots)-1]
		children = append(children, dot)
		b.WriteRune(dot.Rune)
	}
	return &CompoundIdentNode{
		compositeNode: compositeNode{
			children: children,
		},
		LeadingDot: leadingDot,
		Components: components,
		Dots:       dots,
		Val:        b.String(),
	}
}
// Value implements ValueNode, returning the full identifier as an Identifier.
func (n *CompoundIdentNode) Value() any {
	return n.AsIdentifier()
}

// AsIdentifier returns the full, concatenated identifier text.
func (n *CompoundIdentNode) AsIdentifier() Identifier {
	return Identifier(n.Val)
}

// KeywordNode is an AST node that represents a keyword. Keywords are
// like identifiers, but they have special meaning in particular contexts.
// Example:
//
//	message
type KeywordNode IdentNode
// NewKeywordNode creates a new *KeywordNode. The given val is the keyword
// text and tok is the lexed token it came from.
func NewKeywordNode(val string, tok Token) *KeywordNode {
	node := &KeywordNode{Val: val}
	node.terminalNode = tok.asTerminalNode()
	return node
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import "fmt"
// MessageDeclNode is a node in the AST that defines a message type. This
// includes normal message fields as well as implicit messages:
//   - *MessageNode
//   - *SyntheticGroupMessageNode (the group is a field and inline message type)
//   - *SyntheticMapEntryNode (map fields implicitly define a MapEntry message type)
//
// This also allows NoSourceNode to be used in place of one of the above
// for some usages.
type MessageDeclNode interface {
	NodeWithOptions
	// MessageName returns the node that names the message.
	MessageName() Node
}

var _ MessageDeclNode = (*MessageNode)(nil)
var _ MessageDeclNode = (*SyntheticGroupMessageNode)(nil)
var _ MessageDeclNode = (*SyntheticMapEntryNode)(nil)
var _ MessageDeclNode = (*NoSourceNode)(nil)

// MessageNode represents a message declaration. Example:
//
//	message Foo {
//	  string name = 1;
//	  repeated string labels = 2;
//	  bytes extra = 3;
//	}
type MessageNode struct {
	compositeNode
	// Keyword is the "message" keyword token.
	Keyword *KeywordNode
	// Name is the token holding the message's name.
	Name *IdentNode
	// MessageBody holds the braces and the declarations between them.
	MessageBody
}
// fileElement marks *MessageNode as a valid element of a file body.
func (*MessageNode) fileElement() {}

// msgElement marks *MessageNode as a valid element of a message body
// (messages can be nested).
func (*MessageNode) msgElement() {}
// NewMessageNode creates a new *MessageNode. All arguments must be non-nil
// or this function panics. It also panics, via populateMessageBody, if any
// decl is not a valid message element type.
//   - keyword: The token corresponding to the "message" keyword.
//   - name: The token corresponding to the field's name.
//   - openBrace: The token corresponding to the "{" rune that starts the body.
//   - decls: All declarations inside the message body.
//   - closeBrace: The token corresponding to the "}" rune that ends the body.
func NewMessageNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) *MessageNode {
	if keyword == nil {
		panic("keyword is nil")
	}
	if name == nil {
		panic("name is nil")
	}
	if openBrace == nil {
		panic("openBrace is nil")
	}
	if closeBrace == nil {
		panic("closeBrace is nil")
	}
	// 4 fixed children (keyword, name, braces) plus one per declaration.
	children := make([]Node, 0, 4+len(decls))
	children = append(children, keyword, name, openBrace)
	for _, decl := range decls {
		children = append(children, decl)
	}
	children = append(children, closeBrace)
	ret := &MessageNode{
		compositeNode: compositeNode{children: children},
		Keyword:       keyword,
		Name:          name,
	}
	populateMessageBody(&ret.MessageBody, openBrace, decls, closeBrace)
	return ret
}
// MessageName returns the node naming the message, implementing
// MessageDeclNode.
func (n *MessageNode) MessageName() Node {
	return n.Name
}
// RangeOptions invokes fn for each option declared directly in the message
// body, stopping early if fn returns false.
func (n *MessageNode) RangeOptions(fn func(*OptionNode) bool) {
	for _, decl := range n.Decls {
		opt, ok := decl.(*OptionNode)
		if !ok {
			continue
		}
		if !fn(opt) {
			return
		}
	}
}
// MessageBody represents the body of a message. It is used by both
// MessageNodes and GroupNodes.
type MessageBody struct {
	// OpenBrace is the "{" rune token that starts the body.
	OpenBrace *RuneNode
	// Decls holds all declarations inside the body.
	Decls []MessageElement
	// CloseBrace is the "}" rune token that ends the body.
	CloseBrace *RuneNode
}
// populateMessageBody fills in m from the given body tokens and declarations.
// It panics if any decl is not a permitted message element concrete type.
func populateMessageBody(m *MessageBody, openBrace *RuneNode, decls []MessageElement, closeBrace *RuneNode) {
	m.OpenBrace = openBrace
	m.Decls = decls
	for _, decl := range decls {
		if !isValidMessageElement(decl) {
			panic(fmt.Sprintf("invalid MessageElement type: %T", decl))
		}
	}
	m.CloseBrace = closeBrace
}

// isValidMessageElement reports whether decl is one of the concrete types
// permitted inside a message body.
func isValidMessageElement(decl MessageElement) bool {
	switch decl.(type) {
	case *OptionNode, *FieldNode, *MapFieldNode, *GroupNode, *OneofNode,
		*MessageNode, *EnumNode, *ExtendNode, *ExtensionRangeNode,
		*ReservedNode, *EmptyDeclNode:
		return true
	default:
		return false
	}
}
// MessageElement is an interface implemented by all AST nodes that can
// appear in a message body.
type MessageElement interface {
	Node
	// msgElement is a marker method; it has no behavior.
	msgElement()
}

var _ MessageElement = (*OptionNode)(nil)
var _ MessageElement = (*FieldNode)(nil)
var _ MessageElement = (*MapFieldNode)(nil)
var _ MessageElement = (*OneofNode)(nil)
var _ MessageElement = (*GroupNode)(nil)
var _ MessageElement = (*MessageNode)(nil)
var _ MessageElement = (*EnumNode)(nil)
var _ MessageElement = (*ExtendNode)(nil)
var _ MessageElement = (*ExtensionRangeNode)(nil)
var _ MessageElement = (*ReservedNode)(nil)
var _ MessageElement = (*EmptyDeclNode)(nil)
// ExtendNode represents a declaration of extension fields. Example:
//
//	extend google.protobuf.FieldOptions {
//	  bool redacted = 33333;
//	}
type ExtendNode struct {
	compositeNode
	// Keyword is the "extend" keyword token.
	Keyword *KeywordNode
	// Extendee names the message being extended.
	Extendee IdentValueNode
	// OpenBrace is the "{" rune token that starts the body.
	OpenBrace *RuneNode
	// Decls holds all declarations inside the body.
	Decls []ExtendElement
	// CloseBrace is the "}" rune token that ends the body.
	CloseBrace *RuneNode
}

// fileElement marks *ExtendNode as a valid element of a file body.
func (*ExtendNode) fileElement() {}

// msgElement marks *ExtendNode as a valid element of a message body.
func (*ExtendNode) msgElement() {}
// NewExtendNode creates a new *ExtendNode. All arguments must be non-nil.
//   - keyword: The token corresponding to the "extend" keyword.
//   - extendee: The token corresponding to the name of the extended message.
//   - openBrace: The token corresponding to the "{" rune that starts the body.
//   - decls: All declarations inside the message body.
//   - closeBrace: The token corresponding to the "}" rune that ends the body.
func NewExtendNode(keyword *KeywordNode, extendee IdentValueNode, openBrace *RuneNode, decls []ExtendElement, closeBrace *RuneNode) *ExtendNode {
	switch {
	case keyword == nil:
		panic("keyword is nil")
	case extendee == nil:
		panic("extendee is nil")
	case openBrace == nil:
		panic("openBrace is nil")
	case closeBrace == nil:
		panic("closeBrace is nil")
	}
	kids := make([]Node, 0, 4+len(decls))
	kids = append(kids, keyword, extendee, openBrace)
	for _, d := range decls {
		kids = append(kids, d)
	}
	kids = append(kids, closeBrace)
	node := &ExtendNode{
		compositeNode: compositeNode{
			children: kids,
		},
		Keyword:    keyword,
		Extendee:   extendee,
		OpenBrace:  openBrace,
		Decls:      decls,
		CloseBrace: closeBrace,
	}
	// Wire each field/group declaration back to this extend block, rejecting
	// any declaration type that cannot appear in an extend body.
	for _, d := range decls {
		switch d := d.(type) {
		case *FieldNode:
			d.Extendee = node
		case *GroupNode:
			d.Extendee = node
		case *EmptyDeclNode:
			// nothing to wire up
		default:
			panic(fmt.Sprintf("invalid ExtendElement type: %T", d))
		}
	}
	return node
}
// ExtendElement is an interface implemented by all AST nodes that can
// appear in the body of an extends declaration.
type ExtendElement interface {
	Node
	extendElement()
}

// Compile-time checks that all extend-body node types satisfy ExtendElement.
var _ ExtendElement = (*FieldNode)(nil)
var _ ExtendElement = (*GroupNode)(nil)
var _ ExtendElement = (*EmptyDeclNode)(nil)
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
// UnknownPos is a placeholder position when only the source file
// name is known.
func UnknownPos(filename string) SourcePos {
	return SourcePos{Filename: filename}
}

// UnknownSpan is a placeholder span when only the source file
// name is known.
func UnknownSpan(filename string) SourceSpan {
	return unknownSpan{filename: filename}
}

// unknownSpan implements SourceSpan for a file whose position details are
// unavailable; both endpoints are the same placeholder position.
type unknownSpan struct {
	filename string
}

// Start returns a placeholder position carrying only the filename.
func (s unknownSpan) Start() SourcePos {
	return UnknownPos(s.filename)
}

// End returns a placeholder position carrying only the filename.
func (s unknownSpan) End() SourcePos {
	return UnknownPos(s.filename)
}
// NoSourceNode is a placeholder AST node that implements numerous
// interfaces in this package. It can be used to represent an AST
// element for a file whose source is not available.
type NoSourceNode FileInfo

// NewNoSourceNode creates a new NoSourceNode for the given filename.
func NewNoSourceNode(filename string) *NoSourceNode {
	return &NoSourceNode{name: filename}
}

// Name returns the name of the file for which source is unavailable.
func (n *NoSourceNode) Name() string {
	return n.name
}

// Start returns the zero token, since no real source position exists.
func (n *NoSourceNode) Start() Token {
	return 0
}

// End returns the zero token, since no real source position exists.
func (n *NoSourceNode) End() Token {
	return 0
}

// NodeInfo returns a NodeInfo backed by this node's underlying FileInfo,
// regardless of the node argument.
func (n *NoSourceNode) NodeInfo(Node) NodeInfo {
	return NodeInfo{
		fileInfo: (*FileInfo)(n),
	}
}

// The methods below satisfy the various placeholder decl interfaces in this
// package (such as OptionDeclNode and RangeDeclNode) by returning the node
// itself, a nil value, or doing nothing, since no source details exist.

func (n *NoSourceNode) GetSyntax() Node {
	return n
}

func (n *NoSourceNode) GetName() Node {
	return n
}

func (n *NoSourceNode) GetValue() ValueNode {
	return n
}

func (n *NoSourceNode) FieldLabel() Node {
	return n
}

func (n *NoSourceNode) FieldName() Node {
	return n
}

func (n *NoSourceNode) FieldType() Node {
	return n
}

func (n *NoSourceNode) FieldTag() Node {
	return n
}

func (n *NoSourceNode) FieldExtendee() Node {
	return n
}

func (n *NoSourceNode) GetGroupKeyword() Node {
	return n
}

func (n *NoSourceNode) GetOptions() *CompactOptionsNode {
	return nil
}

func (n *NoSourceNode) RangeStart() Node {
	return n
}

func (n *NoSourceNode) RangeEnd() Node {
	return n
}

func (n *NoSourceNode) GetNumber() Node {
	return n
}

func (n *NoSourceNode) MessageName() Node {
	return n
}

func (n *NoSourceNode) OneofName() Node {
	return n
}

func (n *NoSourceNode) GetInputType() Node {
	return n
}

func (n *NoSourceNode) GetOutputType() Node {
	return n
}

// Value returns nil: there is no literal value to report.
func (n *NoSourceNode) Value() any {
	return nil
}

// RangeOptions is a no-op: a NoSourceNode has no option declarations.
func (n *NoSourceNode) RangeOptions(func(*OptionNode) bool) {
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
// Node is the interface implemented by all nodes in the AST. It
// provides information about the span of this AST node in terms
// of location in the source file. It also provides information
// about all prior comments (attached as leading comments) and
// optional subsequent comments (attached as trailing comments).
type Node interface {
	Start() Token
	End() Token
}

// TerminalNode represents a leaf in the AST. These represent
// the items/lexemes in the protobuf language. Comments and
// whitespace are accumulated by the lexer and associated with
// the following lexed token.
type TerminalNode interface {
	Node
	Token() Token
}

// Compile-time checks that all leaf node types satisfy TerminalNode.
var _ TerminalNode = (*StringLiteralNode)(nil)
var _ TerminalNode = (*UintLiteralNode)(nil)
var _ TerminalNode = (*FloatLiteralNode)(nil)
var _ TerminalNode = (*IdentNode)(nil)
var _ TerminalNode = (*SpecialFloatLiteralNode)(nil)
var _ TerminalNode = (*KeywordNode)(nil)
var _ TerminalNode = (*RuneNode)(nil)

// CompositeNode represents any non-terminal node in the tree. These
// are interior or root nodes and have child nodes.
type CompositeNode interface {
	Node
	// Children contains all AST nodes that are immediate children of this one.
	Children() []Node
}
// terminalNode contains bookkeeping shared by all TerminalNode
// implementations. It is embedded in all such node types in this
// package. It provides the implementation of the TerminalNode
// interface.
type terminalNode Token

// Start returns the node's single token; for a leaf, start and end coincide.
func (n terminalNode) Start() Token {
	return Token(n)
}

// End returns the node's single token; for a leaf, start and end coincide.
func (n terminalNode) End() Token {
	return Token(n)
}

// Token returns the one token that this terminal node represents.
func (n terminalNode) Token() Token {
	return Token(n)
}

// compositeNode contains bookkeeping shared by all CompositeNode
// implementations. It is embedded in all such node types in this
// package. It provides the implementation of the CompositeNode
// interface.
type compositeNode struct {
	children []Node
}

// Children returns all AST nodes that are immediate children of this one.
func (n *compositeNode) Children() []Node {
	return n.children
}

// Start returns the start token of the first child. It assumes children is
// non-empty (node constructors in this package always append at least one).
func (n *compositeNode) Start() Token {
	return n.children[0].Start()
}

// End returns the end token of the last child. It assumes children is
// non-empty (node constructors in this package always append at least one).
func (n *compositeNode) End() Token {
	return n.children[len(n.children)-1].End()
}
// RuneNode represents a single rune in protobuf source. Runes
// are typically collected into items, but some runes stand on
// their own, such as punctuation/symbols like commas, semicolons,
// equals signs, open and close symbols (braces, brackets, angles,
// and parentheses), and periods/dots.
// TODO: make this more compact; if runes don't have attributed comments
// then we don't need a Token to represent them and only need an offset
// into the file's contents.
type RuneNode struct {
	terminalNode
	Rune rune // the actual character this node represents
}

// NewRuneNode creates a new *RuneNode with the given properties.
func NewRuneNode(r rune, tok Token) *RuneNode {
	return &RuneNode{
		terminalNode: tok.asTerminalNode(),
		Rune:         r,
	}
}
// EmptyDeclNode represents an empty declaration in protobuf source.
// These amount to extra semicolons, with no actual content preceding
// the semicolon.
type EmptyDeclNode struct {
	compositeNode
	Semicolon *RuneNode // the ";" rune that constitutes the declaration
}
// NewEmptyDeclNode creates a new *EmptyDeclNode. The one argument must
// be non-nil.
func NewEmptyDeclNode(semicolon *RuneNode) *EmptyDeclNode {
	if semicolon == nil {
		panic("semicolon is nil")
	}
	node := &EmptyDeclNode{Semicolon: semicolon}
	// The semicolon is the node's only child.
	node.children = []Node{semicolon}
	return node
}
// The marker methods below allow *EmptyDeclNode to appear in every kind of
// declaration body: files, messages, extend blocks, oneofs, enums, services,
// and methods.
func (e *EmptyDeclNode) fileElement()    {}
func (e *EmptyDeclNode) msgElement()     {}
func (e *EmptyDeclNode) extendElement()  {}
func (e *EmptyDeclNode) oneofElement()   {}
func (e *EmptyDeclNode) enumElement()    {}
func (e *EmptyDeclNode) serviceElement() {}
func (e *EmptyDeclNode) methodElement()  {}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import "fmt"
// OptionDeclNode is a placeholder interface for AST nodes that represent
// options. This allows NoSourceNode to be used in place of *OptionNode
// for some usages.
type OptionDeclNode interface {
	Node
	GetName() Node
	GetValue() ValueNode
}

// Compile-time checks that both implementations satisfy OptionDeclNode.
var _ OptionDeclNode = (*OptionNode)(nil)
var _ OptionDeclNode = (*NoSourceNode)(nil)
// OptionNode represents the declaration of a single option for an element.
// It is used both for normal option declarations (start with "option" keyword
// and end with semicolon) and for compact options found in fields, enum values,
// and extension ranges. Example:
//
//	option (custom.option) = "foo";
type OptionNode struct {
	compositeNode
	Keyword   *KeywordNode // absent for compact options
	Name      *OptionNameNode
	Equals    *RuneNode
	Val       ValueNode
	Semicolon *RuneNode // absent for compact options
}

// The marker methods below allow *OptionNode to appear in file, message,
// oneof, enum, service, and method bodies.
func (*OptionNode) fileElement()    {}
func (*OptionNode) msgElement()     {}
func (*OptionNode) oneofElement()   {}
func (*OptionNode) enumElement()    {}
func (*OptionNode) serviceElement() {}
func (*OptionNode) methodElement()  {}
// NewOptionNode creates a new *OptionNode for a full option declaration (as
// used in files, messages, oneofs, enums, services, and methods). All arguments
// must be non-nil. (Also see NewCompactOptionNode.)
//   - keyword: The token corresponding to the "option" keyword.
//   - name: The token corresponding to the name of the option.
//   - equals: The token corresponding to the "=" rune after the name.
//   - val: The token corresponding to the option value.
//   - semicolon: The token corresponding to the ";" rune that ends the declaration.
func NewOptionNode(keyword *KeywordNode, name *OptionNameNode, equals *RuneNode, val ValueNode, semicolon *RuneNode) *OptionNode {
	switch {
	case keyword == nil:
		panic("keyword is nil")
	case name == nil:
		panic("name is nil")
	case equals == nil:
		panic("equals is nil")
	case val == nil:
		panic("val is nil")
	}
	kids := []Node{keyword, name, equals, val}
	if semicolon != nil {
		kids = append(kids, semicolon)
	}
	return &OptionNode{
		compositeNode: compositeNode{
			children: kids,
		},
		Keyword:   keyword,
		Name:      name,
		Equals:    equals,
		Val:       val,
		Semicolon: semicolon,
	}
}
// NewCompactOptionNode creates a new *OptionNode for a compact option
// declaration (as used in fields, enum values, and extension ranges). The
// name argument must be non-nil. The equals and val arguments must either
// both be nil (a tolerated name-only option) or both be non-nil.
func NewCompactOptionNode(name *OptionNameNode, equals *RuneNode, val ValueNode) *OptionNode {
	if name == nil {
		panic("name is nil")
	}
	if equals == nil && val != nil {
		panic("equals is nil but val is not")
	}
	if val == nil && equals != nil {
		panic("val is nil but equals is not")
	}
	kids := []Node{name}
	if equals != nil {
		// both equals and val are present (checked above)
		kids = append(kids, equals, val)
	}
	return &OptionNode{
		compositeNode: compositeNode{
			children: kids,
		},
		Name:   name,
		Equals: equals,
		Val:    val,
	}
}
// GetName returns the option's name node. It implements part of the
// OptionDeclNode interface.
func (n *OptionNode) GetName() Node {
	return n.Name
}

// GetValue returns the option's value node. It implements part of the
// OptionDeclNode interface.
func (n *OptionNode) GetValue() ValueNode {
	return n.Val
}
// OptionNameNode represents an option name or even a traversal through message
// types to name a nested option field. Example:
//
//	(foo.bar).baz.(bob)
type OptionNameNode struct {
	compositeNode
	Parts []*FieldReferenceNode
	// Dots represent the separating '.' characters between name parts. The
	// length of this slice is normally exactly len(Parts)-1, each item in
	// Parts having a corresponding item in this slice *except the last*
	// (since a trailing dot is not allowed). However, NewOptionNameNode also
	// tolerates a length of len(Parts), representing an erroneous trailing dot.
	//
	// These do *not* include dots that are inside of an extension name. For
	// example: (foo.bar).baz.(bob) has three parts:
	//  1. (foo.bar)  - an extension name
	//  2. baz        - a regular field in foo.bar
	//  3. (bob)      - an extension field in baz
	// Note that the dot in foo.bar will thus not be present in Dots but is
	// instead in Parts[0].
	Dots []*RuneNode
}
// NewOptionNameNode creates a new *OptionNameNode. The parts arg must not be
// empty. The dots arg must normally have a length that is one less than the
// length of parts; a length equal to that of parts is also accepted, in which
// case the final dot is an erroneous (but tolerated) trailing dot.
func NewOptionNameNode(parts []*FieldReferenceNode, dots []*RuneNode) *OptionNameNode {
	if len(parts) == 0 {
		panic("must have at least one part")
	}
	if len(dots) != len(parts)-1 && len(dots) != len(parts) {
		panic(fmt.Sprintf("%d parts requires %d dots, not %d", len(parts), len(parts)-1, len(dots)))
	}
	children := make([]Node, 0, len(parts)+len(dots))
	for i, part := range parts {
		if part == nil {
			panic(fmt.Sprintf("parts[%d] is nil", i))
		}
		if i > 0 {
			if dots[i-1] == nil {
				panic(fmt.Sprintf("dots[%d] is nil", i-1))
			}
			// interleave the separator dot before this part
			children = append(children, dots[i-1])
		}
		children = append(children, part)
	}
	if len(dots) == len(parts) { // Add the erroneous, but tolerated trailing dot.
		if dots[len(dots)-1] == nil {
			panic(fmt.Sprintf("dots[%d] is nil", len(dots)-1))
		}
		children = append(children, dots[len(dots)-1])
	}
	return &OptionNameNode{
		compositeNode: compositeNode{
			children: children,
		},
		Parts: parts,
		Dots:  dots,
	}
}
// FieldReferenceNode is a reference to a field name. It can indicate a regular
// field (simple unqualified name), an extension field (possibly-qualified name
// that is enclosed either in brackets or parentheses), or an "any" type
// reference (a type URL in the form "server.host/fully.qualified.Name" that is
// enclosed in brackets).
//
// Extension names are used in options to refer to custom options (which are
// actually extensions), in which case the name is enclosed in parentheses "("
// and ")". They can also be used to refer to extension fields of options.
//
// Extension names are also used in message literals to set extension fields,
// in which case the name is enclosed in square brackets "[" and "]".
//
// "Any" type references can only be used in message literals, and are not
// allowed in option names. They are always enclosed in square brackets. An
// "any" type reference is distinguished from an extension name by the presence
// of a slash, which must be present in an "any" type reference and must be
// absent in an extension name.
//
// Examples:
//
//	foobar
//	(foo.bar)
//	[foo.bar]
//	[type.googleapis.com/foo.bar]
type FieldReferenceNode struct {
	compositeNode
	Open *RuneNode // only present for extension names and "any" type references

	// URLPrefix and Slash are only present for "any" type references.
	URLPrefix IdentValueNode
	Slash     *RuneNode

	Name  IdentValueNode
	Close *RuneNode // only present for extension names and "any" type references
}
// NewFieldReferenceNode creates a new *FieldReferenceNode for a regular field.
// The name arg must not be nil.
func NewFieldReferenceNode(name *IdentNode) *FieldReferenceNode {
	if name == nil {
		panic("name is nil")
	}
	ref := &FieldReferenceNode{Name: name}
	// A plain field reference has the bare name as its only child.
	ref.children = []Node{name}
	return ref
}
// NewExtensionFieldReferenceNode creates a new *FieldReferenceNode for an
// extension field. All args must be non-nil. The openSym and closeSym runes
// should be "(" and ")" or "[" and "]".
func NewExtensionFieldReferenceNode(openSym *RuneNode, name IdentValueNode, closeSym *RuneNode) *FieldReferenceNode {
	switch {
	case name == nil:
		panic("name is nil")
	case openSym == nil:
		panic("openSym is nil")
	case closeSym == nil:
		panic("closeSym is nil")
	}
	return &FieldReferenceNode{
		compositeNode: compositeNode{
			children: []Node{openSym, name, closeSym},
		},
		Open:  openSym,
		Name:  name,
		Close: closeSym,
	}
}
// NewAnyTypeReferenceNode creates a new *FieldReferenceNode for an "any"
// type reference. All args must be non-nil. The openSym and closeSym runes
// should be "[" and "]". The slashSym rune should be "/".
func NewAnyTypeReferenceNode(openSym *RuneNode, urlPrefix IdentValueNode, slashSym *RuneNode, name IdentValueNode, closeSym *RuneNode) *FieldReferenceNode {
	switch {
	case name == nil:
		panic("name is nil")
	case openSym == nil:
		panic("openSym is nil")
	case closeSym == nil:
		panic("closeSym is nil")
	case urlPrefix == nil:
		panic("urlPrefix is nil")
	case slashSym == nil:
		panic("slashSym is nil")
	}
	return &FieldReferenceNode{
		compositeNode: compositeNode{
			children: []Node{openSym, urlPrefix, slashSym, name, closeSym},
		},
		Open:      openSym,
		URLPrefix: urlPrefix,
		Slash:     slashSym,
		Name:      name,
		Close:     closeSym,
	}
}
// IsExtension reports if this is an extension name or not (e.g. enclosed in
// punctuation, such as parentheses or brackets).
func (a *FieldReferenceNode) IsExtension() bool {
	// An "any" type reference also has Open set, so the absence of a slash
	// is required to distinguish an extension name from a type reference.
	return a.Open != nil && a.Slash == nil
}

// IsAnyTypeReference reports if this is an Any type reference.
func (a *FieldReferenceNode) IsAnyTypeReference() bool {
	return a.Slash != nil
}
// Value returns the string form of this reference: the bare identifier for a
// regular field, or the identifier (plus URL prefix and slash, for an "any"
// type reference) wrapped in the enclosing open/close punctuation.
func (a *FieldReferenceNode) Value() string {
	if a.Open == nil {
		return string(a.Name.AsIdentifier())
	}
	openStr := string(a.Open.Rune)
	closeStr := string(a.Close.Rune)
	if a.Slash != nil {
		return openStr + string(a.URLPrefix.AsIdentifier()) + string(a.Slash.Rune) + string(a.Name.AsIdentifier()) + closeStr
	}
	return openStr + string(a.Name.AsIdentifier()) + closeStr
}
// CompactOptionsNode represents a compact options declaration, as used with
// fields, enum values, and extension ranges. Example:
//
//	[deprecated = true, json_name = "foo_bar"]
type CompactOptionsNode struct {
	compositeNode
	OpenBracket *RuneNode // the "[" rune
	Options     []*OptionNode
	// Commas represent the separating ',' characters between options. The
	// length of this slice must be exactly len(Options)-1, with each item
	// in Options having a corresponding item in this slice *except the last*
	// (since a trailing comma is not allowed).
	Commas       []*RuneNode
	CloseBracket *RuneNode // the "]" rune
}
// NewCompactOptionsNode creates a *CompactOptionsNode. The openBracket and
// closeBracket args must be non-nil. The commas arg must normally have a
// length that is one less than the length of opts, though a trailing comma
// (len(commas) == len(opts)) is tolerated as an error-recovery case. An
// empty opts slice (with empty commas) is also accepted.
func NewCompactOptionsNode(openBracket *RuneNode, opts []*OptionNode, commas []*RuneNode, closeBracket *RuneNode) *CompactOptionsNode {
	if openBracket == nil {
		panic("openBracket is nil")
	}
	if closeBracket == nil {
		panic("closeBracket is nil")
	}
	if len(opts) == 0 && len(commas) != 0 {
		panic("opts is empty but commas is not")
	}
	if len(opts) != len(commas) && len(opts) != len(commas)+1 {
		panic(fmt.Sprintf("%d opts requires %d commas, not %d", len(opts), len(opts)-1, len(commas)))
	}
	children := make([]Node, 0, len(opts)+len(commas)+2)
	children = append(children, openBracket)
	if len(opts) > 0 {
		for i, opt := range opts {
			if i > 0 {
				if commas[i-1] == nil {
					panic(fmt.Sprintf("commas[%d] is nil", i-1))
				}
				// interleave the separator comma before this option
				children = append(children, commas[i-1])
			}
			if opt == nil {
				panic(fmt.Sprintf("opts[%d] is nil", i))
			}
			children = append(children, opt)
		}
		if len(opts) == len(commas) { // Add the erroneous, but tolerated trailing comma.
			if commas[len(commas)-1] == nil {
				panic(fmt.Sprintf("commas[%d] is nil", len(commas)-1))
			}
			children = append(children, commas[len(commas)-1])
		}
	}
	children = append(children, closeBracket)
	return &CompactOptionsNode{
		compositeNode: compositeNode{
			children: children,
		},
		OpenBracket:  openBracket,
		Options:      opts,
		Commas:       commas,
		CloseBracket: closeBracket,
	}
}
// GetElements returns the options in this list. It is safe to call on a nil
// receiver, in which case it returns nil.
func (e *CompactOptionsNode) GetElements() []*OptionNode {
	if e == nil {
		return nil
	}
	return e.Options
}

// NodeWithOptions represents a node in the AST that contains
// option statements.
type NodeWithOptions interface {
	Node
	RangeOptions(func(*OptionNode) bool)
}

// Compile-time checks that option-bearing node types satisfy NodeWithOptions.
var _ NodeWithOptions = FileDeclNode(nil)
var _ NodeWithOptions = MessageDeclNode(nil)
var _ NodeWithOptions = OneofDeclNode(nil)
var _ NodeWithOptions = (*EnumNode)(nil)
var _ NodeWithOptions = (*ServiceNode)(nil)
var _ NodeWithOptions = RPCDeclNode(nil)
var _ NodeWithOptions = FieldDeclNode(nil)
var _ NodeWithOptions = EnumValueDeclNode(nil)
var _ NodeWithOptions = (*ExtensionRangeNode)(nil)
var _ NodeWithOptions = (*NoSourceNode)(nil)
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import "fmt"
// ExtensionRangeNode represents an extension range declaration in an extendable
// message. Example:
//
//	extensions 100 to max;
type ExtensionRangeNode struct {
	compositeNode
	Keyword *KeywordNode
	Ranges  []*RangeNode
	// Commas represent the separating ',' characters between ranges. The
	// length of this slice must be exactly len(Ranges)-1, each item in Ranges
	// having a corresponding item in this slice *except the last* (since a
	// trailing comma is not allowed).
	Commas    []*RuneNode
	Options   *CompactOptionsNode // may be nil, when no options are declared
	Semicolon *RuneNode
}

// msgElement marks *ExtensionRangeNode as valid inside message bodies.
func (*ExtensionRangeNode) msgElement() {}
// NewExtensionRangeNode creates a new *ExtensionRangeNode. All args must be
// non-nil except opts, which may be nil.
//   - keyword: The token corresponding to the "extensions" keyword.
//   - ranges: One or more range expressions.
//   - commas: Tokens that represent the "," runes that delimit the range expressions.
//     The length of commas must be one less than the length of ranges.
//   - opts: The node corresponding to options that apply to each of the ranges.
//   - semicolon: The token corresponding to the ";" rune that ends the declaration.
func NewExtensionRangeNode(keyword *KeywordNode, ranges []*RangeNode, commas []*RuneNode, opts *CompactOptionsNode, semicolon *RuneNode) *ExtensionRangeNode {
	if keyword == nil {
		panic("keyword is nil")
	}
	if semicolon == nil {
		panic("semicolon is nil")
	}
	if len(ranges) == 0 {
		panic("must have at least one range")
	}
	if len(commas) != len(ranges)-1 {
		panic(fmt.Sprintf("%d ranges requires %d commas, not %d", len(ranges), len(ranges)-1, len(commas)))
	}
	// keyword + ranges + separating commas + semicolon; plus opts if present
	numChildren := len(ranges)*2 + 1
	if opts != nil {
		numChildren++
	}
	children := make([]Node, 0, numChildren)
	children = append(children, keyword)
	for i, rng := range ranges {
		if i > 0 {
			if commas[i-1] == nil {
				panic(fmt.Sprintf("commas[%d] is nil", i-1))
			}
			children = append(children, commas[i-1])
		}
		if rng == nil {
			panic(fmt.Sprintf("ranges[%d] is nil", i))
		}
		children = append(children, rng)
	}
	if opts != nil {
		children = append(children, opts)
	}
	children = append(children, semicolon)
	return &ExtensionRangeNode{
		compositeNode: compositeNode{
			children: children,
		},
		Keyword:   keyword,
		Ranges:    ranges,
		Commas:    commas,
		Options:   opts,
		Semicolon: semicolon,
	}
}
// RangeOptions calls fn for each compact option declared on this extension
// range, stopping early if fn returns false.
func (e *ExtensionRangeNode) RangeOptions(fn func(*OptionNode) bool) {
	// e.Options may be nil (NewExtensionRangeNode explicitly permits a nil
	// opts argument), so use the nil-safe GetElements accessor instead of
	// dereferencing e.Options.Options directly, which would panic.
	for _, opt := range e.Options.GetElements() {
		if !fn(opt) {
			return
		}
	}
}
// RangeDeclNode is a placeholder interface for AST nodes that represent
// numeric values. This allows NoSourceNode to be used in place of *RangeNode
// for some usages.
type RangeDeclNode interface {
	Node
	RangeStart() Node
	RangeEnd() Node
}

// Compile-time checks that both implementations satisfy RangeDeclNode.
var _ RangeDeclNode = (*RangeNode)(nil)
var _ RangeDeclNode = (*NoSourceNode)(nil)

// RangeNode represents a range expression, used in both extension ranges and
// reserved ranges. Example:
//
//	1000 to max
type RangeNode struct {
	compositeNode
	StartVal IntValueNode
	// If To is non-nil, then exactly one of EndVal or Max must also be non-nil.
	To *KeywordNode
	// EndVal and Max are mutually exclusive.
	EndVal IntValueNode
	Max    *KeywordNode
}
// NewRangeNode creates a new *RangeNode. The start argument must be non-nil.
// The to argument represents the "to" keyword. If present (i.e. if it is non-nil),
// then so must be exactly one of end or max. If max is non-nil, it indicates a
// "100 to max" style range. But if end is non-nil, the end of the range is a
// literal, such as "100 to 200".
func NewRangeNode(start IntValueNode, to *KeywordNode, end IntValueNode, maxEnd *KeywordNode) *RangeNode {
	if start == nil {
		panic("start is nil")
	}
	// Validate the (to, end, maxEnd) combination and compute the child count.
	capacity := 1
	if to == nil {
		if end != nil {
			panic("to is nil, but end is not")
		}
		if maxEnd != nil {
			panic("to is nil, but max is not")
		}
	} else {
		if end == nil && maxEnd == nil {
			panic("to is not nil, but end and max both are")
		}
		if end != nil && maxEnd != nil {
			panic("end and max cannot be both non-nil")
		}
		capacity = 3
	}
	kids := make([]Node, 0, capacity)
	kids = append(kids, start)
	if to != nil {
		kids = append(kids, to)
		// exactly one of end/maxEnd is non-nil here
		if end != nil {
			kids = append(kids, end)
		} else {
			kids = append(kids, maxEnd)
		}
	}
	return &RangeNode{
		compositeNode: compositeNode{
			children: kids,
		},
		StartVal: start,
		To:       to,
		EndVal:   end,
		Max:      maxEnd,
	}
}
// RangeStart returns the node for the start of the range. It implements
// part of the RangeDeclNode interface.
func (n *RangeNode) RangeStart() Node {
	return n.StartVal
}

// RangeEnd returns the node for the end of the range: the "max" keyword node
// if present, otherwise the explicit end value. If the range has no "to"
// clause at all, the start node is returned.
func (n *RangeNode) RangeEnd() Node {
	if n.Max != nil {
		return n.Max
	}
	if n.EndVal != nil {
		return n.EndVal
	}
	return n.StartVal
}

// StartValue returns the underlying value of the range's start node.
func (n *RangeNode) StartValue() any {
	return n.StartVal.Value()
}

// StartValueAsInt32 converts the range's start to an int32 via AsInt32 with
// the given bounds; the bool result reports whether the conversion succeeded.
func (n *RangeNode) StartValueAsInt32(minVal, maxVal int32) (int32, bool) {
	return AsInt32(n.StartVal, minVal, maxVal)
}

// EndValue returns the underlying value of the range's explicit end node, or
// nil if the range has no explicit end value.
func (n *RangeNode) EndValue() any {
	if n.EndVal == nil {
		return nil
	}
	return n.EndVal.Value()
}

// EndValueAsInt32 returns the range's end as an int32: maxVal when the range
// ends in "max", the start value when there is no explicit end, and otherwise
// the explicit end value converted via AsInt32 with the given bounds.
func (n *RangeNode) EndValueAsInt32(minVal, maxVal int32) (int32, bool) {
	if n.Max != nil {
		return maxVal, true
	}
	if n.EndVal == nil {
		return n.StartValueAsInt32(minVal, maxVal)
	}
	return AsInt32(n.EndVal, minVal, maxVal)
}
// ReservedNode represents a reserved declaration, which can be used to reserve
// either names or numbers. Examples:
//
//	reserved 1, 10-12, 15;
//	reserved "foo", "bar", "baz";
//	reserved foo, bar, baz;
type ReservedNode struct {
	compositeNode
	Keyword *KeywordNode
	// If non-empty, this node represents reserved ranges, and Names and Identifiers
	// will be empty.
	Ranges []*RangeNode
	// If non-empty, this node represents reserved names as string literals, and
	// Ranges and Identifiers will be empty. String literals are used for reserved
	// names in proto2 and proto3 syntax.
	Names []StringValueNode
	// If non-empty, this node represents reserved names as identifiers, and Ranges
	// and Names will be empty. Identifiers are used for reserved names in editions.
	Identifiers []*IdentNode
	// Commas represent the separating ',' characters between options. The
	// length of this slice must be exactly len(Ranges)-1 or len(Names)-1, depending
	// on whether this node represents reserved ranges or reserved names. Each item
	// in Ranges or Names has a corresponding item in this slice *except the last*
	// (since a trailing comma is not allowed).
	Commas    []*RuneNode
	Semicolon *RuneNode
}

// msgElement and enumElement mark *ReservedNode as valid inside both message
// and enum bodies.
func (*ReservedNode) msgElement()  {}
func (*ReservedNode) enumElement() {}
// NewReservedRangesNode creates a new *ReservedNode that represents reserved
// numeric ranges. All args must be non-nil.
//   - keyword: The token corresponding to the "reserved" keyword.
//   - ranges: One or more range expressions.
//   - commas: Tokens that represent the "," runes that delimit the range expressions.
//     The length of commas must be one less than the length of ranges.
//   - semicolon: The token corresponding to the ";" rune that ends the declaration.
func NewReservedRangesNode(keyword *KeywordNode, ranges []*RangeNode, commas []*RuneNode, semicolon *RuneNode) *ReservedNode {
	switch {
	case keyword == nil:
		panic("keyword is nil")
	case semicolon == nil:
		panic("semicolon is nil")
	case len(ranges) == 0:
		panic("must have at least one range")
	case len(commas) != len(ranges)-1:
		panic(fmt.Sprintf("%d ranges requires %d commas, not %d", len(ranges), len(ranges)-1, len(commas)))
	}
	kids := make([]Node, 0, len(ranges)*2+1)
	kids = append(kids, keyword)
	for i, rng := range ranges {
		if i > 0 {
			// interleave the separator comma before this range
			comma := commas[i-1]
			if comma == nil {
				panic(fmt.Sprintf("commas[%d] is nil", i-1))
			}
			kids = append(kids, comma)
		}
		if rng == nil {
			panic(fmt.Sprintf("ranges[%d] is nil", i))
		}
		kids = append(kids, rng)
	}
	kids = append(kids, semicolon)
	return &ReservedNode{
		compositeNode: compositeNode{
			children: kids,
		},
		Keyword:   keyword,
		Ranges:    ranges,
		Commas:    commas,
		Semicolon: semicolon,
	}
}
// NewReservedNamesNode creates a new *ReservedNode that represents reserved
// names. All args must be non-nil except semicolon, which may be nil (in
// which case no trailing ";" node is recorded).
//   - keyword: The token corresponding to the "reserved" keyword.
//   - names: One or more names.
//   - commas: Tokens that represent the "," runes that delimit the names.
//     The length of commas must be one less than the length of names.
//   - semicolon: The token corresponding to the ";" rune that ends the declaration.
func NewReservedNamesNode(keyword *KeywordNode, names []StringValueNode, commas []*RuneNode, semicolon *RuneNode) *ReservedNode {
	if keyword == nil {
		panic("keyword is nil")
	}
	if len(names) == 0 {
		panic("must have at least one name")
	}
	if len(commas) != len(names)-1 {
		panic(fmt.Sprintf("%d names requires %d commas, not %d", len(names), len(names)-1, len(commas)))
	}
	numChildren := len(names) * 2
	if semicolon != nil {
		numChildren++
	}
	children := make([]Node, 0, numChildren)
	children = append(children, keyword)
	for i, name := range names {
		if i > 0 {
			if commas[i-1] == nil {
				panic(fmt.Sprintf("commas[%d] is nil", i-1))
			}
			// interleave the separator comma before this name
			children = append(children, commas[i-1])
		}
		if name == nil {
			panic(fmt.Sprintf("names[%d] is nil", i))
		}
		children = append(children, name)
	}
	if semicolon != nil {
		children = append(children, semicolon)
	}
	return &ReservedNode{
		compositeNode: compositeNode{
			children: children,
		},
		Keyword:   keyword,
		Names:     names,
		Commas:    commas,
		Semicolon: semicolon,
	}
}
// NewReservedIdentifiersNode creates a new *ReservedNode that represents
// reserved names given as identifiers (as used in editions syntax). All args
// must be non-nil except semicolon, which may be nil (in which case no
// trailing ";" node is recorded).
//   - keyword: The token corresponding to the "reserved" keyword.
//   - names: One or more identifier names.
//   - commas: Tokens that represent the "," runes that delimit the names.
//     The length of commas must be one less than the length of names.
//   - semicolon: The token corresponding to the ";" rune that ends the declaration.
func NewReservedIdentifiersNode(keyword *KeywordNode, names []*IdentNode, commas []*RuneNode, semicolon *RuneNode) *ReservedNode {
	if keyword == nil {
		panic("keyword is nil")
	}
	if len(names) == 0 {
		panic("must have at least one name")
	}
	if len(commas) != len(names)-1 {
		panic(fmt.Sprintf("%d names requires %d commas, not %d", len(names), len(names)-1, len(commas)))
	}
	numChildren := len(names) * 2
	if semicolon != nil {
		numChildren++
	}
	children := make([]Node, 0, numChildren)
	children = append(children, keyword)
	for i, name := range names {
		if i > 0 {
			if commas[i-1] == nil {
				panic(fmt.Sprintf("commas[%d] is nil", i-1))
			}
			// interleave the separator comma before this name
			children = append(children, commas[i-1])
		}
		if name == nil {
			panic(fmt.Sprintf("names[%d] is nil", i))
		}
		children = append(children, name)
	}
	if semicolon != nil {
		children = append(children, semicolon)
	}
	return &ReservedNode{
		compositeNode: compositeNode{
			children: children,
		},
		Keyword:     keyword,
		Identifiers: names,
		Commas:      commas,
		Semicolon:   semicolon,
	}
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import "fmt"
// ServiceNode represents a service declaration. Example:
//
//	service Foo {
//	  rpc Bar (Baz) returns (Bob);
//	  rpc Frobnitz (stream Parts) returns (Gyzmeaux);
//	}
type ServiceNode struct {
	compositeNode
	Keyword    *KeywordNode     // token for the "service" keyword
	Name       *IdentNode       // token for the service's name
	OpenBrace  *RuneNode        // the "{" rune that starts the body
	Decls      []ServiceElement // all declarations inside the service body
	CloseBrace *RuneNode        // the "}" rune that ends the body
}

// fileElement marks *ServiceNode as a file element, so a service declaration
// can appear at the top level of a file.
func (*ServiceNode) fileElement() {}
// NewServiceNode creates a new *ServiceNode. All arguments must be non-nil.
//   - keyword: The token corresponding to the "service" keyword.
//   - name: The token corresponding to the service's name.
//   - openBrace: The token corresponding to the "{" rune that starts the body.
//   - decls: All declarations inside the service body; each must be an
//     *OptionNode, *RPCNode, or *EmptyDeclNode.
//   - closeBrace: The token corresponding to the "}" rune that ends the body.
func NewServiceNode(keyword *KeywordNode, name *IdentNode, openBrace *RuneNode, decls []ServiceElement, closeBrace *RuneNode) *ServiceNode {
	if keyword == nil {
		panic("keyword is nil")
	}
	if name == nil {
		panic("name is nil")
	}
	if openBrace == nil {
		panic("openBrace is nil")
	}
	if closeBrace == nil {
		panic("closeBrace is nil")
	}
	kids := make([]Node, 0, 4+len(decls))
	kids = append(kids, keyword, name, openBrace)
	for _, decl := range decls {
		// Reject element types that are not legal inside a service body.
		switch decl.(type) {
		case *OptionNode, *RPCNode, *EmptyDeclNode:
		default:
			panic(fmt.Sprintf("invalid ServiceElement type: %T", decl))
		}
		kids = append(kids, decl)
	}
	kids = append(kids, closeBrace)
	return &ServiceNode{
		compositeNode: compositeNode{
			children: kids,
		},
		Keyword:    keyword,
		Name:       name,
		OpenBrace:  openBrace,
		Decls:      decls,
		CloseBrace: closeBrace,
	}
}
// RangeOptions invokes fn for each *OptionNode declared directly in the
// service body, stopping early if fn returns false.
func (n *ServiceNode) RangeOptions(fn func(*OptionNode) bool) {
	for _, decl := range n.Decls {
		opt, ok := decl.(*OptionNode)
		if !ok {
			continue
		}
		if !fn(opt) {
			return
		}
	}
}
// ServiceElement is an interface implemented by all AST nodes that can
// appear in the body of a service declaration.
type ServiceElement interface {
	Node
	// serviceElement is an unexported marker method, restricting the
	// interface to types declared in this package.
	serviceElement()
}

// Compile-time checks for the element types allowed in a service body.
var _ ServiceElement = (*OptionNode)(nil)
var _ ServiceElement = (*RPCNode)(nil)
var _ ServiceElement = (*EmptyDeclNode)(nil)

// RPCDeclNode is a placeholder interface for AST nodes that represent RPC
// declarations. This allows NoSourceNode to be used in place of *RPCNode
// for some usages.
type RPCDeclNode interface {
	NodeWithOptions
	// GetName returns the node for the RPC's name.
	GetName() Node
	// GetInputType returns the node for the RPC's request type.
	GetInputType() Node
	// GetOutputType returns the node for the RPC's response type.
	GetOutputType() Node
}

var _ RPCDeclNode = (*RPCNode)(nil)
var _ RPCDeclNode = (*NoSourceNode)(nil)
// RPCNode represents an RPC declaration. Example:
//
//	rpc Foo (Bar) returns (Baz);
type RPCNode struct {
	compositeNode
	Keyword *KeywordNode // token for the "rpc" keyword
	Name    *IdentNode   // token for the RPC's name
	Input   *RPCTypeNode // the parenthesized request type
	Returns *KeywordNode // token for the "returns" keyword
	Output  *RPCTypeNode // the parenthesized response type
	// Semicolon is set by NewRPCNode for an RPC without a body; it may be
	// nil even then (see NewRPCNode).
	Semicolon *RuneNode
	// OpenBrace, Decls, and CloseBrace are set by NewRPCNodeWithBody for an
	// RPC that has a body (e.g. with options).
	OpenBrace  *RuneNode
	Decls      []RPCElement
	CloseBrace *RuneNode
}
// serviceElement marks *RPCNode as a ServiceElement, so an RPC can appear in
// a service body. The receiver is deliberately unnamed: it is unused, and
// this matches the other marker methods in this package (e.g.
// (*ServiceNode).fileElement).
func (*RPCNode) serviceElement() {}
// NewRPCNode creates a new *RPCNode with no body. All arguments other than
// semicolon must be non-nil.
//   - keyword: The token corresponding to the "rpc" keyword.
//   - name: The token corresponding to the RPC's name.
//   - input: The token corresponding to the RPC input message type.
//   - returns: The token corresponding to the "returns" keyword that precedes the output type.
//   - output: The token corresponding to the RPC output message type.
//   - semicolon: The token corresponding to the ";" rune that ends the
//     declaration; may be nil.
func NewRPCNode(keyword *KeywordNode, name *IdentNode, input *RPCTypeNode, returns *KeywordNode, output *RPCTypeNode, semicolon *RuneNode) *RPCNode {
	if keyword == nil {
		panic("keyword is nil")
	}
	if name == nil {
		panic("name is nil")
	}
	if input == nil {
		panic("input is nil")
	}
	if returns == nil {
		panic("returns is nil")
	}
	if output == nil {
		panic("output is nil")
	}
	kids := make([]Node, 0, 6)
	kids = append(kids, keyword, name, input, returns, output)
	if semicolon != nil {
		kids = append(kids, semicolon)
	}
	return &RPCNode{
		compositeNode: compositeNode{
			children: kids,
		},
		Keyword:   keyword,
		Name:      name,
		Input:     input,
		Returns:   returns,
		Output:    output,
		Semicolon: semicolon,
	}
}
// NewRPCNodeWithBody creates a new *RPCNode that includes a body (and possibly
// options). All arguments must be non-nil.
//   - keyword: The token corresponding to the "rpc" keyword.
//   - name: The token corresponding to the RPC's name.
//   - input: The token corresponding to the RPC input message type.
//   - returns: The token corresponding to the "returns" keyword that precedes the output type.
//   - output: The token corresponding to the RPC output message type.
//   - openBrace: The token corresponding to the "{" rune that starts the body.
//   - decls: All declarations inside the RPC body; each must be an
//     *OptionNode or *EmptyDeclNode.
//   - closeBrace: The token corresponding to the "}" rune that ends the body.
func NewRPCNodeWithBody(keyword *KeywordNode, name *IdentNode, input *RPCTypeNode, returns *KeywordNode, output *RPCTypeNode, openBrace *RuneNode, decls []RPCElement, closeBrace *RuneNode) *RPCNode {
	if keyword == nil {
		panic("keyword is nil")
	}
	if name == nil {
		panic("name is nil")
	}
	if input == nil {
		panic("input is nil")
	}
	if returns == nil {
		panic("returns is nil")
	}
	if output == nil {
		panic("output is nil")
	}
	if openBrace == nil {
		panic("openBrace is nil")
	}
	if closeBrace == nil {
		panic("closeBrace is nil")
	}
	kids := make([]Node, 0, 7+len(decls))
	kids = append(kids, keyword, name, input, returns, output, openBrace)
	for _, decl := range decls {
		// Reject element types that are not legal inside an RPC body.
		switch decl.(type) {
		case *OptionNode, *EmptyDeclNode:
		default:
			panic(fmt.Sprintf("invalid RPCElement type: %T", decl))
		}
		kids = append(kids, decl)
	}
	kids = append(kids, closeBrace)
	return &RPCNode{
		compositeNode: compositeNode{
			children: kids,
		},
		Keyword:    keyword,
		Name:       name,
		Input:      input,
		Returns:    returns,
		Output:     output,
		OpenBrace:  openBrace,
		Decls:      decls,
		CloseBrace: closeBrace,
	}
}
// GetName returns the node for the RPC's name. It implements part of the
// RPCDeclNode interface.
func (n *RPCNode) GetName() Node {
	return n.Name
}

// GetInputType returns the node naming the RPC's request type. It implements
// part of the RPCDeclNode interface.
func (n *RPCNode) GetInputType() Node {
	return n.Input.MessageType
}

// GetOutputType returns the node naming the RPC's response type. It
// implements part of the RPCDeclNode interface.
func (n *RPCNode) GetOutputType() Node {
	return n.Output.MessageType
}
// RangeOptions invokes fn for each *OptionNode declared directly in the RPC
// body, stopping early if fn returns false.
func (n *RPCNode) RangeOptions(fn func(*OptionNode) bool) {
	for _, decl := range n.Decls {
		opt, ok := decl.(*OptionNode)
		if !ok {
			continue
		}
		if !fn(opt) {
			return
		}
	}
}
// RPCElement is an interface implemented by all AST nodes that can
// appear in the body of an rpc declaration (aka method).
type RPCElement interface {
	Node
	// methodElement is an unexported marker method, restricting the
	// interface to types declared in this package.
	methodElement()
}

var _ RPCElement = (*OptionNode)(nil)
var _ RPCElement = (*EmptyDeclNode)(nil)

// RPCTypeNode represents the declaration of a request or response type for an
// RPC. Example:
//
//	(stream foo.Bar)
type RPCTypeNode struct {
	compositeNode
	OpenParen   *RuneNode      // the "(" rune
	Stream      *KeywordNode   // the "stream" keyword; nil when not present
	MessageType IdentValueNode // the message type name
	CloseParen  *RuneNode      // the ")" rune
}
// NewRPCTypeNode creates a new *RPCTypeNode. All arguments must be non-nil
// except stream, which may be nil.
//   - openParen: The token corresponding to the "(" rune that starts the declaration.
//   - stream: The token corresponding to the "stream" keyword or nil if not present.
//   - msgType: The token corresponding to the message type's name.
//   - closeParen: The token corresponding to the ")" rune that ends the declaration.
func NewRPCTypeNode(openParen *RuneNode, stream *KeywordNode, msgType IdentValueNode, closeParen *RuneNode) *RPCTypeNode {
	if openParen == nil {
		panic("openParen is nil")
	}
	if msgType == nil {
		panic("msgType is nil")
	}
	if closeParen == nil {
		panic("closeParen is nil")
	}
	kids := make([]Node, 0, 4)
	kids = append(kids, openParen)
	if stream != nil {
		kids = append(kids, stream)
	}
	kids = append(kids, msgType, closeParen)
	return &RPCTypeNode{
		compositeNode: compositeNode{
			children: kids,
		},
		OpenParen:   openParen,
		Stream:      stream,
		MessageType: msgType,
		CloseParen:  closeParen,
	}
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import (
"fmt"
"math"
"strings"
)
// ValueNode is an AST node that represents a literal value.
//
// It also includes references (identifier nodes), which can be used as
// values in some contexts, such as describing the default value for a
// field, which can refer to an enum value.
//
// This also allows NoSourceNode to be used in place of a real value node
// for some usages.
type ValueNode interface {
	Node
	// Value returns a Go representation of the value. For scalars, this
	// will be a string, int64, uint64, float64, or bool. This could also
	// be an Identifier (e.g. IdentValueNodes). It can also be a composite
	// literal:
	//   - For array literals, the type returned will be []ValueNode
	//   - For message literals, the type returned will be []*MessageFieldNode
	//
	// If the ValueNode is a NoSourceNode, indicating that there is no actual
	// source code (and thus no AST information), then this method always
	// returns nil.
	Value() any
}

// Compile-time checks that every value node type satisfies ValueNode.
var _ ValueNode = (*IdentNode)(nil)
var _ ValueNode = (*CompoundIdentNode)(nil)
var _ ValueNode = (*StringLiteralNode)(nil)
var _ ValueNode = (*CompoundStringLiteralNode)(nil)
var _ ValueNode = (*UintLiteralNode)(nil)
var _ ValueNode = (*NegativeIntLiteralNode)(nil)
var _ ValueNode = (*FloatLiteralNode)(nil)
var _ ValueNode = (*SpecialFloatLiteralNode)(nil)
var _ ValueNode = (*SignedFloatLiteralNode)(nil)
var _ ValueNode = (*ArrayLiteralNode)(nil)
var _ ValueNode = (*MessageLiteralNode)(nil)
var _ ValueNode = (*NoSourceNode)(nil)

// StringValueNode is an AST node that represents a string literal.
// Such a node can be a single literal (*StringLiteralNode) or a
// concatenation of multiple literals (*CompoundStringLiteralNode).
type StringValueNode interface {
	ValueNode
	// AsString returns the string value of the literal.
	AsString() string
}

var _ StringValueNode = (*StringLiteralNode)(nil)
var _ StringValueNode = (*CompoundStringLiteralNode)(nil)
// StringLiteralNode represents a simple string literal. Example:
//
//	"proto2"
type StringLiteralNode struct {
	terminalNode
	// Val is the actual string value that the literal indicates.
	Val string
}

// NewStringLiteralNode creates a new *StringLiteralNode with the given val.
// The tok argument identifies the underlying token (and thus the source
// position) of the literal.
func NewStringLiteralNode(val string, tok Token) *StringLiteralNode {
	return &StringLiteralNode{
		terminalNode: tok.asTerminalNode(),
		Val:          val,
	}
}

// Value implements ValueNode; it returns the string value.
func (n *StringLiteralNode) Value() any {
	return n.AsString()
}

// AsString implements StringValueNode; it returns the literal's value.
func (n *StringLiteralNode) AsString() string {
	return n.Val
}
// CompoundStringLiteralNode represents a compound string literal, which is
// the concatenation of adjacent string literals. Example:
//
//	"this " "is" " all one " "string"
type CompoundStringLiteralNode struct {
	compositeNode
	// Val is the concatenation of all component literal values.
	Val string
}
// NewCompoundLiteralStringNode creates a new *CompoundStringLiteralNode that
// consists of the given string components. The components argument may not be
// empty.
func NewCompoundLiteralStringNode(components ...*StringLiteralNode) *CompoundStringLiteralNode {
	if len(components) == 0 {
		panic("must have at least one component")
	}
	// Each component is a child node; the node's value is the concatenation
	// of all component values.
	kids := make([]Node, 0, len(components))
	var sb strings.Builder
	for _, component := range components {
		kids = append(kids, component)
		sb.WriteString(component.Val)
	}
	return &CompoundStringLiteralNode{
		compositeNode: compositeNode{
			children: kids,
		},
		Val: sb.String(),
	}
}
// Value implements ValueNode; it returns the concatenated string value.
func (n *CompoundStringLiteralNode) Value() any {
	return n.AsString()
}

// AsString implements StringValueNode; it returns the concatenated value.
func (n *CompoundStringLiteralNode) AsString() string {
	return n.Val
}
// IntValueNode is an AST node that represents an integer literal. If
// an integer literal is too large for an int64 (or uint64 for
// positive literals), it is represented instead by a FloatValueNode.
type IntValueNode interface {
	ValueNode
	// AsInt64 returns the value and true, or 0 and false if the value
	// cannot be represented as an int64.
	AsInt64() (int64, bool)
	// AsUint64 returns the value and true, or 0 and false if the value
	// cannot be represented as a uint64.
	AsUint64() (uint64, bool)
}
// AsInt32 range-checks the given int value: it returns the value and true
// when the value fits within [minVal, maxVal], or 0 and false when it is
// outside that range (or cannot be represented as an int64 at all).
func AsInt32(n IntValueNode, minVal, maxVal int32) (int32, bool) {
	v, ok := n.AsInt64()
	switch {
	case !ok:
		return 0, false
	case v < int64(minVal), v > int64(maxVal):
		return 0, false
	default:
		return int32(v), true
	}
}
var _ IntValueNode = (*UintLiteralNode)(nil)
var _ IntValueNode = (*NegativeIntLiteralNode)(nil)

// UintLiteralNode represents a simple integer literal with no sign character.
type UintLiteralNode struct {
	terminalNode
	// Val is the numeric value indicated by the literal.
	Val uint64
}
// NewUintLiteralNode creates a new *UintLiteralNode with the given val. The
// tok argument identifies the underlying token (and thus the source position)
// of the literal.
func NewUintLiteralNode(val uint64, tok Token) *UintLiteralNode {
	node := &UintLiteralNode{
		terminalNode: tok.asTerminalNode(),
		Val:          val,
	}
	return node
}
// Value implements ValueNode; it returns the value as a uint64.
func (n *UintLiteralNode) Value() any {
	return n.Val
}

// AsInt64 implements IntValueNode. It returns 0, false when the value is
// too large for an int64.
func (n *UintLiteralNode) AsInt64() (int64, bool) {
	if n.Val > math.MaxInt64 {
		return 0, false
	}
	return int64(n.Val), true
}

// AsUint64 implements IntValueNode; an unsigned literal always fits.
func (n *UintLiteralNode) AsUint64() (uint64, bool) {
	return n.Val, true
}

// AsFloat implements FloatValueNode, allowing a whole-number literal to be
// used where a floating point value is expected.
func (n *UintLiteralNode) AsFloat() float64 {
	return float64(n.Val)
}
// NegativeIntLiteralNode represents an integer literal with a negative (-) sign.
type NegativeIntLiteralNode struct {
	compositeNode
	Minus *RuneNode        // the "-" sign rune
	Uint  *UintLiteralNode // the unsigned magnitude that follows the sign
	// Val is the signed numeric value indicated by the literal.
	Val int64
}
// NewNegativeIntLiteralNode creates a new *NegativeIntLiteralNode. Both
// arguments must be non-nil.
func NewNegativeIntLiteralNode(sign *RuneNode, i *UintLiteralNode) *NegativeIntLiteralNode {
	if sign == nil {
		panic("sign is nil")
	}
	if i == nil {
		panic("i is nil")
	}
	// NOTE(review): -int64(i.Val) wraps for magnitudes above 1<<63; callers
	// are presumably expected to only supply magnitudes that fit in an
	// int64 after negation — TODO confirm upstream.
	return &NegativeIntLiteralNode{
		compositeNode: compositeNode{
			children: []Node{sign, i},
		},
		Minus: sign,
		Uint:  i,
		Val:   -int64(i.Val),
	}
}
// Value implements ValueNode; it returns the value as an int64.
func (n *NegativeIntLiteralNode) Value() any {
	return n.Val
}

// AsInt64 implements IntValueNode; the stored value is already an int64.
func (n *NegativeIntLiteralNode) AsInt64() (int64, bool) {
	return n.Val, true
}

// AsUint64 implements IntValueNode. It returns 0, false when the value is
// negative.
func (n *NegativeIntLiteralNode) AsUint64() (uint64, bool) {
	if n.Val < 0 {
		return 0, false
	}
	return uint64(n.Val), true
}
// FloatValueNode is an AST node that represents a numeric literal with
// a floating point, in scientific notation, or too large to fit in an
// int64 or uint64.
type FloatValueNode interface {
	ValueNode
	// AsFloat returns the numeric value as a float64.
	AsFloat() float64
}

var _ FloatValueNode = (*FloatLiteralNode)(nil)
var _ FloatValueNode = (*SpecialFloatLiteralNode)(nil)
var _ FloatValueNode = (*UintLiteralNode)(nil)

// FloatLiteralNode represents a floating point numeric literal.
type FloatLiteralNode struct {
	terminalNode
	// Val is the numeric value indicated by the literal.
	Val float64
}
// NewFloatLiteralNode creates a new *FloatLiteralNode with the given val. The
// tok argument identifies the underlying token (and thus the source position)
// of the literal.
func NewFloatLiteralNode(val float64, tok Token) *FloatLiteralNode {
	node := &FloatLiteralNode{
		terminalNode: tok.asTerminalNode(),
		Val:          val,
	}
	return node
}
// Value implements ValueNode; it returns the value as a float64.
func (n *FloatLiteralNode) Value() any {
	return n.AsFloat()
}

// AsFloat implements FloatValueNode; it returns the literal's value.
func (n *FloatLiteralNode) AsFloat() float64 {
	return n.Val
}
// SpecialFloatLiteralNode represents a special floating point numeric literal
// for "inf" and "nan" values.
type SpecialFloatLiteralNode struct {
	// The keyword that denotes the special value.
	*KeywordNode
	// Val is the numeric value: +Inf or NaN.
	Val float64
}
// NewSpecialFloatLiteralNode returns a new *SpecialFloatLiteralNode for the
// given keyword, which must be non-nil. The keyword should be "inf",
// "infinity", or "nan" in any case; any other keyword is treated as NaN.
func NewSpecialFloatLiteralNode(name *KeywordNode) *SpecialFloatLiteralNode {
	if name == nil {
		// Every other constructor in this package validates required args
		// and panics with a clear message; without this check a nil name
		// would nil-deref on name.Val below.
		panic("name is nil")
	}
	var f float64
	switch strings.ToLower(name.Val) {
	case "inf", "infinity":
		f = math.Inf(1)
	default:
		// "nan" — and, defensively, any unexpected keyword.
		f = math.NaN()
	}
	return &SpecialFloatLiteralNode{
		KeywordNode: name,
		Val:         f,
	}
}
// Value implements ValueNode; it returns the value as a float64.
func (n *SpecialFloatLiteralNode) Value() any {
	return n.AsFloat()
}

// AsFloat implements FloatValueNode; it returns +Inf or NaN.
func (n *SpecialFloatLiteralNode) AsFloat() float64 {
	return n.Val
}
// SignedFloatLiteralNode represents a signed floating point number.
type SignedFloatLiteralNode struct {
	compositeNode
	Sign  *RuneNode      // the sign rune; the value is negated only when this is '-'
	Float FloatValueNode // the unsigned magnitude that follows the sign
	// Val is the signed numeric value.
	Val float64
}
// NewSignedFloatLiteralNode creates a new *SignedFloatLiteralNode. Both
// arguments must be non-nil.
func NewSignedFloatLiteralNode(sign *RuneNode, f FloatValueNode) *SignedFloatLiteralNode {
	if sign == nil {
		panic("sign is nil")
	}
	if f == nil {
		panic("f is nil")
	}
	// Only a '-' sign negates the magnitude; any other sign leaves it as-is.
	magnitude := f.AsFloat()
	if sign.Rune == '-' {
		magnitude = -magnitude
	}
	return &SignedFloatLiteralNode{
		compositeNode: compositeNode{
			children: []Node{sign, f},
		},
		Sign:  sign,
		Float: f,
		Val:   magnitude,
	}
}
// Value implements ValueNode; it returns the signed value as a float64.
func (n *SignedFloatLiteralNode) Value() any {
	return n.Val
}

// AsFloat implements FloatValueNode; it returns the signed value.
func (n *SignedFloatLiteralNode) AsFloat() float64 {
	return n.Val
}
// ArrayLiteralNode represents an array literal, which is only allowed inside of
// a MessageLiteralNode, to indicate values for a repeated field. Example:
//
//	["foo", "bar", "baz"]
type ArrayLiteralNode struct {
	compositeNode
	OpenBracket *RuneNode   // the "[" rune
	Elements    []ValueNode // the array's element values
	// Commas represent the separating ',' characters between elements. The
	// length of this slice must be exactly len(Elements)-1, with each item
	// in Elements having a corresponding item in this slice *except the last*
	// (since a trailing comma is not allowed).
	Commas       []*RuneNode
	CloseBracket *RuneNode // the "]" rune
}
// NewArrayLiteralNode creates a new *ArrayLiteralNode. The openBracket and
// closeBracket args must be non-nil and represent the "[" and "]" runes that
// surround the array values. The given commas arg must have a length that is
// one less than the length of the vals arg. However, vals may be empty, in
// which case commas must also be empty.
func NewArrayLiteralNode(openBracket *RuneNode, vals []ValueNode, commas []*RuneNode, closeBracket *RuneNode) *ArrayLiteralNode {
	if openBracket == nil {
		panic("openBracket is nil")
	}
	if closeBracket == nil {
		panic("closeBracket is nil")
	}
	if len(vals) == 0 && len(commas) != 0 {
		panic("vals is empty but commas is not")
	}
	if len(vals) > 0 && len(commas) != len(vals)-1 {
		panic(fmt.Sprintf("%d vals requires %d commas, not %d", len(vals), len(vals)-1, len(commas)))
	}
	// Children are: "[", values interleaved with commas, then "]".
	kids := make([]Node, 0, len(vals)*2+1)
	kids = append(kids, openBracket)
	for i, element := range vals {
		if i > 0 {
			comma := commas[i-1]
			if comma == nil {
				panic(fmt.Sprintf("commas[%d] is nil", i-1))
			}
			kids = append(kids, comma)
		}
		if element == nil {
			panic(fmt.Sprintf("vals[%d] is nil", i))
		}
		kids = append(kids, element)
	}
	kids = append(kids, closeBracket)
	return &ArrayLiteralNode{
		compositeNode: compositeNode{
			children: kids,
		},
		OpenBracket:  openBracket,
		Elements:     vals,
		Commas:       commas,
		CloseBracket: closeBracket,
	}
}
// Value implements ValueNode; the returned value has type []ValueNode.
func (n *ArrayLiteralNode) Value() any {
	return n.Elements
}
// MessageLiteralNode represents a message literal, which is compatible with the
// protobuf text format and can be used for custom options with message types.
// Example:
//
//	{ foo:1 foo:2 foo:3 bar:<name:"abc" id:123> }
type MessageLiteralNode struct {
	compositeNode
	Open     *RuneNode // should be '{' or '<'
	Elements []*MessageFieldNode
	// Separator characters between elements, which can be either ','
	// or ';' if present. This slice must be exactly len(Elements) in
	// length, with each item in Elements having one corresponding item
	// in Seps. Separators in message literals are optional, so a given
	// item in this slice may be nil to indicate absence of a separator.
	Seps  []*RuneNode
	Close *RuneNode // should be '}' or '>', depending on Open
}
// NewMessageLiteralNode creates a new *MessageLiteralNode. The openSym and
// closeSym runes must not be nil and should be "{" and "}" or "<" and ">".
//
// Unlike separators (dots and commas) used for other AST nodes that represent
// a list of elements, the seps arg must be the SAME length as vals, and it may
// contain nil values to indicate absence of a separator (in fact, it could be
// all nils).
func NewMessageLiteralNode(openSym *RuneNode, vals []*MessageFieldNode, seps []*RuneNode, closeSym *RuneNode) *MessageLiteralNode {
	if openSym == nil {
		panic("openSym is nil")
	}
	if closeSym == nil {
		panic("closeSym is nil")
	}
	if len(seps) != len(vals) {
		// Message fixed: the argument is named seps (',' or ';'), not
		// commas; the old text misreported the failing argument.
		panic(fmt.Sprintf("%d vals requires %d seps, not %d", len(vals), len(vals), len(seps)))
	}
	// Capacity: open + close + each val + each non-nil separator.
	numChildren := len(vals) + 2
	for _, sep := range seps {
		if sep != nil {
			numChildren++
		}
	}
	children := make([]Node, 0, numChildren)
	children = append(children, openSym)
	for i, val := range vals {
		if val == nil {
			panic(fmt.Sprintf("vals[%d] is nil", i))
		}
		children = append(children, val)
		// A field's separator, when present, follows the field.
		if seps[i] != nil {
			children = append(children, seps[i])
		}
	}
	children = append(children, closeSym)
	return &MessageLiteralNode{
		compositeNode: compositeNode{
			children: children,
		},
		Open:     openSym,
		Elements: vals,
		Seps:     seps,
		Close:    closeSym,
	}
}
// Value implements ValueNode; the returned value has type []*MessageFieldNode.
func (n *MessageLiteralNode) Value() any {
	return n.Elements
}
// MessageFieldNode represents a single field (name and value) inside of a
// message literal. Example:
//
//	foo:"bar"
type MessageFieldNode struct {
	compositeNode
	Name *FieldReferenceNode // the field's name (possibly an extension reference)
	// Sep represents the ':' separator between the name and value. If
	// the value is a message or list literal (and thus starts with '<',
	// '{', or '['), then the separator may be omitted and this field may
	// be nil.
	Sep *RuneNode
	Val ValueNode // the field's value
}
// NewMessageFieldNode creates a new *MessageFieldNode. All args except sep
// must be non-nil.
func NewMessageFieldNode(name *FieldReferenceNode, sep *RuneNode, val ValueNode) *MessageFieldNode {
	if name == nil {
		panic("name is nil")
	}
	if val == nil {
		panic("val is nil")
	}
	// Children are: name, the optional ':' separator, then the value.
	kids := make([]Node, 0, 3)
	kids = append(kids, name)
	if sep != nil {
		kids = append(kids, sep)
	}
	kids = append(kids, val)
	return &MessageFieldNode{
		compositeNode: compositeNode{
			children: kids,
		},
		Name: name,
		Sep:  sep,
		Val:  val,
	}
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package ast
import "fmt"
// Walk conducts a walk of the AST rooted at the given root using the given
// visitor. It performs a pre-order traversal, visiting a given AST node
// before it visits that node's descendants.
//
// If a visitor returns an error while walking the tree, the entire operation
// is aborted and that error is returned.
func Walk(root Node, v Visitor, opts ...WalkOption) error {
	var options walkOptions
	for _, applyOpt := range opts {
		applyOpt(&options)
	}
	return walk(root, v, options)
}
// WalkOption represents an option used with the Walk function. These
// allow optional before and after hooks to be invoked as each node in
// the tree is visited.
type WalkOption func(*walkOptions)

// walkOptions collects the hooks configured via WalkOption values.
type walkOptions struct {
	// before runs prior to visiting a node; after runs once the node and
	// all of its descendants have been visited. Either may be nil.
	before, after func(Node) error
}
// WithBefore returns a WalkOption that will cause the given function to be
// invoked before a node is visited during a walk operation. If this hook
// returns an error, the node is not visited and the walk operation is aborted.
func WithBefore(fn func(Node) error) WalkOption {
	return func(o *walkOptions) { o.before = fn }
}
// WithAfter returns a WalkOption that will cause the given function to be
// invoked after a node (as well as any descendants) is visited during a walk
// operation. If no earlier error occurred, an error returned by this hook
// aborts the walk with that error.
//
// If the walk is aborted due to some other visitor or before hook returning
// an error, the after hook is still called for all nodes that have been
// visited. However, the walk operation fails with the first error it
// encountered, so any error returned from an after hook is effectively
// ignored in that case.
func WithAfter(fn func(Node) error) WalkOption {
	return func(o *walkOptions) { o.after = fn }
}
// walk recursively visits root and then its descendants (pre-order),
// applying the configured before/after hooks around each node's subtree.
//
// The named return value is essential: the deferred after hook may assign to
// err, but only when no earlier error was recorded.
func walk(root Node, v Visitor, opts walkOptions) (err error) {
	if opts.before != nil {
		if err := opts.before(root); err != nil {
			return err
		}
	}
	if opts.after != nil {
		// Deferred so the after hook runs even when the visit or a child's
		// walk fails partway through.
		defer func() {
			if afterErr := opts.after(root); afterErr != nil {
				// if another call already returned an error then we
				// have to ignore the error from the after hook
				if err == nil {
					err = afterErr
				}
			}
		}()
	}
	if err := Visit(root, v); err != nil {
		return err
	}
	// Only composite nodes have children; recurse into each in order.
	if comp, ok := root.(CompositeNode); ok {
		for _, child := range comp.Children() {
			if err := walk(child, v, opts); err != nil {
				return err
			}
		}
	}
	return nil
}
// Visit implements the double-dispatch idiom and visits the given node by
// calling the appropriate method of the given visitor.
//
// It panics when given a node type it does not know about (e.g. a synthetic
// node type that never appears in an actual AST).
func Visit(n Node, v Visitor) error {
	switch n := n.(type) {
	case *FileNode:
		return v.VisitFileNode(n)
	case *SyntaxNode:
		return v.VisitSyntaxNode(n)
	case *EditionNode:
		return v.VisitEditionNode(n)
	case *PackageNode:
		return v.VisitPackageNode(n)
	case *ImportNode:
		return v.VisitImportNode(n)
	case *OptionNode:
		return v.VisitOptionNode(n)
	case *OptionNameNode:
		return v.VisitOptionNameNode(n)
	case *FieldReferenceNode:
		return v.VisitFieldReferenceNode(n)
	case *CompactOptionsNode:
		return v.VisitCompactOptionsNode(n)
	case *MessageNode:
		return v.VisitMessageNode(n)
	case *ExtendNode:
		return v.VisitExtendNode(n)
	case *ExtensionRangeNode:
		return v.VisitExtensionRangeNode(n)
	case *ReservedNode:
		return v.VisitReservedNode(n)
	case *RangeNode:
		return v.VisitRangeNode(n)
	case *FieldNode:
		return v.VisitFieldNode(n)
	case *GroupNode:
		return v.VisitGroupNode(n)
	case *MapFieldNode:
		return v.VisitMapFieldNode(n)
	case *MapTypeNode:
		return v.VisitMapTypeNode(n)
	case *OneofNode:
		return v.VisitOneofNode(n)
	case *EnumNode:
		return v.VisitEnumNode(n)
	case *EnumValueNode:
		return v.VisitEnumValueNode(n)
	case *ServiceNode:
		return v.VisitServiceNode(n)
	case *RPCNode:
		return v.VisitRPCNode(n)
	case *RPCTypeNode:
		return v.VisitRPCTypeNode(n)
	case *IdentNode:
		return v.VisitIdentNode(n)
	case *CompoundIdentNode:
		return v.VisitCompoundIdentNode(n)
	case *StringLiteralNode:
		return v.VisitStringLiteralNode(n)
	case *CompoundStringLiteralNode:
		return v.VisitCompoundStringLiteralNode(n)
	case *UintLiteralNode:
		return v.VisitUintLiteralNode(n)
	case *NegativeIntLiteralNode:
		return v.VisitNegativeIntLiteralNode(n)
	case *FloatLiteralNode:
		return v.VisitFloatLiteralNode(n)
	case *SpecialFloatLiteralNode:
		return v.VisitSpecialFloatLiteralNode(n)
	case *SignedFloatLiteralNode:
		return v.VisitSignedFloatLiteralNode(n)
	case *ArrayLiteralNode:
		return v.VisitArrayLiteralNode(n)
	case *MessageLiteralNode:
		return v.VisitMessageLiteralNode(n)
	case *MessageFieldNode:
		return v.VisitMessageFieldNode(n)
	case *KeywordNode:
		return v.VisitKeywordNode(n)
	case *RuneNode:
		return v.VisitRuneNode(n)
	case *EmptyDeclNode:
		return v.VisitEmptyDeclNode(n)
	default:
		panic(fmt.Sprintf("unexpected type of node: %T", n))
	}
}
// AncestorTracker is used to track the path of nodes during a walk operation.
// By passing AsWalkOptions to a call to Walk, a visitor can inspect the path to
// the node being visited using this tracker.
type AncestorTracker struct {
	// ancestors is maintained as a stack: the walk's root is first and the
	// currently visited node is last.
	ancestors []Node
}
// AsWalkOptions returns WalkOption values that will cause this ancestor
// tracker to track the path through the AST during the walk operation: a
// before hook pushes each node onto the stack and an after hook pops it.
func (t *AncestorTracker) AsWalkOptions() []WalkOption {
	push := WithBefore(func(n Node) error {
		t.ancestors = append(t.ancestors, n)
		return nil
	})
	pop := WithAfter(func(_ Node) error {
		t.ancestors = t.ancestors[:len(t.ancestors)-1]
		return nil
	})
	return []WalkOption{push, pop}
}
// Path returns a slice of nodes that represents the path from the root of the
// walk operation to the currently visited node. The first element in the path
// is the root supplied to Walk. The last element in the path is the currently
// visited node.
//
// The returned slice is not a defensive copy; so callers should NOT mutate it.
func (t *AncestorTracker) Path() []Node {
	return t.ancestors
}
// Parent returns the parent node of the currently visited node. If the node
// currently being visited is the root supplied to Walk then nil is returned.
func (t *AncestorTracker) Parent() Node {
	if depth := len(t.ancestors); depth > 1 {
		return t.ancestors[depth-2]
	}
	return nil
}
// VisitChildren visits all direct children of the given node using the given
// visitor. If visiting a child returns an error, that error is immediately
// returned, and other children will not be visited.
func VisitChildren(n CompositeNode, v Visitor) error {
	for _, child := range n.Children() {
		if err := Visit(child, v); err != nil {
			return err
		}
	}
	return nil
}
// Visitor provides a technique for walking the AST that allows for
// dynamic dispatch, where a particular function is invoked based on
// the runtime type of the argument.
//
// It consists of a number of functions, each of which matches a
// concrete Node type.
//
// NOTE: As the language evolves, new methods may be added to this
// interface to correspond to new grammar elements. That is why it
// cannot be directly implemented outside this package. Visitor
// implementations must embed NoOpVisitor and then implement the
// subset of methods of interest. If such an implementation is used
// with an AST that has newer elements, the visitor will not do
// anything in response to the new node types.
//
// An alternative to embedding NoOpVisitor is to use an instance of
// SimpleVisitor.
//
// Visitors can be supplied to a Walk operation or passed to a call
// to Visit or VisitChildren.
//
// Note that there are some AST node types defined in this package
// that do not have corresponding visit methods. These are synthetic
// node types, that have specialized use from the parser, but never
// appear in an actual AST (which is always rooted at FileNode).
// These include SyntheticMapField, SyntheticOneof,
// SyntheticGroupMessageNode, and SyntheticMapEntryNode.
type Visitor interface {
	// VisitFileNode is invoked when visiting a *FileNode in the AST.
	VisitFileNode(*FileNode) error
	// VisitSyntaxNode is invoked when visiting a *SyntaxNode in the AST.
	VisitSyntaxNode(*SyntaxNode) error
	// VisitEditionNode is invoked when visiting an *EditionNode in the AST.
	VisitEditionNode(*EditionNode) error
	// VisitPackageNode is invoked when visiting a *PackageNode in the AST.
	VisitPackageNode(*PackageNode) error
	// VisitImportNode is invoked when visiting an *ImportNode in the AST.
	VisitImportNode(*ImportNode) error
	// VisitOptionNode is invoked when visiting an *OptionNode in the AST.
	VisitOptionNode(*OptionNode) error
	// VisitOptionNameNode is invoked when visiting an *OptionNameNode in the AST.
	VisitOptionNameNode(*OptionNameNode) error
	// VisitFieldReferenceNode is invoked when visiting a *FieldReferenceNode in the AST.
	VisitFieldReferenceNode(*FieldReferenceNode) error
	// VisitCompactOptionsNode is invoked when visiting a *CompactOptionsNode in the AST.
	VisitCompactOptionsNode(*CompactOptionsNode) error
	// VisitMessageNode is invoked when visiting a *MessageNode in the AST.
	VisitMessageNode(*MessageNode) error
	// VisitExtendNode is invoked when visiting an *ExtendNode in the AST.
	VisitExtendNode(*ExtendNode) error
	// VisitExtensionRangeNode is invoked when visiting an *ExtensionRangeNode in the AST.
	VisitExtensionRangeNode(*ExtensionRangeNode) error
	// VisitReservedNode is invoked when visiting a *ReservedNode in the AST.
	VisitReservedNode(*ReservedNode) error
	// VisitRangeNode is invoked when visiting a *RangeNode in the AST.
	VisitRangeNode(*RangeNode) error
	// VisitFieldNode is invoked when visiting a *FieldNode in the AST.
	VisitFieldNode(*FieldNode) error
	// VisitGroupNode is invoked when visiting a *GroupNode in the AST.
	VisitGroupNode(*GroupNode) error
	// VisitMapFieldNode is invoked when visiting a *MapFieldNode in the AST.
	VisitMapFieldNode(*MapFieldNode) error
	// VisitMapTypeNode is invoked when visiting a *MapTypeNode in the AST.
	VisitMapTypeNode(*MapTypeNode) error
	// VisitOneofNode is invoked when visiting an *OneofNode in the AST.
	VisitOneofNode(*OneofNode) error
	// VisitEnumNode is invoked when visiting an *EnumNode in the AST.
	VisitEnumNode(*EnumNode) error
	// VisitEnumValueNode is invoked when visiting an *EnumValueNode in the AST.
	VisitEnumValueNode(*EnumValueNode) error
	// VisitServiceNode is invoked when visiting a *ServiceNode in the AST.
	VisitServiceNode(*ServiceNode) error
	// VisitRPCNode is invoked when visiting an *RPCNode in the AST.
	VisitRPCNode(*RPCNode) error
	// VisitRPCTypeNode is invoked when visiting an *RPCTypeNode in the AST.
	VisitRPCTypeNode(*RPCTypeNode) error
	// VisitIdentNode is invoked when visiting an *IdentNode in the AST.
	VisitIdentNode(*IdentNode) error
	// VisitCompoundIdentNode is invoked when visiting a *CompoundIdentNode in the AST.
	VisitCompoundIdentNode(*CompoundIdentNode) error
	// VisitStringLiteralNode is invoked when visiting a *StringLiteralNode in the AST.
	VisitStringLiteralNode(*StringLiteralNode) error
	// VisitCompoundStringLiteralNode is invoked when visiting a *CompoundStringLiteralNode in the AST.
	VisitCompoundStringLiteralNode(*CompoundStringLiteralNode) error
	// VisitUintLiteralNode is invoked when visiting a *UintLiteralNode in the AST.
	VisitUintLiteralNode(*UintLiteralNode) error
	// VisitNegativeIntLiteralNode is invoked when visiting a *NegativeIntLiteralNode in the AST.
	VisitNegativeIntLiteralNode(*NegativeIntLiteralNode) error
	// VisitFloatLiteralNode is invoked when visiting a *FloatLiteralNode in the AST.
	VisitFloatLiteralNode(*FloatLiteralNode) error
	// VisitSpecialFloatLiteralNode is invoked when visiting a *SpecialFloatLiteralNode in the AST.
	VisitSpecialFloatLiteralNode(*SpecialFloatLiteralNode) error
	// VisitSignedFloatLiteralNode is invoked when visiting a *SignedFloatLiteralNode in the AST.
	VisitSignedFloatLiteralNode(*SignedFloatLiteralNode) error
	// VisitArrayLiteralNode is invoked when visiting an *ArrayLiteralNode in the AST.
	VisitArrayLiteralNode(*ArrayLiteralNode) error
	// VisitMessageLiteralNode is invoked when visiting a *MessageLiteralNode in the AST.
	VisitMessageLiteralNode(*MessageLiteralNode) error
	// VisitMessageFieldNode is invoked when visiting a *MessageFieldNode in the AST.
	VisitMessageFieldNode(*MessageFieldNode) error
	// VisitKeywordNode is invoked when visiting a *KeywordNode in the AST.
	VisitKeywordNode(*KeywordNode) error
	// VisitRuneNode is invoked when visiting a *RuneNode in the AST.
	VisitRuneNode(*RuneNode) error
	// VisitEmptyDeclNode is invoked when visiting an *EmptyDeclNode in the AST.
	VisitEmptyDeclNode(*EmptyDeclNode) error
	// Unexported method prevents callers from directly implementing.
	isVisitor()
}
// NoOpVisitor is a visitor implementation that does nothing. All methods
// unconditionally return nil. This can be embedded into a struct to make that
// struct implement the Visitor interface, and only the relevant visit methods
// then need to be implemented on the struct.
type NoOpVisitor struct{}

var _ Visitor = NoOpVisitor{}

// isVisitor satisfies the Visitor interface's unexported marker method.
func (NoOpVisitor) isVisitor() {}

// Every visit method below is a no-op that returns nil. The receiver and
// parameter names are omitted since they are never used.

func (NoOpVisitor) VisitFileNode(*FileNode) error { return nil }

func (NoOpVisitor) VisitSyntaxNode(*SyntaxNode) error { return nil }

func (NoOpVisitor) VisitEditionNode(*EditionNode) error { return nil }

func (NoOpVisitor) VisitPackageNode(*PackageNode) error { return nil }

func (NoOpVisitor) VisitImportNode(*ImportNode) error { return nil }

func (NoOpVisitor) VisitOptionNode(*OptionNode) error { return nil }

func (NoOpVisitor) VisitOptionNameNode(*OptionNameNode) error { return nil }

func (NoOpVisitor) VisitFieldReferenceNode(*FieldReferenceNode) error { return nil }

func (NoOpVisitor) VisitCompactOptionsNode(*CompactOptionsNode) error { return nil }

func (NoOpVisitor) VisitMessageNode(*MessageNode) error { return nil }

func (NoOpVisitor) VisitExtendNode(*ExtendNode) error { return nil }

func (NoOpVisitor) VisitExtensionRangeNode(*ExtensionRangeNode) error { return nil }

func (NoOpVisitor) VisitReservedNode(*ReservedNode) error { return nil }

func (NoOpVisitor) VisitRangeNode(*RangeNode) error { return nil }

func (NoOpVisitor) VisitFieldNode(*FieldNode) error { return nil }

func (NoOpVisitor) VisitGroupNode(*GroupNode) error { return nil }

func (NoOpVisitor) VisitMapFieldNode(*MapFieldNode) error { return nil }

func (NoOpVisitor) VisitMapTypeNode(*MapTypeNode) error { return nil }

func (NoOpVisitor) VisitOneofNode(*OneofNode) error { return nil }

func (NoOpVisitor) VisitEnumNode(*EnumNode) error { return nil }

func (NoOpVisitor) VisitEnumValueNode(*EnumValueNode) error { return nil }

func (NoOpVisitor) VisitServiceNode(*ServiceNode) error { return nil }

func (NoOpVisitor) VisitRPCNode(*RPCNode) error { return nil }

func (NoOpVisitor) VisitRPCTypeNode(*RPCTypeNode) error { return nil }

func (NoOpVisitor) VisitIdentNode(*IdentNode) error { return nil }

func (NoOpVisitor) VisitCompoundIdentNode(*CompoundIdentNode) error { return nil }

func (NoOpVisitor) VisitStringLiteralNode(*StringLiteralNode) error { return nil }

func (NoOpVisitor) VisitCompoundStringLiteralNode(*CompoundStringLiteralNode) error { return nil }

func (NoOpVisitor) VisitUintLiteralNode(*UintLiteralNode) error { return nil }

func (NoOpVisitor) VisitNegativeIntLiteralNode(*NegativeIntLiteralNode) error { return nil }

func (NoOpVisitor) VisitFloatLiteralNode(*FloatLiteralNode) error { return nil }

func (NoOpVisitor) VisitSpecialFloatLiteralNode(*SpecialFloatLiteralNode) error { return nil }

func (NoOpVisitor) VisitSignedFloatLiteralNode(*SignedFloatLiteralNode) error { return nil }

func (NoOpVisitor) VisitArrayLiteralNode(*ArrayLiteralNode) error { return nil }

func (NoOpVisitor) VisitMessageLiteralNode(*MessageLiteralNode) error { return nil }

func (NoOpVisitor) VisitMessageFieldNode(*MessageFieldNode) error { return nil }

func (NoOpVisitor) VisitKeywordNode(*KeywordNode) error { return nil }

func (NoOpVisitor) VisitRuneNode(*RuneNode) error { return nil }

func (NoOpVisitor) VisitEmptyDeclNode(*EmptyDeclNode) error { return nil }
// SimpleVisitor is a visitor implementation that uses numerous function fields.
// If a relevant function field is not nil, then it will be invoked when a node
// is visited.
//
// In addition to a function for each concrete node type (and thus for each
// Visit* method of the Visitor interface), it also has function fields that
// accept interface types. So a visitor can, for example, easily treat all
// ValueNodes uniformly by providing a non-nil value for DoVisitValueNode
// instead of having to supply values for the various DoVisit*Node methods
// corresponding to all types that implement ValueNode.
//
// The most specific function provided that matches a given node is the one that
// will be invoked. For example, DoVisitStringValueNode will be called if
// present and applicable before DoVisitValueNode. Similarly, DoVisitValueNode
// would be called before DoVisitTerminalNode or DoVisitCompositeNode. The
// DoVisitNode is the most generic function and is called only if no more
// specific function is present for a given node type.
//
// The *UintLiteralNode type implements both IntValueNode and FloatValueNode.
// In this case, the DoVisitIntValueNode function is considered more specific
// than DoVisitFloatValueNode, so will be preferred if present.
//
// Similarly, *MapFieldNode and *GroupNode implement both FieldDeclNode and
// MessageDeclNode. In this case, the DoVisitFieldDeclNode function is
// treated as more specific than DoVisitMessageDeclNode, so will be preferred
// if both are present.
type SimpleVisitor struct {
	// Functions for concrete node types. Each corresponds 1:1 to a Visit*
	// method of the Visitor interface and is the most specific match for
	// that node type.
	DoVisitFileNode                  func(*FileNode) error
	DoVisitSyntaxNode                func(*SyntaxNode) error
	DoVisitEditionNode               func(*EditionNode) error
	DoVisitPackageNode               func(*PackageNode) error
	DoVisitImportNode                func(*ImportNode) error
	DoVisitOptionNode                func(*OptionNode) error
	DoVisitOptionNameNode            func(*OptionNameNode) error
	DoVisitFieldReferenceNode        func(*FieldReferenceNode) error
	DoVisitCompactOptionsNode        func(*CompactOptionsNode) error
	DoVisitMessageNode               func(*MessageNode) error
	DoVisitExtendNode                func(*ExtendNode) error
	DoVisitExtensionRangeNode        func(*ExtensionRangeNode) error
	DoVisitReservedNode              func(*ReservedNode) error
	DoVisitRangeNode                 func(*RangeNode) error
	DoVisitFieldNode                 func(*FieldNode) error
	DoVisitGroupNode                 func(*GroupNode) error
	DoVisitMapFieldNode              func(*MapFieldNode) error
	DoVisitMapTypeNode               func(*MapTypeNode) error
	DoVisitOneofNode                 func(*OneofNode) error
	DoVisitEnumNode                  func(*EnumNode) error
	DoVisitEnumValueNode             func(*EnumValueNode) error
	DoVisitServiceNode               func(*ServiceNode) error
	DoVisitRPCNode                   func(*RPCNode) error
	DoVisitRPCTypeNode               func(*RPCTypeNode) error
	DoVisitIdentNode                 func(*IdentNode) error
	DoVisitCompoundIdentNode         func(*CompoundIdentNode) error
	DoVisitStringLiteralNode         func(*StringLiteralNode) error
	DoVisitCompoundStringLiteralNode func(*CompoundStringLiteralNode) error
	DoVisitUintLiteralNode           func(*UintLiteralNode) error
	DoVisitNegativeIntLiteralNode    func(*NegativeIntLiteralNode) error
	DoVisitFloatLiteralNode          func(*FloatLiteralNode) error
	DoVisitSpecialFloatLiteralNode   func(*SpecialFloatLiteralNode) error
	DoVisitSignedFloatLiteralNode    func(*SignedFloatLiteralNode) error
	DoVisitArrayLiteralNode          func(*ArrayLiteralNode) error
	DoVisitMessageLiteralNode        func(*MessageLiteralNode) error
	DoVisitMessageFieldNode          func(*MessageFieldNode) error
	DoVisitKeywordNode               func(*KeywordNode) error
	DoVisitRuneNode                  func(*RuneNode) error
	DoVisitEmptyDeclNode             func(*EmptyDeclNode) error
	// Functions for interface (category) types, used as fallbacks when the
	// concrete function above is nil. They are tried from most specific
	// (the node-category interfaces) to least specific (DoVisitNode).
	DoVisitFieldDeclNode   func(FieldDeclNode) error
	DoVisitMessageDeclNode func(MessageDeclNode) error
	DoVisitIdentValueNode  func(IdentValueNode) error
	DoVisitStringValueNode func(StringValueNode) error
	DoVisitIntValueNode    func(IntValueNode) error
	DoVisitFloatValueNode  func(FloatValueNode) error
	DoVisitValueNode       func(ValueNode) error
	DoVisitTerminalNode    func(TerminalNode) error
	DoVisitCompositeNode   func(CompositeNode) error
	DoVisitNode            func(Node) error
}
var _ Visitor = (*SimpleVisitor)(nil)

// isVisitor satisfies the Visitor interface's unexported marker method.
func (v *SimpleVisitor) isVisitor() {}

// visitInterface is the fallback used by the Visit* methods when the
// corresponding concrete DoVisit*Node function field is nil. It tries the
// interface-typed function fields from most specific to least specific:
// first the node-category interfaces (FieldDeclNode, MessageDeclNode,
// IdentValueNode, StringValueNode, IntValueNode, FloatValueNode), then
// ValueNode, then TerminalNode/CompositeNode, and finally DoVisitNode.
// If no applicable function is set, it does nothing and returns nil.
//
// NOTE: the order of type-switch cases is significant — it encodes the
// specificity rules documented on SimpleVisitor.
func (v *SimpleVisitor) visitInterface(node Node) error {
	switch n := node.(type) {
	case FieldDeclNode:
		if v.DoVisitFieldDeclNode != nil {
			return v.DoVisitFieldDeclNode(n)
		}
		// *MapFieldNode and *GroupNode both implement both FieldDeclNode and
		// MessageDeclNode, so handle other case here
		if fn, ok := n.(MessageDeclNode); ok && v.DoVisitMessageDeclNode != nil {
			return v.DoVisitMessageDeclNode(fn)
		}
	case MessageDeclNode:
		if v.DoVisitMessageDeclNode != nil {
			return v.DoVisitMessageDeclNode(n)
		}
	case IdentValueNode:
		if v.DoVisitIdentValueNode != nil {
			return v.DoVisitIdentValueNode(n)
		}
	case StringValueNode:
		if v.DoVisitStringValueNode != nil {
			return v.DoVisitStringValueNode(n)
		}
	case IntValueNode:
		if v.DoVisitIntValueNode != nil {
			return v.DoVisitIntValueNode(n)
		}
		// *UintLiteralNode implements both IntValueNode and FloatValueNode,
		// so handle other case here
		if fn, ok := n.(FloatValueNode); ok && v.DoVisitFloatValueNode != nil {
			return v.DoVisitFloatValueNode(fn)
		}
	case FloatValueNode:
		if v.DoVisitFloatValueNode != nil {
			return v.DoVisitFloatValueNode(n)
		}
	}
	// If a case above matched but its function field was nil, control falls
	// through to these progressively more generic fallbacks.
	if n, ok := node.(ValueNode); ok && v.DoVisitValueNode != nil {
		return v.DoVisitValueNode(n)
	}
	switch n := node.(type) {
	case TerminalNode:
		if v.DoVisitTerminalNode != nil {
			return v.DoVisitTerminalNode(n)
		}
	case CompositeNode:
		if v.DoVisitCompositeNode != nil {
			return v.DoVisitCompositeNode(n)
		}
	}
	if v.DoVisitNode != nil {
		return v.DoVisitNode(node)
	}
	return nil
}
// Each VisitXxx method below follows the same shape: if no concrete handler
// was configured for the node's type, defer to visitInterface, which tries
// the interface-typed handlers from most to least specific; otherwise invoke
// the concrete handler directly.

func (v *SimpleVisitor) VisitFileNode(node *FileNode) error {
	if v.DoVisitFileNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitFileNode(node)
}

func (v *SimpleVisitor) VisitSyntaxNode(node *SyntaxNode) error {
	if v.DoVisitSyntaxNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitSyntaxNode(node)
}

func (v *SimpleVisitor) VisitEditionNode(node *EditionNode) error {
	if v.DoVisitEditionNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitEditionNode(node)
}

func (v *SimpleVisitor) VisitPackageNode(node *PackageNode) error {
	if v.DoVisitPackageNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitPackageNode(node)
}

func (v *SimpleVisitor) VisitImportNode(node *ImportNode) error {
	if v.DoVisitImportNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitImportNode(node)
}

func (v *SimpleVisitor) VisitOptionNode(node *OptionNode) error {
	if v.DoVisitOptionNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitOptionNode(node)
}

func (v *SimpleVisitor) VisitOptionNameNode(node *OptionNameNode) error {
	if v.DoVisitOptionNameNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitOptionNameNode(node)
}

func (v *SimpleVisitor) VisitFieldReferenceNode(node *FieldReferenceNode) error {
	if v.DoVisitFieldReferenceNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitFieldReferenceNode(node)
}

func (v *SimpleVisitor) VisitCompactOptionsNode(node *CompactOptionsNode) error {
	if v.DoVisitCompactOptionsNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitCompactOptionsNode(node)
}

func (v *SimpleVisitor) VisitMessageNode(node *MessageNode) error {
	if v.DoVisitMessageNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitMessageNode(node)
}

func (v *SimpleVisitor) VisitExtendNode(node *ExtendNode) error {
	if v.DoVisitExtendNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitExtendNode(node)
}

func (v *SimpleVisitor) VisitExtensionRangeNode(node *ExtensionRangeNode) error {
	if v.DoVisitExtensionRangeNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitExtensionRangeNode(node)
}

func (v *SimpleVisitor) VisitReservedNode(node *ReservedNode) error {
	if v.DoVisitReservedNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitReservedNode(node)
}

func (v *SimpleVisitor) VisitRangeNode(node *RangeNode) error {
	if v.DoVisitRangeNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitRangeNode(node)
}

func (v *SimpleVisitor) VisitFieldNode(node *FieldNode) error {
	if v.DoVisitFieldNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitFieldNode(node)
}

func (v *SimpleVisitor) VisitGroupNode(node *GroupNode) error {
	if v.DoVisitGroupNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitGroupNode(node)
}

func (v *SimpleVisitor) VisitMapFieldNode(node *MapFieldNode) error {
	if v.DoVisitMapFieldNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitMapFieldNode(node)
}

func (v *SimpleVisitor) VisitMapTypeNode(node *MapTypeNode) error {
	if v.DoVisitMapTypeNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitMapTypeNode(node)
}

func (v *SimpleVisitor) VisitOneofNode(node *OneofNode) error {
	if v.DoVisitOneofNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitOneofNode(node)
}

func (v *SimpleVisitor) VisitEnumNode(node *EnumNode) error {
	if v.DoVisitEnumNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitEnumNode(node)
}

func (v *SimpleVisitor) VisitEnumValueNode(node *EnumValueNode) error {
	if v.DoVisitEnumValueNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitEnumValueNode(node)
}

func (v *SimpleVisitor) VisitServiceNode(node *ServiceNode) error {
	if v.DoVisitServiceNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitServiceNode(node)
}

func (v *SimpleVisitor) VisitRPCNode(node *RPCNode) error {
	if v.DoVisitRPCNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitRPCNode(node)
}

func (v *SimpleVisitor) VisitRPCTypeNode(node *RPCTypeNode) error {
	if v.DoVisitRPCTypeNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitRPCTypeNode(node)
}

func (v *SimpleVisitor) VisitIdentNode(node *IdentNode) error {
	if v.DoVisitIdentNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitIdentNode(node)
}

func (v *SimpleVisitor) VisitCompoundIdentNode(node *CompoundIdentNode) error {
	if v.DoVisitCompoundIdentNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitCompoundIdentNode(node)
}

func (v *SimpleVisitor) VisitStringLiteralNode(node *StringLiteralNode) error {
	if v.DoVisitStringLiteralNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitStringLiteralNode(node)
}

func (v *SimpleVisitor) VisitCompoundStringLiteralNode(node *CompoundStringLiteralNode) error {
	if v.DoVisitCompoundStringLiteralNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitCompoundStringLiteralNode(node)
}

func (v *SimpleVisitor) VisitUintLiteralNode(node *UintLiteralNode) error {
	if v.DoVisitUintLiteralNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitUintLiteralNode(node)
}

func (v *SimpleVisitor) VisitNegativeIntLiteralNode(node *NegativeIntLiteralNode) error {
	if v.DoVisitNegativeIntLiteralNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitNegativeIntLiteralNode(node)
}

func (v *SimpleVisitor) VisitFloatLiteralNode(node *FloatLiteralNode) error {
	if v.DoVisitFloatLiteralNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitFloatLiteralNode(node)
}

func (v *SimpleVisitor) VisitSpecialFloatLiteralNode(node *SpecialFloatLiteralNode) error {
	if v.DoVisitSpecialFloatLiteralNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitSpecialFloatLiteralNode(node)
}

func (v *SimpleVisitor) VisitSignedFloatLiteralNode(node *SignedFloatLiteralNode) error {
	if v.DoVisitSignedFloatLiteralNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitSignedFloatLiteralNode(node)
}

func (v *SimpleVisitor) VisitArrayLiteralNode(node *ArrayLiteralNode) error {
	if v.DoVisitArrayLiteralNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitArrayLiteralNode(node)
}

func (v *SimpleVisitor) VisitMessageLiteralNode(node *MessageLiteralNode) error {
	if v.DoVisitMessageLiteralNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitMessageLiteralNode(node)
}

func (v *SimpleVisitor) VisitMessageFieldNode(node *MessageFieldNode) error {
	if v.DoVisitMessageFieldNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitMessageFieldNode(node)
}

func (v *SimpleVisitor) VisitKeywordNode(node *KeywordNode) error {
	if v.DoVisitKeywordNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitKeywordNode(node)
}

func (v *SimpleVisitor) VisitRuneNode(node *RuneNode) error {
	if v.DoVisitRuneNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitRuneNode(node)
}

func (v *SimpleVisitor) VisitEmptyDeclNode(node *EmptyDeclNode) error {
	if v.DoVisitEmptyDeclNode == nil {
		return v.visitInterface(node)
	}
	return v.DoVisitEmptyDeclNode(node)
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protocompile
import (
"bytes"
"context"
"errors"
"fmt"
"io"
"runtime"
"runtime/debug"
"strings"
"sync"
"golang.org/x/sync/semaphore"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/descriptorpb"
"github.com/bufbuild/protocompile/ast"
"github.com/bufbuild/protocompile/linker"
"github.com/bufbuild/protocompile/options"
"github.com/bufbuild/protocompile/parser"
"github.com/bufbuild/protocompile/reporter"
"github.com/bufbuild/protocompile/sourceinfo"
)
// Compiler handles compilation tasks, to turn protobuf source files, or other
// intermediate representations, into fully linked descriptors.
//
// The compilation process involves five steps for each protobuf source file:
//  1. Parsing the source into an AST (abstract syntax tree).
//  2. Converting the AST into descriptor protos.
//  3. Linking descriptor protos into fully linked descriptors.
//  4. Interpreting options.
//  5. Computing source code information.
//
// With fully linked descriptors, code generators and protoc plugins could be
// invoked (though that step is not implemented by this package and not a
// responsibility of this type).
type Compiler struct {
	// Resolves path/file names into source code or intermediate representations
	// for protobuf source files. This is how the compiler loads the files to
	// be compiled as well as all dependencies. This field is the only required
	// field.
	Resolver Resolver
	// The maximum parallelism to use when compiling. If unspecified or set to
	// a non-positive value, then min(runtime.NumCPU(), runtime.GOMAXPROCS(-1))
	// will be used.
	MaxParallelism int
	// A custom error and warning reporter. If unspecified a default reporter
	// is used. A default reporter fails the compilation after encountering any
	// errors and ignores all warnings.
	Reporter reporter.Reporter
	// If unspecified or set to SourceInfoNone, source code information will not
	// be included in the resulting descriptors. Source code information is
	// metadata in the file descriptor that provides position information (i.e.
	// the line and column where file elements were defined) as well as comments.
	//
	// If set to SourceInfoStandard, normal source code information will be
	// included in the resulting descriptors. This matches the output of protoc
	// (the reference compiler for Protocol Buffers). If set to
	// SourceInfoExtraComments, the resulting descriptor will attempt to preserve
	// as many comments as possible, for all elements in the file, not just for
	// complete declarations.
	//
	// If Resolver returns descriptors or descriptor protos for a file, then
	// those descriptors will not be modified. If they do not already include
	// source code info, they will be left that way when the compile operation
	// concludes. Similarly, if they already have source code info but this mode
	// is set to SourceInfoNone, existing info will be left in place.
	SourceInfoMode SourceInfoMode
	// If true, ASTs are retained in compilation results for which an AST was
	// constructed. So any linker.Result value in the resulting compiled files
	// will have an AST, in addition to descriptors. If left false, the AST
	// will be removed as soon as it's no longer needed. This can help reduce
	// total memory usage for operations involving a large number of files.
	RetainASTs bool
	// If non-nil, the set of symbols already known. Any symbols in the current
	// compilation will be added to it. If the compilation tries to redefine any
	// of these symbols, it will be reported as a collision.
	//
	// This allows a large compilation to be split up into multiple, smaller
	// operations and still be able to identify naming collisions and extension
	// number collisions across all operations.
	Symbols *linker.Symbols
}
// SourceInfoMode indicates how source code info is generated by a Compiler.
type SourceInfoMode int

const (
	// SourceInfoNone indicates that no source code info is generated.
	SourceInfoNone SourceInfoMode = 0
	// SourceInfoStandard indicates that the standard source code info is
	// generated, which includes comments only for complete declarations.
	SourceInfoStandard SourceInfoMode = 1
	// SourceInfoExtraComments indicates that source code info is generated
	// and will include comments for all elements (more comments than would
	// be found in a descriptor produced by protoc).
	SourceInfoExtraComments SourceInfoMode = 2
	// SourceInfoExtraOptionLocations indicates that source code info is
	// generated with additional locations for elements inside of message
	// literals in option values. This can be combined with the above by
	// bitwise-OR'ing it with SourceInfoExtraComments.
	SourceInfoExtraOptionLocations SourceInfoMode = 4
)
// Compile compiles the given file names into fully-linked descriptors. The
// compiler's resolver is used to locate source code (or intermediate artifacts
// such as parsed ASTs or descriptor protos) and then do what is necessary to
// transform that into descriptors (parsing, linking, etc).
//
// Elements in the given returned files will implement [linker.Result] if the
// compiler had to link it (i.e. the resolver provided either a descriptor proto
// or source code). That result will contain a full AST for the file if the
// compiler had to parse it (i.e. the resolver provided source code for that
// file).
func (c *Compiler) Compile(ctx context.Context, files ...string) (linker.Files, error) {
	if len(files) == 0 {
		return nil, nil
	}
	// Derive a cancelable context so that a failure in one task can abort
	// the others (the executor holds the cancel function).
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	// Effective parallelism is min(GOMAXPROCS, NumCPU) unless the caller
	// supplied a positive MaxParallelism.
	par := c.MaxParallelism
	if par <= 0 {
		par = runtime.GOMAXPROCS(-1)
		cpus := runtime.NumCPU()
		if par > cpus {
			par = cpus
		}
	}
	h := reporter.NewHandler(c.Reporter)
	sym := c.Symbols
	if sym == nil {
		// No shared symbol set provided; use a fresh one for this operation.
		sym = &linker.Symbols{}
	}
	e := executor{
		c:       c,
		h:       h,
		s:       semaphore.NewWeighted(int64(par)),
		cancel:  cancel,
		sym:     sym,
		results: map[string]*result{},
	}
	// We lock now and create all tasks under lock to make sure that no
	// async task can create a duplicate result. For example, if files
	// contains both "foo.proto" and "bar.proto", then there is a race
	// after we start compiling "foo.proto" between this loop and the
	// async compilation task to create the result for "bar.proto". But
	// we need to know if the file is directly requested for compilation,
	// so we need this loop to define the result. So this loop holds the
	// lock the whole time so async tasks can't create a result first.
	results := make([]*result, len(files))
	func() {
		e.mu.Lock()
		defer e.mu.Unlock()
		for i, f := range files {
			results[i] = e.compileLocked(ctx, f, true)
		}
	}()
	// Wait for each result (or for cancellation), remembering the first
	// task error encountered so it can be surfaced if the handler has none.
	descs := make([]linker.File, len(files))
	var firstError error
	for i, r := range results {
		select {
		case <-r.ready:
		case <-ctx.Done():
			return nil, ctx.Err()
		}
		if r.err != nil {
			if firstError == nil {
				firstError = r.err
			}
		}
		descs[i] = r.res
	}
	if err := h.Error(); err != nil {
		return descs, err
	}
	// this should probably never happen; if any task returned an
	// error, h.Error() should be non-nil
	return descs, firstError
}
// result is the (possibly still in-progress) outcome of compiling a single
// file. The ready channel is closed exactly once, by either fail or complete,
// after which res/err may be read.
type result struct {
	name  string
	ready chan struct{}
	// true if this file was explicitly provided to the compiler; otherwise
	// this file is an import that is implicitly included
	explicitFile bool
	// produces a linker.File or error, only available when ready is closed
	res linker.File
	err error
	mu  sync.Mutex
	// the results that are dependencies of this result; this result is
	// blocked, waiting on these dependencies to complete
	blockedOn []string
}

// fail records err and signals completion. The error is set before the
// channel close so waiters observe it after <-r.ready. Must be called at
// most once (and not after complete).
func (r *result) fail(err error) {
	r.err = err
	close(r.ready)
}

// complete records the successfully compiled file and signals completion.
// Must be called at most once (and not after fail).
func (r *result) complete(f linker.File) {
	r.res = f
	close(r.ready)
}

// setBlockedOn records, under lock, the names of dependencies this result is
// currently waiting on (used for dependency-cycle detection).
func (r *result) setBlockedOn(deps []string) {
	r.mu.Lock()
	defer r.mu.Unlock()
	r.blockedOn = deps
}

// getBlockedOn returns, under lock, the dependency names this result is
// currently waiting on.
func (r *result) getBlockedOn() []string {
	r.mu.Lock()
	defer r.mu.Unlock()
	return r.blockedOn
}
// executor coordinates the concurrent compilation tasks for a single Compile
// call: it de-duplicates per-file results, bounds parallelism via a weighted
// semaphore, and tracks shared symbols.
type executor struct {
	c      *Compiler
	h      *reporter.Handler
	s      *semaphore.Weighted
	cancel context.CancelFunc
	sym    *linker.Symbols
	// Lazily determines (once) whether the resolver supplies a custom
	// google/protobuf/descriptor.proto; see hasOverrideDescriptorProto.
	descriptorProtoCheck    sync.Once
	descriptorProtoIsCustom bool
	// mu guards results; see the locking discussion in Compile.
	mu      sync.Mutex
	results map[string]*result
}

// compile returns the (possibly already existing) result for file, creating
// and starting a new compilation task if needed. It takes e.mu and delegates
// to compileLocked.
func (e *executor) compile(ctx context.Context, file string) *result {
	e.mu.Lock()
	defer e.mu.Unlock()
	return e.compileLocked(ctx, file, false)
}
// compileLocked returns the result for file, creating it and spawning the
// compilation goroutine if no result exists yet. The caller must hold e.mu,
// which is what makes the check-then-create on e.results race-free.
// explicitFile indicates the file was directly requested (vs. an import).
func (e *executor) compileLocked(ctx context.Context, file string, explicitFile bool) *result {
	r := e.results[file]
	if r != nil {
		// Already being compiled (or done); share the existing result.
		return r
	}
	r = &result{
		name:         file,
		ready:        make(chan struct{}),
		explicitFile: explicitFile,
	}
	e.results[file] = r
	go func() {
		// Convert panics in the compilation task into a recorded failure so
		// that waiters on r.ready are always unblocked.
		defer func() {
			if p := recover(); p != nil {
				if r.err == nil {
					// TODO: strip top frames from stack trace so that the panic is
					// the top of the trace?
					panicErr := PanicError{File: file, Value: p, Stack: string(debug.Stack())}
					r.fail(panicErr)
				}
				// TODO: if r.err != nil, then this task has already
				// failed and there's nothing we can really do to
				// communicate this panic to parent goroutine. This
				// means the panic must have happened *after* the
				// failure was already recorded (or during?)
				// It would be nice to do something else here, like
				// send the compiler an out-of-band error? Or log?
			}
		}()
		e.doCompile(ctx, file, r)
	}()
	return r
}
// PanicError is an error value that represents a recovered panic. It includes
// the value returned by recover() as well as the stack trace.
//
// This should generally only be seen if a Resolver implementation panics.
//
// An error returned by a Compiler may wrap a PanicError, so you may need to
// use errors.As(...) to access panic details.
type PanicError struct {
	// The file that was being processed when the panic occurred
	File string
	// The value returned by recover()
	Value any
	// A formatted stack trace
	Stack string
}

// Error implements the error interface. It does NOT include the stack trace.
// Use a type assertion and query the Stack field directly to access that.
func (p PanicError) Error() string {
	// Equivalent to fmt.Sprintf("panic handling %q: %v", ...), assembled
	// from its pieces: a quoted file name and the default rendering of the
	// recovered value.
	return "panic handling " + fmt.Sprintf("%q", p.File) + ": " + fmt.Sprint(p.Value)
}
type errFailedToResolve struct {
err error
path string
}
func (e errFailedToResolve) Error() string {
errMsg := e.err.Error()
if strings.Contains(errMsg, e.path) {
// underlying error already refers to path in question, so we don't need to add more context
return errMsg
}
return fmt.Sprintf("could not resolve path %q: %s", e.path, e.err.Error())
}
func (e errFailedToResolve) Unwrap() error {
return e.err
}
// hasOverrideDescriptorProto reports whether the configured resolver supplies
// its own google/protobuf/descriptor.proto, i.e. one whose descriptor differs
// from the standard import (compared against the standardImports table, which
// is defined elsewhere in this package). The check is performed at most once
// per executor via sync.Once and the answer is cached.
func (e *executor) hasOverrideDescriptorProto() bool {
	e.descriptorProtoCheck.Do(func() {
		defer func() {
			// ignore a panic here; just assume no custom descriptor.proto
			// (this probe is best-effort; descriptorProtoIsCustom stays false)
			_ = recover()
		}()
		res, err := e.c.Resolver.FindFileByPath(descriptorProtoPath)
		e.descriptorProtoIsCustom = err == nil && res.Desc != standardImports[descriptorProtoPath]
	})
	return e.descriptorProtoIsCustom
}
// doCompile runs the compilation of file to completion, recording the outcome
// in r via fail or complete (exactly one of which is always called). It
// acquires a semaphore permit before doing any work, bounding overall
// parallelism; the permit is released by t.release (possibly earlier, from
// within asFile, to avoid deadlock while waiting on dependencies).
func (e *executor) doCompile(ctx context.Context, file string, r *result) {
	t := task{e: e, h: e.h.SubHandler(), r: r}
	if err := e.s.Acquire(ctx, 1); err != nil {
		// context cancelled (or errored) while waiting for a permit
		r.fail(err)
		return
	}
	defer t.release()
	sr, err := e.c.Resolver.FindFileByPath(file)
	if err != nil {
		r.fail(errFailedToResolve{err: err, path: file})
		return
	}
	defer func() {
		// if results included a result, don't leave it open if it can be closed
		if sr.Source == nil {
			return
		}
		if c, ok := sr.Source.(io.Closer); ok {
			// best-effort close; the compile outcome is already determined
			_ = c.Close()
		}
	}()
	desc, err := t.asFile(ctx, file, sr)
	if err != nil {
		r.fail(err)
		return
	}
	r.complete(desc)
}
// A compilation task. The executor has a semaphore that limits the number
// of concurrent, running tasks.
type task struct {
	e *executor
	// handler for this task
	h *reporter.Handler
	// released tracks whether this task's semaphore permit has already been
	// given back; once true, release() is a no-op. The permit may be released
	// early — before the task finishes — while the task blocks waiting on
	// dependency results, to avoid deadlock.
	released bool
	// the result that is populated by this task
	r *result
}

// release returns the task's semaphore permit if it has not already been
// returned. Safe to call multiple times; only the first call releases.
func (t *task) release() {
	if !t.released {
		t.e.s.Release(1)
		t.released = true
	}
}
// descriptorProtoPath is the canonical import path of the descriptor proto;
// used to detect whether a resolver overrides the standard well-known file.
const descriptorProtoPath = "google/protobuf/descriptor.proto"
// asFile converts a search result into a fully linked file for the given
// path. Pre-linked results short-circuit the work; otherwise the file is
// parsed, all of its imports are compiled (concurrently, via the executor),
// and then everything is linked together. The task's semaphore permit is
// temporarily released while waiting on dependencies, to avoid deadlock.
func (t *task) asFile(ctx context.Context, name string, r SearchResult) (linker.File, error) {
	if r.Desc != nil {
		// Already-linked descriptor: just wrap it (and its dependencies).
		if r.Desc.Path() != name {
			return nil, fmt.Errorf("search result for %q returned descriptor for %q", name, r.Desc.Path())
		}
		return linker.NewFileRecursive(r.Desc)
	}
	parseRes, err := t.asParseResult(name, r)
	if err != nil {
		return nil, err
	}
	if linkRes, ok := parseRes.(linker.Result); ok {
		// if resolver returned a parse result that was actually a link result,
		// use the link result directly (no other steps needed)
		return linkRes, nil
	}
	var deps []linker.File
	fileDescriptorProto := parseRes.FileDescriptorProto()
	var wantsDescriptorProto bool
	imports := fileDescriptorProto.Dependency
	if t.e.hasOverrideDescriptorProto() {
		// we only consider implicitly including descriptor.proto if it's overridden
		if name != descriptorProtoPath {
			var includesDescriptorProto bool
			for _, dep := range fileDescriptorProto.Dependency {
				if dep == descriptorProtoPath {
					includesDescriptorProto = true
					break
				}
			}
			if !includesDescriptorProto {
				wantsDescriptorProto = true
				// make a defensive copy so we don't inadvertently mutate
				// slice's backing array when adding this implicit dep
				importsCopy := make([]string, len(imports)+1)
				copy(importsCopy, imports)
				importsCopy[len(imports)] = descriptorProtoPath
				imports = importsCopy
			}
		}
	}
	var overrideDescriptorProto linker.File
	if len(imports) > 0 {
		// Record what this file is blocked on, so that concurrent cycle
		// checks (in other tasks) can see it.
		t.r.setBlockedOn(imports)
		results := make([]*result, len(fileDescriptorProto.Dependency))
		checked := map[string]struct{}{}
		for i, dep := range fileDescriptorProto.Dependency {
			span := findImportSpan(parseRes, dep)
			if name == dep {
				// doh! file imports itself
				handleImportCycle(t.h, span, []string{name}, dep)
				return nil, t.h.Error()
			}
			res := t.e.compile(ctx, dep)
			// check for dependency cycle to prevent deadlock
			if err := t.e.checkForDependencyCycle(res, []string{name, dep}, span, checked); err != nil {
				return nil, err
			}
			results[i] = res
		}
		deps = make([]linker.File, len(results))
		var descriptorProtoRes *result
		if wantsDescriptorProto {
			descriptorProtoRes = t.e.compile(ctx, descriptorProtoPath)
		}
		// release our semaphore so dependencies can be processed w/out risk of deadlock
		t.e.s.Release(1)
		t.released = true
		// now we wait for them all to be computed
		for i, res := range results {
			select {
			case <-res.ready:
				if res.err != nil {
					if rerr, ok := res.err.(errFailedToResolve); ok {
						// We don't report errors to get file from resolver to handler since
						// it's usually considered immediately fatal. However, if the reason
						// we were resolving is due to an import, turn this into an error with
						// source position that pinpoints the import statement and report it.
						return nil, reporter.Error(findImportSpan(parseRes, res.name), rerr)
					}
					return nil, res.err
				}
				deps[i] = res.res
			case <-ctx.Done():
				return nil, ctx.Err()
			}
		}
		if descriptorProtoRes != nil {
			select {
			case <-descriptorProtoRes.ready:
				// descriptor.proto wasn't explicitly imported, so we can ignore a failure
				if descriptorProtoRes.err == nil {
					overrideDescriptorProto = descriptorProtoRes.res
				}
			case <-ctx.Done():
				return nil, ctx.Err()
			}
		}
		// all deps resolved
		t.r.setBlockedOn(nil)
		// reacquire semaphore so we can proceed
		if err := t.e.s.Acquire(ctx, 1); err != nil {
			return nil, err
		}
		t.released = false
	}
	return t.link(parseRes, deps, overrideDescriptorProto)
}
// checkForDependencyCycle walks the in-progress dependency graph from res,
// reporting an import cycle (via the handler) if any transitive dependency
// appears in the given sequence of files. The checked set memoizes files
// already examined so each file is visited at most once.
func (e *executor) checkForDependencyCycle(res *result, sequence []string, span ast.SourceSpan, checked map[string]struct{}) error {
	if _, alreadyChecked := checked[res.name]; alreadyChecked {
		return nil
	}
	checked[res.name] = struct{}{}

	for _, dep := range res.getBlockedOn() {
		// Does this dependency close a cycle back into the sequence?
		cycle := false
		for _, file := range sequence {
			if file == dep {
				cycle = true
				break
			}
		}
		if cycle {
			handleImportCycle(e.h, span, sequence, dep)
			return e.h.Error()
		}
		e.mu.Lock()
		depRes := e.results[dep]
		e.mu.Unlock()
		if depRes == nil {
			continue
		}
		if err := e.checkForDependencyCycle(depRes, append(sequence, dep), span, checked); err != nil {
			return err
		}
	}
	return nil
}
// handleImportCycle reports an import-cycle error to the handler. The
// message lists the chain of imports, e.g.: cycle found in imports:
// "a.proto" -> "b.proto" -> "a.proto".
func handleImportCycle(h *reporter.Handler, span ast.SourceSpan, importSequence []string, dep string) {
	var msg bytes.Buffer
	msg.WriteString("cycle found in imports: ")
	for _, imp := range importSequence {
		msg.WriteString(fmt.Sprintf("%q -> ", imp))
	}
	msg.WriteString(fmt.Sprintf("%q", dep))
	// The handler records the error; callers retrieve it via h.Error().
	_ = h.HandleErrorWithPos(span, errors.New(msg.String()))
}
// findImportSpan returns the source span of the import statement for dep
// in the given file, or an unknown span when the file has no AST or no
// matching import declaration can be found.
func findImportSpan(res parser.Result, dep string) ast.SourceSpan {
	fileNode := res.AST()
	if fileNode != nil {
		for _, decl := range fileNode.Decls {
			imp, ok := decl.(*ast.ImportNode)
			if !ok {
				continue
			}
			if imp.Name.AsString() == dep {
				return fileNode.NodeInfo(imp.Name)
			}
		}
	}
	// No AST, or no matching import (the latter should never happen).
	return ast.UnknownSpan(res.FileNode().Name())
}
// link performs the remaining compilation steps after parsing: linking the
// parse result against its dependencies, interpreting options (possibly
// with an overridden descriptor.proto), validating options, and generating
// or stripping source code info according to the compiler's configuration.
func (t *task) link(parseRes parser.Result, deps linker.Files, overrideDescriptorProtoRes linker.File) (linker.File, error) {
	file, err := linker.Link(parseRes, deps, t.e.sym, t.h)
	if err != nil {
		return nil, err
	}
	var interpretOpts []options.InterpreterOption
	if overrideDescriptorProtoRes != nil {
		interpretOpts = []options.InterpreterOption{options.WithOverrideDescriptorProto(overrideDescriptorProtoRes)}
	}
	optsIndex, err := options.InterpretOptions(file, t.h, interpretOpts...)
	if err != nil {
		return nil, err
	}
	// now that options are interpreted, we can do some additional checks
	if err := file.ValidateOptions(t.h, t.e.sym); err != nil {
		return nil, err
	}
	if t.r.explicitFile {
		// only check for unused imports on files that were explicitly
		// requested for compilation (not transitively-pulled-in deps)
		file.CheckForUnusedImports(t.h)
	}
	if err := t.h.Error(); err != nil {
		return nil, err
	}
	if needsSourceInfo(parseRes, t.e.c.SourceInfoMode) {
		var srcInfoOpts []sourceinfo.GenerateOption
		if t.e.c.SourceInfoMode&SourceInfoExtraComments != 0 {
			srcInfoOpts = append(srcInfoOpts, sourceinfo.WithExtraComments())
		}
		if t.e.c.SourceInfoMode&SourceInfoExtraOptionLocations != 0 {
			srcInfoOpts = append(srcInfoOpts, sourceinfo.WithExtraOptionLocations())
		}
		parseRes.FileDescriptorProto().SourceCodeInfo = sourceinfo.GenerateSourceInfo(parseRes.AST(), optsIndex, srcInfoOpts...)
	} else if t.e.c.SourceInfoMode == SourceInfoNone {
		// If results came from unlinked FileDescriptorProto, it could have
		// source info that we should strip.
		parseRes.FileDescriptorProto().SourceCodeInfo = nil
	}
	if len(parseRes.FileDescriptorProto().GetSourceCodeInfo().GetLocation()) > 0 {
		// If we have source code info in the descriptor proto at this point,
		// we have to build the index of locations.
		file.PopulateSourceCodeInfo()
	}
	if !t.e.c.RetainASTs {
		// free the AST unless the caller asked for it to be retained
		file.RemoveAST()
	}
	return file, nil
}
// needsSourceInfo reports whether source code info should be generated for
// the given parse result: the mode must request it, an AST must be
// available to generate from, and the descriptor must not already have it.
func needsSourceInfo(parseRes parser.Result, mode SourceInfoMode) bool {
	if mode == SourceInfoNone {
		return false
	}
	if parseRes.AST() == nil {
		return false
	}
	return parseRes.FileDescriptorProto().SourceCodeInfo == nil
}
// asParseResult converts a search result into a parse result for the given
// path, using (in order of preference) an existing parse result, an
// unlinked file descriptor proto, or the source/AST (which gets parsed).
func (t *task) asParseResult(name string, r SearchResult) (parser.Result, error) {
	if r.ParseResult != nil {
		if r.ParseResult.FileDescriptorProto().GetName() != name {
			return nil, fmt.Errorf("search result for %q returned descriptor for %q", name, r.ParseResult.FileDescriptorProto().GetName())
		}
		// If the file descriptor needs linking, it will be mutated during the
		// next stage. So to make any mutations thread-safe, we must make a
		// defensive copy.
		res := parser.Clone(r.ParseResult)
		return res, nil
	}
	if r.Proto != nil {
		if r.Proto.GetName() != name {
			return nil, fmt.Errorf("search result for %q returned descriptor for %q", name, r.Proto.GetName())
		}
		// If the file descriptor needs linking, it will be mutated during the
		// next stage. So to make any mutations thread-safe, we must make a
		// defensive copy.
		descProto := proto.Clone(r.Proto).(*descriptorpb.FileDescriptorProto) //nolint:errcheck
		return parser.ResultWithoutAST(descProto), nil
	}
	file, err := t.asAST(name, r)
	if err != nil {
		return nil, err
	}
	return parser.ResultFromAST(file, true, t.h)
}
// asAST returns an AST for the given path: either the one already present
// in the search result (after verifying its name) or one freshly parsed
// from the result's source.
func (t *task) asAST(name string, r SearchResult) (*ast.FileNode, error) {
	if r.AST == nil {
		return parser.Parse(name, r.Source, t.h)
	}
	if r.AST.Name() != name {
		return nil, fmt.Errorf("search result for %q returned descriptor for %q", name, r.AST.Name())
	}
	return r.AST, nil
}
// Copyright 2024 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protocompile
import (
"bytes"
"context"
"io"
)
// FuzzProtoCompile is a fuzzing entry point: it compiles the fuzz input as
// if it were the contents of "test.proto". It returns 1 when compilation
// succeeds and 0 when it fails, to guide the fuzzer's corpus.
func FuzzProtoCompile(data []byte) int {
	accessor := func(_ string) (io.ReadCloser, error) {
		return io.NopCloser(bytes.NewReader(data)), nil
	}
	compiler := &Compiler{
		Resolver: &SourceResolver{Accessor: accessor},
	}
	if _, err := compiler.Compile(context.Background(), "test.proto"); err != nil {
		return 0
	}
	return 1
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package editions contains helpers related to resolving features for
// Protobuf editions. These are lower-level helpers. Higher-level helpers
// (which use this package under the hood) can be found in the exported
// protoutil package.
package editions
import (
"fmt"
"strings"
"sync"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/descriptorpb"
"google.golang.org/protobuf/types/dynamicpb"
)
const (
	// MinSupportedEdition is the earliest edition supported by this module.
	// It should be 2023 (the first edition) for the indefinite future.
	MinSupportedEdition = descriptorpb.Edition_EDITION_2023
	// MaxSupportedEdition is the most recent edition supported by this module.
	MaxSupportedEdition = descriptorpb.Edition_EDITION_2023
)

var (
	// SupportedEditions is the exhaustive set of editions that protocompile
	// can support. We don't allow it to compile future/unknown editions, to
	// make sure we don't generate incorrect descriptors, in the event that
	// a future edition introduces a change or new feature that requires
	// new logic in the compiler.
	SupportedEditions = computeSupportedEditions(MinSupportedEdition, MaxSupportedEdition)
	// FeatureSetDescriptor is the message descriptor for the compiled-in
	// version (in the descriptorpb package) of the google.protobuf.FeatureSet
	// message type.
	FeatureSetDescriptor = (*descriptorpb.FeatureSet)(nil).ProtoReflect().Descriptor()
	// FeatureSetType is the message type for the compiled-in version (in
	// the descriptorpb package) of google.protobuf.FeatureSet.
	FeatureSetType = (*descriptorpb.FeatureSet)(nil).ProtoReflect().Type()
	// editionDefaults caches, per edition, a FeatureSet with all standard
	// features set to their default values; built lazily on first use by
	// GetEditionDefaults.
	editionDefaults map[descriptorpb.Edition]*descriptorpb.FeatureSet
	// editionDefaultsInit guards the one-time initialization of editionDefaults.
	editionDefaultsInit sync.Once
)
// HasFeatures is implemented by all options messages and provides a
// nil-receiver-safe way of accessing the features explicitly configured
// in those options.
type HasFeatures interface {
	GetFeatures() *descriptorpb.FeatureSet
}

// Compile-time assertions that every generated options type satisfies
// HasFeatures.
var _ HasFeatures = (*descriptorpb.FileOptions)(nil)
var _ HasFeatures = (*descriptorpb.MessageOptions)(nil)
var _ HasFeatures = (*descriptorpb.FieldOptions)(nil)
var _ HasFeatures = (*descriptorpb.OneofOptions)(nil)
var _ HasFeatures = (*descriptorpb.ExtensionRangeOptions)(nil)
var _ HasFeatures = (*descriptorpb.EnumOptions)(nil)
var _ HasFeatures = (*descriptorpb.EnumValueOptions)(nil)
var _ HasFeatures = (*descriptorpb.ServiceOptions)(nil)
var _ HasFeatures = (*descriptorpb.MethodOptions)(nil)
// ResolveFeature resolves a feature for the given descriptor. This simple
// helper examines the given element and its ancestors, searching for an
// override. If there is no overridden value, it returns a zero value.
//
// The fields describe the path, within the feature set message, of the
// value to query. At least one field must be provided; an error is
// returned otherwise.
func ResolveFeature(
	element protoreflect.Descriptor,
	fields ...protoreflect.FieldDescriptor,
) (protoreflect.Value, error) {
	if len(fields) == 0 {
		// Guard: indexing fields[0] below would otherwise panic.
		return protoreflect.Value{}, fmt.Errorf("no feature fields provided")
	}
	for {
		var features *descriptorpb.FeatureSet
		if withFeatures, ok := element.Options().(HasFeatures); ok {
			// It should not really be possible for 'ok' to ever be false...
			features = withFeatures.GetFeatures()
		}
		// TODO: adaptFeatureSet is only looking at the first field. But if we needed to
		// support an extension field inside a custom feature, we'd really need
		// to check all fields. That gets particularly complicated if the traversal
		// path of fields includes list and map values. Luckily, features are not
		// supposed to be repeated and not supposed to themselves have extensions.
		// So this should be fine, at least for now.
		msgRef, err := adaptFeatureSet(features, fields[0])
		if err != nil {
			return protoreflect.Value{}, err
		}
		// Navigate the fields to find the value
		var val protoreflect.Value
		for i, field := range fields {
			if i > 0 {
				msgRef = val.Message()
			}
			if !msgRef.Has(field) {
				val = protoreflect.Value{}
				break
			}
			val = msgRef.Get(field)
		}
		if val.IsValid() {
			// All fields were set!
			return val, nil
		}
		parent := element.Parent()
		if parent == nil {
			// We've reached the end of the inheritance chain.
			return protoreflect.Value{}, nil
		}
		element = parent
	}
}
// HasEdition should be implemented by values that implement
// [protoreflect.FileDescriptor], to provide access to the file's
// edition when its syntax is [protoreflect.Editions].
type HasEdition interface {
	// Edition returns the numeric value of a google.protobuf.Edition enum
	// value that corresponds to the edition of this file. If the file does
	// not use editions, it should return the enum value that corresponds
	// to the syntax level, EDITION_PROTO2 or EDITION_PROTO3.
	Edition() int32
}
// GetEdition returns the edition for a given element. It returns
// EDITION_PROTO2 or EDITION_PROTO3 if the element is in a file that
// uses proto2 or proto3 syntax, respectively. It returns EDITION_UNKNOWN
// if the syntax of the given element is not recognized or if the edition
// cannot be ascertained from the element's [protoreflect.FileDescriptor].
func GetEdition(d protoreflect.Descriptor) descriptorpb.Edition {
	file := d.ParentFile()
	switch file.Syntax() {
	case protoreflect.Proto2:
		return descriptorpb.Edition_EDITION_PROTO2
	case protoreflect.Proto3:
		return descriptorpb.Edition_EDITION_PROTO3
	case protoreflect.Editions:
		// Handled below.
	default:
		return descriptorpb.Edition_EDITION_UNKNOWN
	}
	withEdition, ok := file.(HasEdition)
	if !ok {
		// The parent file should always be a *result, so we should
		// never be able to actually get in here. If we somehow did
		// have another implementation of protoreflect.FileDescriptor,
		// it doesn't provide a way to get the edition, other than the
		// potentially expensive step of generating a FileDescriptorProto
		// and then querying for the edition from that. :/
		return descriptorpb.Edition_EDITION_UNKNOWN
	}
	return descriptorpb.Edition(withEdition.Edition())
}
// GetEditionDefaults returns the default feature values for the given edition.
// It returns nil if the given edition is not known.
//
// This only populates known features, those that are fields of [*descriptorpb.FeatureSet].
// It does not populate any extension fields.
//
// The returned value must not be mutated as it references shared package state.
func GetEditionDefaults(edition descriptorpb.Edition) *descriptorpb.FeatureSet {
	// Build the cache of defaults exactly once, for all known editions.
	editionDefaultsInit.Do(func() {
		editionDefaults = make(map[descriptorpb.Edition]*descriptorpb.FeatureSet, len(descriptorpb.Edition_name))
		// Compute default for all known editions in descriptorpb.
		for editionInt := range descriptorpb.Edition_name {
			edition := descriptorpb.Edition(editionInt)
			defaults := &descriptorpb.FeatureSet{}
			defaultsRef := defaults.ProtoReflect()
			fields := defaultsRef.Descriptor().Fields()
			// Note: we are not computing defaults for extensions. Those are not needed
			// by anything in the compiler, so we can get away with just computing
			// defaults for these static, non-extension fields.
			for i, length := 0, fields.Len(); i < length; i++ {
				field := fields.Get(i)
				val, err := GetFeatureDefault(edition, FeatureSetType, field)
				if err != nil {
					// should we fail somehow??
					// (For now a field with no computable default is simply
					// left unset in the cached FeatureSet.)
					continue
				}
				defaultsRef.Set(field, val)
			}
			editionDefaults[edition] = defaults
		}
	})
	return editionDefaults[edition]
}
// GetFeatureDefault computes the default value for a feature. The given container
// is the message type that contains the field. This should usually be the descriptor
// for google.protobuf.FeatureSet, but can be a different message for computing the
// default value of custom features.
//
// Note that this always re-computes the default. For known fields of FeatureSet,
// it is more efficient to query from the statically computed default messages,
// like so:
//
//	editions.GetEditionDefaults(edition).ProtoReflect().Get(feature)
func GetFeatureDefault(edition descriptorpb.Edition, container protoreflect.MessageType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
	opts, ok := feature.Options().(*descriptorpb.FieldOptions)
	if !ok {
		// this is most likely impossible except for contrived use cases...
		return protoreflect.Value{}, fmt.Errorf("options is %T instead of *descriptorpb.FieldOptions", feature.Options())
	}
	// Find the entry in the field's edition_defaults list with the greatest
	// edition that does not exceed the requested edition.
	maxEdition := descriptorpb.Edition(-1)
	var maxVal string
	for _, def := range opts.EditionDefaults {
		if def.GetEdition() <= edition && def.GetEdition() > maxEdition {
			maxEdition = def.GetEdition()
			maxVal = def.GetValue()
		}
	}
	if maxEdition == -1 {
		// no matching default found
		return protoreflect.Value{}, fmt.Errorf("no relevant default for edition %s", edition)
	}
	// We use a typed nil so that it won't fall back to the global registry. Features
	// should not use extensions or google.protobuf.Any, so a nil *Types is fine.
	unmarshaler := prototext.UnmarshalOptions{Resolver: (*protoregistry.Types)(nil)}
	// The string value is in the text format: either a field value literal or a
	// message literal. (Repeated and map features aren't supported, so there's no
	// array or map literal syntax to worry about.)
	if feature.Kind() == protoreflect.MessageKind || feature.Kind() == protoreflect.GroupKind {
		// Message-typed feature: the default string is a message literal that
		// can be unmarshalled directly into a new field value.
		fldVal := container.Zero().NewField(feature)
		err := unmarshaler.Unmarshal([]byte(maxVal), fldVal.Message().Interface())
		if err != nil {
			return protoreflect.Value{}, err
		}
		return fldVal, nil
	}
	// The value is the textformat for the field. But prototext doesn't provide a way
	// to unmarshal a single field value. To work around, we unmarshal into an enclosing
	// message, which means we must prefix the value with the field name.
	if feature.IsExtension() {
		maxVal = fmt.Sprintf("[%s]: %s", feature.FullName(), maxVal)
	} else {
		maxVal = fmt.Sprintf("%s: %s", feature.Name(), maxVal)
	}
	empty := container.New()
	err := unmarshaler.Unmarshal([]byte(maxVal), empty.Interface())
	if err != nil {
		return protoreflect.Value{}, err
	}
	return empty.Get(feature), nil
}
// adaptFeatureSet returns a message, based on the given FeatureSet, from
// which the given field can be read directly. When the field is a known
// FeatureSet field or a compatible extension, msg's own reflection is
// returned. Otherwise (a dynamically generated field descriptor, or an
// extension whose message type doesn't match the stored value), the
// relevant data is re-serialized into a message whose descriptor matches
// the field's containing message.
func adaptFeatureSet(msg *descriptorpb.FeatureSet, field protoreflect.FieldDescriptor) (protoreflect.Message, error) {
	msgRef := msg.ProtoReflect()
	var actualField protoreflect.FieldDescriptor
	switch {
	case field.IsExtension():
		// Extensions can be used directly with the feature set, even if
		// field.ContainingMessage() != FeatureSetDescriptor. But only if
		// the value is either not a message or is a message with the
		// right descriptor, i.e. val.Descriptor() == field.Message().
		if actualField = actualDescriptor(msgRef, field); actualField == nil || actualField == field {
			if msgRef.Has(field) || len(msgRef.GetUnknown()) == 0 {
				return msgRef, nil
			}
			// The field is not present, but the message has unrecognized values. So
			// let's try to parse the unrecognized bytes, just in case they contain
			// this extension.
			temp := &descriptorpb.FeatureSet{}
			unmarshaler := proto.UnmarshalOptions{
				AllowPartial: true,
				Resolver:     resolverForExtension{field},
			}
			if err := unmarshaler.Unmarshal(msgRef.GetUnknown(), temp); err != nil {
				return nil, fmt.Errorf("failed to parse unrecognized fields of FeatureSet: %w", err)
			}
			return temp.ProtoReflect(), nil
		}
	case field.ContainingMessage() == FeatureSetDescriptor:
		// Known field, not dynamically generated. Can directly use with the feature set.
		return msgRef, nil
	default:
		actualField = FeatureSetDescriptor.Fields().ByNumber(field.Number())
	}
	// If we get here, we have a dynamic field descriptor or an extension
	// descriptor whose message type does not match the descriptor of the
	// stored value. We need to copy its value into a dynamic message,
	// which requires marshalling/unmarshalling.
	// We only need to copy over the unrecognized bytes (if any)
	// and the same field (if present).
	data := msgRef.GetUnknown()
	if actualField != nil && msgRef.Has(actualField) {
		subset := &descriptorpb.FeatureSet{}
		subset.ProtoReflect().Set(actualField, msgRef.Get(actualField))
		var err error
		data, err = proto.MarshalOptions{AllowPartial: true}.MarshalAppend(data, subset)
		if err != nil {
			return nil, fmt.Errorf("failed to marshal FeatureSet field %s to bytes: %w", field.Name(), err)
		}
	}
	if len(data) == 0 {
		// No relevant data to copy over, so we can just return
		// a zero value message
		return dynamicpb.NewMessageType(field.ContainingMessage()).Zero(), nil
	}
	other := dynamicpb.NewMessage(field.ContainingMessage())
	// We don't need to use a resolver for this step because we know that
	// field is not an extension. And features are not allowed to themselves
	// have extensions.
	if err := (proto.UnmarshalOptions{AllowPartial: true}).Unmarshal(data, other); err != nil {
		// NOTE: this was previously reported as a "marshal" failure, but the
		// failing operation here is unmarshalling into the dynamic message.
		return nil, fmt.Errorf("failed to unmarshal FeatureSet field %s from bytes: %w", field.Name(), err)
	}
	return other, nil
}
// resolverForExtension is a protobuf type resolver that can resolve exactly
// one extension: the one it wraps. It is used when re-parsing the
// unrecognized bytes of a FeatureSet that may contain that extension.
type resolverForExtension struct {
	ext protoreflect.ExtensionDescriptor
}

// FindMessageByName implements the resolver interface; this resolver never
// resolves message types.
func (r resolverForExtension) FindMessageByName(_ protoreflect.FullName) (protoreflect.MessageType, error) {
	return nil, protoregistry.NotFound
}

// FindMessageByURL implements the resolver interface; this resolver never
// resolves message types.
func (r resolverForExtension) FindMessageByURL(_ string) (protoreflect.MessageType, error) {
	return nil, protoregistry.NotFound
}

// FindExtensionByName returns the wrapped extension if the given name
// matches it, and NotFound otherwise.
func (r resolverForExtension) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
	if field == r.ext.FullName() {
		return asExtensionType(r.ext), nil
	}
	return nil, protoregistry.NotFound
}

// FindExtensionByNumber returns the wrapped extension if the given
// containing message and field number match it, and NotFound otherwise.
func (r resolverForExtension) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
	if message == r.ext.ContainingMessage().FullName() && field == r.ext.Number() {
		return asExtensionType(r.ext), nil
	}
	return nil, protoregistry.NotFound
}
// asExtensionType returns an extension type for the given descriptor: the
// descriptor's own associated type when it has one, or a dynamically
// created type otherwise.
func asExtensionType(ext protoreflect.ExtensionDescriptor) protoreflect.ExtensionType {
	xtd, ok := ext.(protoreflect.ExtensionTypeDescriptor)
	if !ok {
		return dynamicpb.NewExtensionType(ext)
	}
	return xtd.Type()
}
// computeSupportedEditions returns a map from edition name (with the
// "EDITION_" prefix removed) to edition value, for every known edition in
// the inclusive range [minEdition, maxEdition].
func computeSupportedEditions(minEdition, maxEdition descriptorpb.Edition) map[string]descriptorpb.Edition {
	supported := make(map[string]descriptorpb.Edition)
	for editionNum := range descriptorpb.Edition_name {
		edition := descriptorpb.Edition(editionNum)
		if edition < minEdition || edition > maxEdition {
			continue
		}
		name := strings.TrimPrefix(edition.String(), "EDITION_")
		supported[name] = edition
	}
	return supported
}
// actualDescriptor returns the actual field descriptor referenced by msg that
// corresponds to the given ext (i.e. same number). It returns nil if msg has
// no reference, if the actual descriptor is the same as ext, or if ext is
// otherwise safe to use as is.
func actualDescriptor(msg protoreflect.Message, ext protoreflect.ExtensionDescriptor) protoreflect.FieldDescriptor {
	if !msg.Has(ext) || ext.Message() == nil {
		// nothing to match; safe as is
		return nil
	}
	val := msg.Get(ext)
	switch {
	case ext.IsMap(): // should not actually be possible
		expectedDescriptor := ext.MapValue().Message()
		if expectedDescriptor == nil {
			return nil // nothing to match
		}
		// We know msg.Has(field) is true, from above, so there's at least one entry.
		var matches bool
		val.Map().Range(func(_ protoreflect.MapKey, val protoreflect.Value) bool {
			matches = val.Message().Descriptor() == expectedDescriptor
			return false
		})
		if matches {
			return nil
		}
	case ext.IsList():
		// We know msg.Has(field) is true, from above, so there's at least one entry.
		if val.List().Get(0).Message().Descriptor() == ext.Message() {
			return nil
		}
	case !ext.IsMap():
		// Effectively the default case (map and list were handled above):
		// a singular message-typed extension.
		if val.Message().Descriptor() == ext.Message() {
			return nil
		}
	}
	// The underlying message descriptors do not match. So we need to return
	// the actual field descriptor. Sadly, protoreflect.Message provides no way
	// to query the field descriptor in a message by number. For non-extensions,
	// one can query the associated message descriptor. But for extensions, we
	// have to do the slow thing, and range through all fields looking for it.
	var actualField protoreflect.FieldDescriptor
	msg.Range(func(fd protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
		if fd.Number() == ext.Number() {
			actualField = fd
			return false
		}
		return true
	})
	return actualField
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package cmpx contains extensions to Go's package cmp.
package cmpx
import (
"cmp"
"fmt"
"math"
"reflect"
)
// Result is the type returned by an [Ordering], and in particular
// [cmp.Compare].
type Result = int

const (
	// [cmp.Compare] guarantees these return values.
	Less    Result = -1
	Equal   Result = 0
	Greater Result = 1
)

// Ordered is like [cmp.Ordered], but includes additional types.
// Currently the only addition is bool (and types with underlying type bool).
type Ordered interface {
	~bool | cmp.Ordered
}

// Ordering is an ordering for the type T, which is any function with the same
// signature as [Compare].
type Ordering[T any] func(T, T) Result
// Key returns an ordering for T according to a key function, which must return
// a [cmp.Ordered] value.
func Key[T any, U cmp.Ordered](key func(T) U) Ordering[T] {
	return func(x, y T) Result {
		return cmp.Compare(key(x), key(y))
	}
}
// Join returns an ordering for T which returns the first of cmps returns a
// non-[Equal] value.
func Join[T any](cmps ...Ordering[T]) Ordering[T] {
	return func(a, b T) Result {
		result := Equal
		for _, compare := range cmps {
			if result = compare(a, b); result != Equal {
				break
			}
		}
		return result
	}
}
// Map is like [Join], but it maps the inputs with the given function first.
func Map[T any, U any](f func(T) U, cmps ...Ordering[U]) Ordering[T] {
	// Project both inputs through f, then defer to the joined orderings.
	joined := Join(cmps...)
	return func(x, y T) Result {
		return joined(f(x), f(y))
	}
}
// Reverse returns an ordering which is the reverse of cmp.
//
// Note that this negates cmp's result rather than swapping its arguments,
// so it behaves sensibly even for orderings that are not antisymmetric.
func Reverse[T any](cmp Ordering[T]) Ordering[T] {
	return func(x, y T) Result {
		return -cmp(x, y)
	}
}
// Bool compares two bools, where false < true.
//
// This works around a bug where bool does not satisfy [cmp.Ordered].
func Bool[B ~bool](a, b B) Result {
	switch {
	case bool(a) == bool(b):
		return Equal
	case bool(b):
		// a is false, b is true.
		return Less
	default:
		// a is true, b is false.
		return Greater
	}
}
// Any compares any two [cmp.Ordered] types, according to the following criteria:
//
//  1. any(nil) is least of all.
//
//  2. If the values are not mutually comparable, their [reflect.Kind]s are
//     compared.
//
//  3. If either value is not of a [cmp.Ordered] type, this function panics.
//
//  4. Otherwise, the arguments are compared as-if by [cmp.Compare].
//
// For the purposes of this function, bool is treated as satisfying [cmp.Compare].
func Any(a, b any) Result {
	if a == nil || b == nil {
		// nil sorts before everything; two nils are equal.
		return Bool(a != nil, b != nil)
	}
	ra := reflect.ValueOf(a)
	rb := reflect.ValueOf(b)
	// Classify each operand into a coarse kind bucket. The buckets are bit
	// flags so that mixed-kind pairs can be matched as OR-ed combinations in
	// the switch below.
	type kind int
	const (
		kBool kind = 1 << iota
		kInt
		kUint
		kFloat
		kString
	)
	which := func(r reflect.Value) kind {
		switch r.Kind() {
		case reflect.Bool:
			return kBool
		case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
			return kInt
		case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64,
			reflect.Uintptr:
			return kUint
		case reflect.Float32, reflect.Float64:
			return kFloat
		case reflect.String:
			return kString
		default:
			// Criterion 3: not a cmp.Ordered type.
			panic(fmt.Sprintf("cmpx.Any: incomparable value %v (type %[1]T)", r.Interface()))
		}
	}
	// When both sides share a kind, the OR yields that single flag; the
	// two-flag cases below handle the mutually-comparable mixed pairs.
	//nolint:revive // Recommends removing some else {} branches that make the code less symmetric
	switch which(ra) | which(rb) {
	case kBool:
		return Bool(ra.Bool(), rb.Bool())
	case kInt:
		return cmp.Compare(ra.Int(), rb.Int())
	case kUint:
		return cmp.Compare(ra.Uint(), rb.Uint())
	case kInt | kUint:
		// Mixed signedness: a uint64 above MaxInt64 is always the greater
		// value; otherwise both fit in int64 and compare directly.
		if rb.CanUint() {
			v := rb.Uint()
			if v > math.MaxInt64 {
				return Less
			}
			return cmp.Compare(ra.Int(), int64(v))
		} else {
			v := ra.Uint()
			if v > math.MaxInt64 {
				return Greater
			}
			return cmp.Compare(int64(v), rb.Int())
		}
	case kFloat:
		return cmp.Compare(ra.Float(), rb.Float())
	case kFloat | kInt:
		if ra.CanFloat() {
			return cmp.Compare(ra.Float(), float64(rb.Int()))
		} else {
			return cmp.Compare(float64(ra.Int()), rb.Float())
		}
	case kFloat | kUint:
		if ra.CanFloat() {
			return cmp.Compare(ra.Float(), float64(rb.Uint()))
		} else {
			return cmp.Compare(float64(ra.Uint()), rb.Float())
		}
	case kString:
		return cmp.Compare(ra.String(), rb.String())
	default:
		// Criterion 2: kinds that are not mutually comparable fall back to
		// comparing the kinds themselves.
		return cmp.Compare(ra.Kind(), rb.Kind())
	}
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package unsafex contains extensions to Go's package unsafe.
//
// Importing this package should be treated as equivalent to importing unsafe.
package unsafex
import (
"fmt"
"unsafe"
)
// Int is a constraint for any integer type: all fixed-size signed and
// unsigned integers, int, uint, and uintptr, including any type whose
// underlying type is one of these.
type Int interface {
	~int8 | ~int16 | ~int32 | ~int64 | ~int |
		~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uint |
		~uintptr
}
// Size is like [unsafe.Sizeof], but it is a generic function and it returns
// an int instead of a uintptr (Go does not have types so large they would
// overflow an int).
func Size[T any]() int {
	var zero T
	return int(unsafe.Sizeof(zero))
}
// Add is like [unsafe.Add], but it operates on a typed pointer and scales the
// offset by that type's size, similar to pointer arithmetic in Rust or C.
//
// This function has the same safety caveats as [unsafe.Add].
//
//go:nosplit
func Add[P ~*E, E any, I Int](p P, idx I) P {
	var elem E
	byteOffset := int(idx) * int(unsafe.Sizeof(elem))
	return P(unsafe.Add(unsafe.Pointer(p), byteOffset))
}
// Bitcast bit-casts a value of type From to a value of type To.
//
// This operation is very dangerous, because it can be used to break package
// export barriers, read uninitialized memory, and forge pointers in violation
// of [unsafe.Pointer]'s contract, resulting in memory errors in the GC.
//
// Panics if To and From have different sizes.
//
//go:nosplit
func Bitcast[To, From any](v From) To {
	// This function is correctly compiled down to a mov, as seen here:
	// https://godbolt.org/z/qvndcYYba
	//
	// With redundant code removed, stenciling Bitcast[float64, int64] produces
	// (as seen in the above Godbolt):
	//
	//	TEXT unsafex.Bitcast[float64,int64]
	//	MOVQ 32(R14), R12
	//	TESTQ R12, R12
	//	JNE morestack
	//	XCHGL AX, AX
	//	MOVQ AX, X0
	//	RET

	// This check is necessary because casting a smaller type into a larger
	// type will result in reading uninitialized memory, especially in the
	// presence of inlining that causes &aligned below to point into the heap.
	// The equivalent functions in Rust and C++ perform this check statically,
	// because it is so important.
	if Size[To]() != Size[From]() {
		// This check will always be inlined away, because Bitcast is
		// manifestly inline-able.
		//
		// NOTE: This could potentially be replaced with a link error, by making
		// this call a function with no body (and then not defining that
		// function in a .s file; although, note we do need an empty.s to
		// silence a compiler error in that case).
		panic(badBitcast[To, From]{})
	}

	// To avoid an unaligned load below, we copy From into a struct aligned to
	// To's alignment. Consider the following situation: we call
	// Bitcast[int32, [4]byte]. There is no guarantee that &v will be aligned
	// to the four byte boundary required for int32, and thus casting it to *To
	// may result in an unaligned load.
	//
	// As seen in the Godbolt above, for cases where the alignment change
	// is redundant, this gets optimized away.
	aligned := struct {
		_ [0]To // zero-size field that forces To's alignment onto the struct
		v From
	}{v: v}
	return *(*To)(unsafe.Pointer(&aligned.v))
}
// badBitcast is the error value thrown by Bitcast when To and From have
// different sizes.
type badBitcast[To, From any] struct{}

// Error implements the error interface, reporting the mismatched sizes of
// the two type arguments.
func (badBitcast[To, From]) Error() string {
	var (
		to   To
		from From
	)
	return fmt.Sprintf(
		"unsafex: %T and %T are of unequal size (%d != %d)",
		to, from,
		Size[To](), Size[From](),
	)
}
// StringAlias returns a string that aliases a slice. This is useful for
// situations where we're allocating a string on the stack, or where we have
// a slice that will never be written to and we want to interpret as a string
// without a copy.
//
// data must not be written to: for the lifetime of the returned string (that
// is, until its final use in the program upon which a finalizer set on it could
// run), it must be treated as if goroutines are concurrently reading from it:
// data must not be mutated in any way.
//
//go:nosplit
func StringAlias[S ~[]E, E any](data S) string {
	base := Bitcast[*byte](unsafe.SliceData(data))
	return unsafe.String(base, len(data)*Size[E]())
}
// BytesAlias is the inverse of [StringAlias].
//
// The same caveats apply as with [StringAlias] around mutating `data`.
//
//go:nosplit
func BytesAlias[S ~[]B, B ~byte](data string) []B {
	base := Bitcast[*B](unsafe.StringData(data))
	return unsafe.Slice(base, len(data))
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package featuresext provides file descriptors for the
// "google/protobuf/cpp_features.proto" and "google/protobuf/java_features.proto"
// standard import files. Unlike the other standard/well-known
// imports, these files have no standard Go package in their
// runtime with generated code. So in order to make them available
// as "standard imports" to compiler users, we must embed these
// descriptors into a Go package.
package featuresext
import (
_ "embed"
"fmt"
"sync"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protodesc"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/descriptorpb"
)
var (
	// Serialized FileDescriptorSet for "google/protobuf/cpp_features.proto",
	// embedded at build time (buildDescriptor checks it holds exactly one file).
	//
	//go:embed cpp_features.protoset
	cppFeatures []byte
	// Serialized FileDescriptorSet for "google/protobuf/java_features.proto".
	//
	//go:embed java_features.protoset
	javaFeatures []byte

	// initOnce guards the one-time lazy construction of the descriptors below.
	initOnce         sync.Once
	initCppFeatures  protoreflect.FileDescriptor
	initCppErr       error
	initJavaFeatures protoreflect.FileDescriptor
	initJavaErr      error
)
// initDescriptors lazily builds both feature file descriptors exactly once,
// caching the results (and any errors) in the package-level variables.
func initDescriptors() {
	initOnce.Do(func() {
		initJavaFeatures, initJavaErr = buildDescriptor("google/protobuf/java_features.proto", javaFeatures)
		initCppFeatures, initCppErr = buildDescriptor("google/protobuf/cpp_features.proto", cppFeatures)
	})
}
// CppFeaturesDescriptor returns the file descriptor for
// "google/protobuf/cpp_features.proto", building it on first use.
// The returned error reports any failure from that one-time build.
func CppFeaturesDescriptor() (protoreflect.FileDescriptor, error) {
	initDescriptors()
	return initCppFeatures, initCppErr
}
// JavaFeaturesDescriptor returns the file descriptor for
// "google/protobuf/java_features.proto", building it on first use.
// The returned error reports any failure from that one-time build.
func JavaFeaturesDescriptor() (protoreflect.FileDescriptor, error) {
	initDescriptors()
	return initJavaFeatures, initJavaErr
}
// buildDescriptor unmarshals data as a FileDescriptorSet that must contain
// exactly one file, named name, and links it against the global registry.
func buildDescriptor(name string, data []byte) (protoreflect.FileDescriptor, error) {
	var fileSet descriptorpb.FileDescriptorSet
	if err := proto.Unmarshal(data, &fileSet); err != nil {
		return nil, fmt.Errorf("failed to load descriptor for %q: %w", name, err)
	}
	if count := len(fileSet.File); count != 1 {
		return nil, fmt.Errorf("failed to load descriptor for %q: expected embedded descriptor set to contain exactly one file but it instead has %d", name, count)
	}
	fileProto := fileSet.File[0]
	if fileProto.GetName() != name {
		return nil, fmt.Errorf("failed to load descriptor for %q: embedded descriptor contains wrong file %q", name, fileProto.GetName())
	}
	fd, err := protodesc.NewFile(fileProto, protoregistry.GlobalFiles)
	if err != nil {
		return nil, fmt.Errorf("failed to load descriptor for %q: %w", name, err)
	}
	return fd, nil
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"bytes"
"fmt"
"google.golang.org/protobuf/types/descriptorpb"
"github.com/bufbuild/protocompile/ast"
)
// ParsedFile wraps an optional AST and required FileDescriptorProto.
// This is used so types like parser.Result can be passed to this internal package avoiding circular imports.
// Additionally, it makes it less likely that users might specify one or the other.
type ParsedFile interface {
	// AST returns the parsed abstract syntax tree. This returns nil if the
	// Result was created without an AST.
	AST() *ast.FileNode
	// FileDescriptorProto returns the file descriptor proto. Unlike AST, this
	// is always present.
	FileDescriptorProto() *descriptorpb.FileDescriptorProto
}
// MessageContext provides information about the location in a descriptor
// hierarchy, for adding context to warnings and error messages.
type MessageContext struct {
	// The relevant file.
	File ParsedFile
	// The type and fully-qualified name of the element within the file
	// (e.g. "message" and its full name). ElementType "file" suppresses the
	// element prefix in String.
	ElementType string
	ElementName string
	// If the element being processed is an option (or *in* an option)
	// on the named element above, this will be non-nil.
	Option *descriptorpb.UninterpretedOption
	// If the element being processed is inside a message literal in an
	// option value, this will be non-empty and represent a traversal
	// to the element in question.
	OptAggPath string
}
// String renders the context as a prefix suitable for prepending to an error
// or warning message, e.g. "message Foo: option bar: ".
func (c *MessageContext) String() string {
	var buf bytes.Buffer
	if c.ElementType != "file" {
		_, _ = fmt.Fprintf(&buf, "%s %s: ", c.ElementType, c.ElementName)
	}
	if c.Option != nil && c.Option.Name != nil {
		buf.WriteString("option ")
		writeOptionName(&buf, c.Option.Name)
		// if we have no source position info, try to provide as much context
		// as possible (if nodes != nil, we don't need this because any errors
		// will actually have file and line numbers)
		if c.File.AST() == nil && c.OptAggPath != "" {
			_, _ = fmt.Fprintf(&buf, " at %s", c.OptAggPath)
		}
		buf.WriteString(": ")
	}
	return buf.String()
}
// writeOptionName writes a dotted option name to buf, wrapping extension name
// parts in parentheses, e.g. "foo.(pkg.ext).bar".
func writeOptionName(buf *bytes.Buffer, parts []*descriptorpb.UninterpretedOption_NamePart) {
	for i, p := range parts {
		if i > 0 {
			buf.WriteByte('.')
		}
		nm := p.GetNamePart()
		// Guard the index: a malformed descriptor could have an empty name
		// part, which would otherwise panic below.
		if len(nm) > 0 && nm[0] == '.' {
			// skip leading dot
			nm = nm[1:]
		}
		if p.GetIsExtension() {
			buf.WriteByte('(')
			buf.WriteString(nm)
			buf.WriteByte(')')
		} else {
			buf.WriteString(nm)
		}
	}
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package messageset
import (
"math"
"sync"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protodesc"
"google.golang.org/protobuf/types/descriptorpb"
)
var (
	// messageSetSupport caches whether the linked protobuf-go runtime accepts
	// the message set wire format; computed once in CanSupportMessageSets.
	messageSetSupport     bool
	messageSetSupportInit sync.Once
)
// CanSupportMessageSets returns true if the protobuf-go runtime supports
// serializing messages with the message set wire format.
//
// The answer is computed on first call by probing the runtime, then cached.
func CanSupportMessageSets() bool {
	messageSetSupportInit.Do(func() {
		// We check using the protodesc package, instead of just relying
		// on protolegacy build tag, in case someone links in a fork of
		// the protobuf-go runtime that supports legacy proto1 features
		// or in case the protobuf-go runtime adds another mechanism to
		// enable or disable it (such as environment variable).
		//
		// The probe builds a minimal MessageSet-shaped message: the
		// message_set_wire_format option plus a full-range extension range.
		_, err := protodesc.NewFile(&descriptorpb.FileDescriptorProto{
			Name: proto.String("test.proto"),
			MessageType: []*descriptorpb.DescriptorProto{
				{
					Name: proto.String("MessageSet"),
					Options: &descriptorpb.MessageOptions{
						MessageSetWireFormat: proto.Bool(true),
					},
					ExtensionRange: []*descriptorpb.DescriptorProto_ExtensionRange{
						{
							Start: proto.Int32(1),
							End:   proto.Int32(math.MaxInt32),
						},
					},
				},
			},
		}, nil)
		// When message sets are not supported, the above returns an error:
		//	message "MessageSet" is a MessageSet, which is a legacy proto1 feature that is no longer supported
		messageSetSupport = err == nil
	})
	return messageSetSupport
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
//go:build !debug
// See debug.go.
package internal
// Debug indicates whether internal debug logging is enabled. This file is
// compiled only without the "debug" build tag, so it is always false here;
// see debug.go for the debug-enabled counterpart.
const Debug = false

// DebugLog is a no-op in non-debug builds; the debug build provides the real
// implementation with the same signature.
func DebugLog([]any, string, string, ...any) {}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"google.golang.org/protobuf/types/descriptorpb"
"github.com/bufbuild/protocompile/ast"
)
// hasOptionNode is implemented by parse results that can map an
// uninterpreted option back to its AST node, for precise error reporting.
type hasOptionNode interface {
	// OptionNode returns the AST node corresponding to the given option.
	OptionNode(part *descriptorpb.UninterpretedOption) ast.OptionDeclNode
	FileNode() ast.FileDeclNode // needed in order to query for NodeInfo
}

// errorHandler reports a problem at the given source span and returns the
// error value the caller should propagate.
type errorHandler func(span ast.SourceSpan, format string, args ...any) error
// FindFirstOption returns the index of the first option in opts whose first
// name part is the given non-extension name, or -1 if there is none.
func FindFirstOption(res hasOptionNode, handler errorHandler, scope string, opts []*descriptorpb.UninterpretedOption, name string) (int, error) {
	return findOption(res, handler, scope, opts, name, false, true)
}
// FindOption returns the index of the option in opts whose name is exactly
// the single non-extension part name, or -1 if there is none. If more than
// one option matches, the duplicate is reported via handler.
func FindOption(res hasOptionNode, handler errorHandler, scope string, opts []*descriptorpb.UninterpretedOption, name string) (int, error) {
	return findOption(res, handler, scope, opts, name, true, false)
}
// findOption scans opts for an option named name. When exact is true, only
// options whose name is a single part may match. When first is true, the
// first match is returned immediately; otherwise a second match is reported
// as an error via handler. Returns the matching index or -1.
func findOption(res hasOptionNode, handler errorHandler, scope string, opts []*descriptorpb.UninterpretedOption, name string, exact, first bool) (int, error) {
	found := -1
	for i, opt := range opts {
		// In exact mode, only simple single-part names can match.
		if exact && len(opt.Name) != 1 {
			continue
		}
		// Guard against a malformed option with no name parts, which would
		// otherwise panic on the index below.
		if len(opt.Name) == 0 {
			continue
		}
		if opt.Name[0].GetIsExtension() || opt.Name[0].GetNamePart() != name {
			continue
		}
		if first {
			return i, nil
		}
		if found >= 0 {
			// Second match: point the error at the option's name node.
			optNode := res.OptionNode(opt)
			fn := res.FileNode()
			node := optNode.GetName()
			nodeInfo := fn.NodeInfo(node)
			return -1, handler(nodeInfo, "%s: option %s cannot be defined more than once", scope, name)
		}
		found = i
	}
	return found, nil
}
// RemoveOption returns uo with the element at indexToRemove removed. Removal
// at either end re-slices without copying; removal in the middle shifts the
// tail in place (mutating uo's backing array).
func RemoveOption(uo []*descriptorpb.UninterpretedOption, indexToRemove int) []*descriptorpb.UninterpretedOption {
	last := len(uo) - 1
	switch indexToRemove {
	case 0:
		return uo[1:]
	case last:
		return uo[:last]
	default:
		return append(uo[:indexToRemove], uo[indexToRemove+1:]...)
	}
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prototest
import (
"path/filepath"
"runtime"
"testing"
)
// CallerDir returns the directory of the file in which this function is called.
//
// This function is intended for tests to find their test data only. Fails the
// test (via t.Fatal) if called within a stripped binary.
func CallerDir(t *testing.T) string {
	// skip=1 skips this frame so the *caller* of CallerDir is resolved.
	return CallerDirWithSkip(t, 1)
}
// CallerDirWithSkip returns the directory of the file in which this function is
// called.
//
// skip is the number of callers to skip, like in [runtime.Caller]. A value of
// zero represents the caller of CallerDirWithSkip.
//
// This function is intended for tests to find their test data only. Fails the
// test (via t.Fatal) if called within a stripped binary.
func CallerDirWithSkip(t *testing.T, skip int) string {
	if _, file, _, ok := runtime.Caller(skip + 1); ok {
		return filepath.Dir(file)
	}
	t.Fatal("protocompile/internal: could not determine test file's directory; the binary may have been stripped")
	return "" // Unreachable; t.Fatal does not return.
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prototest
import (
"reflect"
"testing"
"github.com/stretchr/testify/require"
)
// RequireSameLayout generates require assertions for ensuring that a and b have
// the same layout.
//
// This is useful for verifying that a type used for unsafe.Pointer shenanigans
// matches another.
//
// NOTE: This will currently recurse infinitely on a type such as
//
//	type T struct { p *T }
//
// This function is only intended for testing so actually making sure we don't
// hit that case is not currently necessary.
func RequireSameLayout(t *testing.T, a, b reflect.Type) {
	t.Helper()
	if a == b {
		return // No need to check further.
	}
	// Matching kinds are a prerequisite for matching layout.
	require.Equal(
		t, a.Kind(), b.Kind(),
		"mismatched kinds: %s is %s; %s is %s", a, a.Kind(), b, b.Kind())
	switch a.Kind() {
	case reflect.Struct:
		// Structs must agree field-by-field, in declaration order.
		require.Equal(t, a.NumField(), b.NumField(),
			"mismatched field counts: %s has %d fields; %s has %d fields", a, a.NumField(), b, b.NumField())
		for i := range a.NumField() {
			RequireSameLayout(t, a.Field(i).Type, b.Field(i).Type)
		}
	case reflect.Slice, reflect.Chan, reflect.Pointer:
		// Indirect containers: only the element type needs to agree.
		RequireSameLayout(t, a.Elem(), b.Elem())
	case reflect.Array:
		// Arrays must agree on both element type and length.
		RequireSameLayout(t, a.Elem(), b.Elem())
		require.Equal(t, a.Len(), b.Len(), "mismatched array lengths: %s != %s", a, b)
	case reflect.Map:
		RequireSameLayout(t, a.Key(), b.Key())
		RequireSameLayout(t, a.Elem(), b.Elem())
	case reflect.Interface:
		// Interfaces are treated as equivalent when mutually satisfiable.
		require.True(t, a.Implements(b), "mismatched interface types: %s != %s", a, b)
		require.True(t, b.Implements(a), "mismatched interface types: %s != %s", a, b)
	case reflect.Func:
		require.True(t, a.ConvertibleTo(b), "mismatched function types: %s != %s", a, b)
		require.True(t, b.ConvertibleTo(a), "mismatched function types: %s != %s", a, b)
	default:
		// The others are simple scalars, so same kind is sufficient.
	}
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prototest
import (
"os"
"testing"
"github.com/google/go-cmp/cmp"
"github.com/google/go-cmp/cmp/cmpopts"
"github.com/stretchr/testify/require"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/testing/protocmp"
"google.golang.org/protobuf/types/descriptorpb"
"github.com/bufbuild/protocompile/linker"
"github.com/bufbuild/protocompile/protoutil"
)
// LoadDescriptorSet reads and unmarshals a FileDescriptorSet from the file at
// path, using res to resolve any extensions. Failures fail the test.
func LoadDescriptorSet(t *testing.T, path string, res linker.Resolver) *descriptorpb.FileDescriptorSet {
	t.Helper()
	data, err := os.ReadFile(path)
	require.NoError(t, err)
	fdset := new(descriptorpb.FileDescriptorSet)
	err = proto.UnmarshalOptions{Resolver: res}.Unmarshal(data, fdset)
	require.NoError(t, err)
	return fdset
}
// CheckFiles asserts that act matches the file of the same path in expSet.
// If recursive is true, all transitively imported files are checked as well.
// It returns true only if every checked file matched.
func CheckFiles(t *testing.T, act protoreflect.FileDescriptor, expSet *descriptorpb.FileDescriptorSet, recursive bool) bool {
	t.Helper()
	return checkFiles(t, act, expSet, recursive, map[string]struct{}{})
}
// checkFiles compares act against its expected proto in expSet, recursing
// into imports when requested. checked records file paths already visited so
// shared imports are compared only once.
func checkFiles(t *testing.T, act protoreflect.FileDescriptor, expSet *descriptorpb.FileDescriptorSet, recursive bool, checked map[string]struct{}) bool {
	path := act.Path()
	if _, done := checked[path]; done {
		// already checked
		return true
	}
	checked[path] = struct{}{}

	expProto := findFileInSet(expSet, path)
	actProto := protoutil.ProtoFromFileDescriptor(act)
	ok := AssertMessagesEqual(t, expProto, actProto, expProto.GetName())
	if recursive {
		imports := act.Imports()
		for i := range imports.Len() {
			if !checkFiles(t, imports.Get(i), expSet, true, checked) {
				ok = false
			}
		}
	}
	return ok
}
// findFileInSet returns the file named name from fps, or nil if absent.
func findFileInSet(fps *descriptorpb.FileDescriptorSet, name string) *descriptorpb.FileDescriptorProto {
	for _, fd := range fps.File {
		if fd.GetName() == name {
			return fd
		}
	}
	return nil
}
// AssertMessagesEqual reports a test error (with a diff) if exp and act are
// not equal, treating NaNs as equal. It returns true when they match.
func AssertMessagesEqual(t *testing.T, exp, act proto.Message, description string) bool {
	t.Helper()
	diff := cmp.Diff(exp, act, protocmp.Transform(), cmpopts.EquateNaNs())
	if diff == "" {
		return true
	}
	t.Errorf("%s: message mismatch (-want, +got):\n%s", description, diff)
	return false
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package prototest
import (
"fmt"
"math"
"slices"
"strconv"
"strings"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"github.com/bufbuild/protocompile/internal/ext/cmpx"
)
// ToYAMLOptions contains configuration for [ToYAML].
type ToYAMLOptions struct {
	// If set, zero values of implicit presence fields are set.
	EmitZeros bool

	// The maximum column width before wrapping starts to occur.
	// If zero, a default of 80 is used.
	MaxWidth int
}
// ToYAML converts a Protobuf message into a YAML document in a deterministic
// manner. This is intended for generating YAML for golden outputs.
//
// The result will use a compressed representation where possible.
func ToYAML(m proto.Message, opts ToYAMLOptions) string {
	conv := &toYAML{ToYAMLOptions: opts}
	root := conv.message(m.ProtoReflect())
	if len(root.pairs) == 0 {
		return ""
	}
	root.prepare()
	conv.write(root)
	return conv.out.String()
}
// toYAML is state of an on-going YAML conversion.
type toYAML struct {
	ToYAMLOptions

	// out accumulates the rendered YAML text.
	out strings.Builder
	// nesting is the current indentation depth, in levels.
	nesting int
}
// message converts a Protobuf message into a [doc], which is used as an
// intermediate processing stage to help make formatting decisions
// (such as compressing nested messages).
func (y *toYAML) message(m protoreflect.Message) *doc {
	out := new(doc)
	fields := m.Descriptor().Fields()
	for i := range fields.Len() {
		field := fields.Get(i)
		// EmitZeros forces zero-valued implicit-presence fields to appear.
		present := m.Has(field) || (y.EmitZeros && !field.HasPresence())
		if !present {
			continue
		}
		out.push(field.Name(), y.value(m.Get(field), field))
	}
	return out
}
// value converts a Protobuf value into a value that can be placed into a
// [doc].
func (y *toYAML) value(v protoreflect.Value, f protoreflect.FieldDescriptor) any {
	switch x := v.Interface().(type) {
	case protoreflect.Message:
		return y.message(x)
	case protoreflect.List:
		list := new(doc)
		for i := range x.Len() {
			list.push(nil, y.value(x.Get(i), f))
		}
		return list
	case protoreflect.Map:
		entries := new(doc)
		entries.needsSort = true // Map iteration order is nondeterministic.
		x.Range(func(mk protoreflect.MapKey, mv protoreflect.Value) bool {
			entries.push(
				y.value(mk.Value(), f.MapKey()),
				y.value(mv, f.MapValue()),
			)
			return true
		})
		return entries
	case protoreflect.EnumNumber:
		// Prefer the symbolic name; fall back to the raw number for
		// values not declared in the enum.
		if ev := f.Enum().Values().ByNumber(x); ev != nil {
			return ev.Name()
		}
		return int32(x)
	case []byte:
		return string(x)
	default:
		return x
	}
}
// write writes a value returned by [toYAML.value] into the internal output
// buffer.
func (y *toYAML) write(v any) {
	switch v := v.(type) {
	case bool, int32, int64, uint32, uint64, protoreflect.Name:
		fmt.Fprint(&y.out, v)
	case float32, float64:
		// Floats need special treatment for inf/NaN and round-tripping.
		y.writeFloat(v)
	case string:
		// Always quote strings so empties and escapes are unambiguous.
		fmt.Fprintf(&y.out, "%q", v)
	case *doc:
		if y.isOneLine(v) {
			// Narrow enough to use the compressed single-line form.
			y.writeOneLineDoc(v)
			return
		}
		for _, pair := range v.pairs {
			oneLine := y.isOneLine(pair[1])
			y.indent()
			if pair[0] == nil {
				// Array element: no key, just a list marker.
				y.out.WriteString("- ")
			} else {
				y.write(pair[0])
				if !oneLine {
					y.out.WriteString(":\n")
				} else {
					y.out.WriteString(": ")
				}
			}
			if !oneLine {
				// Multi-line value: nest one level deeper for its lines.
				y.nesting++
			}
			y.write(pair[1])
			if !oneLine {
				y.nesting--
			} else {
				y.out.WriteString("\n")
			}
		}
	}
}
// writeFloat renders a float32 or float64 value, spelling out infinities and
// preserving exact NaN bit patterns in the output.
func (y *toYAML) writeFloat(v any) {
	var f float64
	var bits int
	switch x := v.(type) {
	case float32:
		f, bits = float64(x), 32
	case float64:
		f, bits = x, 64
	}
	switch {
	case math.IsInf(f, 1):
		y.out.WriteString("inf")
	case math.IsInf(f, -1):
		y.out.WriteString("-inf")
	case math.IsNaN(f):
		// Dump the exact payload so distinct NaNs remain distinguishable.
		if x, ok := v.(float32); ok {
			fmt.Fprintf(&y.out, "nan@%08x", math.Float32bits(x))
		} else {
			fmt.Fprintf(&y.out, "nan@%016x", math.Float64bits(v.(float64)))
		}
	default:
		y.out.WriteString(strconv.FormatFloat(f, 'g', -1, bits))
	}
}
// writeOneLineDoc renders d in its compressed, single-line form.
func (y *toYAML) writeOneLineDoc(d *doc) {
	switch {
	case d.isArray:
		y.out.WriteString("[")
		for i, kv := range d.pairs {
			if i > 0 {
				y.out.WriteString(", ")
			}
			y.write(kv[1])
		}
		y.out.WriteString("]")
	case len(d.pairs) == 0:
		y.out.WriteString("{}")
	case len(d.pairs) == 1 && strings.HasSuffix(y.out.String(), "- "):
		// Special case: if we are a list element, and there is only
		// one entry, print it directly.
		only := d.pairs[0]
		y.write(only[0])
		y.out.WriteString(": ")
		y.write(only[1])
	default:
		y.out.WriteString("{ ")
		for i, kv := range d.pairs {
			if i > 0 {
				y.out.WriteString(", ")
			}
			y.write(kv[0])
			y.out.WriteString(": ")
			y.write(kv[1])
		}
		y.out.WriteString(" }")
	}
}
// isOneLine reports whether v fits on a single line at the current nesting
// depth. Non-doc values are always single-line.
func (y *toYAML) isOneLine(v any) bool {
	d, isDoc := v.(*doc)
	if !isDoc {
		return true
	}
	limit := y.MaxWidth
	if limit == 0 {
		limit = 80 // Default wrap width.
	}
	// Each nesting level consumes two columns of indentation.
	return d.width < limit-y.nesting*2
}
// indent appends indentation if necessary: only at the very start of the
// output or immediately after a newline (i.e. at the start of a line).
func (y *toYAML) indent() {
	s := y.out.String()
	if s == "" || strings.HasSuffix(s, "\n") {
		// One indentation unit per nesting level.
		for range y.nesting {
			y.out.WriteString(" ")
		}
	}
}
// doc is a generic document structure used as an intermediate for generating
// the compressed output of ToYAML.
//
// It is composed of an array of pairs of arbitrary values.
type doc struct {
	// pairs holds (key, value) entries; keys are nil for array elements.
	pairs [][2]any
	// width is the single-line rendered width, computed by prepare.
	width int
	// isArray records whether entries were pushed with nil keys; needsSort
	// requests sorting by key in prepare (set for map entries).
	isArray, needsSort bool
}
// push adds a new entry to this document.
//
// All pushed entries must either have a non-nil key OR a nil key; mixing the
// two styles in one doc panics.
func (d *doc) push(k, v any) {
	isElem := k == nil
	if len(d.pairs) == 0 {
		// The first entry decides whether this doc is an array or a map.
		d.isArray = isElem
	} else if d.isArray != isElem {
		panic("misuse of doc.push()")
	}
	d.pairs = append(d.pairs, [2]any{k, v})
}
// prepare prepares a document for printing by compressing elements as
// appropriate.
func (d *doc) prepare() {
	if d.needsSort {
		// Map entries arrive in nondeterministic order; sort by key for
		// stable golden output.
		slices.SortFunc(d.pairs, func(a, b [2]any) int {
			return cmpx.Any(a[0], b[0])
		})
	}
	// Compute the width this doc would occupy if rendered on one line;
	// isOneLine later compares it against the wrap limit.
	if d.isArray || len(d.pairs) == 0 {
		d.width = 2 // Accounts for [] or an empty {}.
	} else {
		d.width = 4 // Accounts for the { ... } delimiters.
	}
	for i := range d.pairs {
		pair := &d.pairs[i]
		if pair[0] != nil {
			// The 2 accounts for the ": " token.
			d.width += len(fmt.Sprint(pair[0])) + 2
		}
		if i > 0 {
			d.width += 2 // Accounts for the ", "
		}
		switch v := pair[1].(type) {
		case int32, int64, uint32, uint64, float32, float64, protoreflect.Name, string:
			d.width += len(fmt.Sprint(v))
		case *doc:
			v.prepare()
			d.width += v.width
			if len(v.pairs) == 1 {
				// Compress a singleton nested message into a dotted key,
				// e.g. {a: {b: 1}} becomes {a.b: 1}.
				outer, ok1 := pair[0].(protoreflect.Name)
				inner, ok2 := v.pairs[0][0].(protoreflect.Name)
				if ok1 && ok2 {
					//nolint:unconvert // Conversion below is included for readability.
					pair[0] = protoreflect.Name(outer + "." + inner)
					pair[1] = v.pairs[0][1]
				}
			}
		}
	}
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"bytes"
"unicode"
"unicode/utf8"
"google.golang.org/protobuf/reflect/protoreflect"
)
// JSONName returns the default JSON name for a field with the given name.
// This mirrors the algorithm in protoc:
//
//	https://github.com/protocolbuffers/protobuf/blob/v21.3/src/google/protobuf/descriptor.cc#L95
//
// Underscores are dropped and the following letter is capitalized.
func JSONName(name string) string {
	// Pre-size: the result never has more runes than the input has bytes.
	js := make([]rune, 0, len(name))
	nextUpper := false
	for _, r := range name {
		if r == '_' {
			nextUpper = true
			continue
		}
		if nextUpper {
			nextUpper = false
			r = unicode.ToUpper(r)
		}
		js = append(js, r)
	}
	return string(js)
}
// InitCap returns the given field name, but with the first letter capitalized.
// An empty name is returned unchanged (previously it would decode to the
// Unicode replacement character).
func InitCap(name string) string {
	if name == "" {
		return ""
	}
	r, sz := utf8.DecodeRuneInString(name)
	return string(unicode.ToUpper(r)) + name[sz:]
}
// CreatePrefixList returns a list of package prefixes to search when resolving
// a symbol name. If the given package is blank, it returns only the empty
// string. If the given package contains only one token, e.g. "foo", it returns
// that token and the empty string, e.g. ["foo", ""]. Otherwise, it returns
// successively shorter prefixes of the package and then the empty string. For
// example, for a package named "foo.bar.baz" it will return the following list:
//
// ["foo.bar.baz", "foo.bar", "foo", ""]
func CreatePrefixList(pkg string) []string {
if pkg == "" {
return []string{""}
}
numDots := 0
// one pass to pre-allocate the returned slice
for i := range len(pkg) {
if pkg[i] == '.' {
numDots++
}
}
if numDots == 0 {
return []string{pkg, ""}
}
prefixes := make([]string, numDots+2)
// second pass to fill in returned slice
for i := range len(pkg) {
if pkg[i] == '.' {
prefixes[numDots] = pkg[:i]
numDots--
}
}
prefixes[0] = pkg
return prefixes
}
func WriteEscapedBytes(buf *bytes.Buffer, b []byte) {
// This uses the same algorithm as the protoc C++ code for escaping strings.
// The protoc C++ code in turn uses the abseil C++ library's CEscape function:
// https://github.com/abseil/abseil-cpp/blob/934f613818ffcb26c942dff4a80be9a4031c662c/absl/strings/escaping.cc#L406
for _, c := range b {
switch c {
case '\n':
buf.WriteString("\\n")
case '\r':
buf.WriteString("\\r")
case '\t':
buf.WriteString("\\t")
case '"':
buf.WriteString("\\\"")
case '\'':
buf.WriteString("\\'")
case '\\':
buf.WriteString("\\\\")
default:
if c >= 0x20 && c < 0x7f {
// simple printable characters
buf.WriteByte(c)
} else {
// use octal escape for all other values
buf.WriteRune('\\')
buf.WriteByte('0' + ((c >> 6) & 0x7))
buf.WriteByte('0' + ((c >> 3) & 0x7))
buf.WriteByte('0' + (c & 0x7))
}
}
}
}
// IsZeroLocation returns true if the given loc is a zero value
// (which is returned from queries that have no result).
func IsZeroLocation(loc protoreflect.SourceLocation) bool {
	switch {
	case loc.Path != nil:
		return false
	case loc.StartLine != 0 || loc.StartColumn != 0:
		return false
	case loc.EndLine != 0 || loc.EndColumn != 0:
		return false
	case loc.LeadingDetachedComments != nil:
		return false
	case loc.LeadingComments != "" || loc.TrailingComments != "":
		return false
	default:
		return loc.Next == 0
	}
}
// ComputePath computes the source location path for the given descriptor.
// The boolean value indicates whether the result is valid. If the path
// cannot be computed for d, the function returns nil, false.
func ComputePath(d protoreflect.Descriptor) (protoreflect.SourcePath, bool) {
	// A file is the root of the hierarchy; its path is empty but valid.
	_, ok := d.(protoreflect.FileDescriptor)
	if ok {
		return nil, true
	}
	// Walk up the parent chain, appending (index, tag) pairs in reverse
	// order; the accumulated path is reversed before returning.
	var path protoreflect.SourcePath
	for {
		p := d.Parent()
		switch d := d.(type) {
		case protoreflect.FileDescriptor:
			// Reached the root: the path is complete.
			return reverse(path), true
		case protoreflect.MessageDescriptor:
			path = append(path, int32(d.Index()))
			switch p.(type) {
			case protoreflect.FileDescriptor:
				path = append(path, FileMessagesTag)
			case protoreflect.MessageDescriptor:
				path = append(path, MessageNestedMessagesTag)
			default:
				return nil, false
			}
		case protoreflect.FieldDescriptor:
			path = append(path, int32(d.Index()))
			switch p.(type) {
			case protoreflect.FileDescriptor:
				// Only extensions can be declared directly in a file.
				if d.IsExtension() {
					path = append(path, FileExtensionsTag)
				} else {
					return nil, false
				}
			case protoreflect.MessageDescriptor:
				if d.IsExtension() {
					path = append(path, MessageExtensionsTag)
				} else {
					path = append(path, MessageFieldsTag)
				}
			default:
				return nil, false
			}
		case protoreflect.OneofDescriptor:
			path = append(path, int32(d.Index()))
			if _, ok := p.(protoreflect.MessageDescriptor); ok {
				path = append(path, MessageOneofsTag)
			} else {
				return nil, false
			}
		case protoreflect.EnumDescriptor:
			path = append(path, int32(d.Index()))
			switch p.(type) {
			case protoreflect.FileDescriptor:
				path = append(path, FileEnumsTag)
			case protoreflect.MessageDescriptor:
				path = append(path, MessageEnumsTag)
			default:
				return nil, false
			}
		case protoreflect.EnumValueDescriptor:
			path = append(path, int32(d.Index()))
			if _, ok := p.(protoreflect.EnumDescriptor); ok {
				path = append(path, EnumValuesTag)
			} else {
				return nil, false
			}
		case protoreflect.ServiceDescriptor:
			path = append(path, int32(d.Index()))
			if _, ok := p.(protoreflect.FileDescriptor); ok {
				path = append(path, FileServicesTag)
			} else {
				return nil, false
			}
		case protoreflect.MethodDescriptor:
			path = append(path, int32(d.Index()))
			if _, ok := p.(protoreflect.ServiceDescriptor); ok {
				path = append(path, ServiceMethodsTag)
			} else {
				return nil, false
			}
		}
		// Continue the walk from the parent.
		d = p
	}
}
// CanPack returns true if a repeated field of the given kind
// can use packed encoding. Length-delimited kinds (messages, groups,
// strings, and bytes) may not be packed.
func CanPack(k protoreflect.Kind) bool {
	return k != protoreflect.MessageKind &&
		k != protoreflect.GroupKind &&
		k != protoreflect.StringKind &&
		k != protoreflect.BytesKind
}
// ClonePath returns a copy of path with its own backing array.
func ClonePath(path protoreflect.SourcePath) protoreflect.SourcePath {
	dup := make(protoreflect.SourcePath, len(path))
	copy(dup, path)
	return dup
}
// reverse reverses p in place and returns it for convenience.
func reverse(p protoreflect.SourcePath) protoreflect.SourcePath {
	n := len(p)
	for i := 0; i < n/2; i++ {
		p[i], p[n-1-i] = p[n-1-i], p[i]
	}
	return p
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package linker
import (
"fmt"
"slices"
"strconv"
"strings"
"unicode/utf8"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protodesc"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/descriptorpb"
"google.golang.org/protobuf/types/dynamicpb"
"github.com/bufbuild/protocompile/ast"
"github.com/bufbuild/protocompile/internal"
"github.com/bufbuild/protocompile/internal/editions"
"github.com/bufbuild/protocompile/internal/ext/unsafex"
"github.com/bufbuild/protocompile/parser"
"github.com/bufbuild/protocompile/protoutil"
)
var (
	// These "noOp*" values are all descriptors. The protoreflect.Descriptor
	// interface and its sub-interfaces are all marked with an unexported
	// method so that they cannot be implemented outside of the google.golang.org/protobuf
	// module. So, to provide implementations from this package, we must embed
	// them. If we simply left the embedded interface field nil, then if/when
	// new methods are added to the interfaces, it could induce panics in this
	// package or users of this module (since trying to invoke one of these new
	// methods would end up trying to call a method on a nil interface value).
	//
	// So instead of leaving the embedded interface fields nil, we embed an actual
	// value. While new methods are unlikely to return the correct value (since
	// the calls will be delegated to these no-op instances), it is a less
	// dangerous latent bug than inducing a nil-dereference panic.
	//
	// All of these are populated in init() below from a single synthetic
	// "no-op.proto" file descriptor.
	noOpFile      protoreflect.FileDescriptor
	noOpMessage   protoreflect.MessageDescriptor
	noOpOneof     protoreflect.OneofDescriptor
	noOpField     protoreflect.FieldDescriptor
	noOpEnum      protoreflect.EnumDescriptor
	noOpEnumValue protoreflect.EnumValueDescriptor
	noOpExtension protoreflect.ExtensionDescriptor
	noOpService   protoreflect.ServiceDescriptor
	noOpMethod    protoreflect.MethodDescriptor
)
var (
	// Cached field descriptors of google.protobuf.FeatureSet, used by the
	// resolveFeature calls below when answering editions-sensitive queries
	// (presence, encoding, enum openness, JSON support).
	fieldPresenceField         = editions.FeatureSetDescriptor.Fields().ByName("field_presence")
	repeatedFieldEncodingField = editions.FeatureSetDescriptor.Fields().ByName("repeated_field_encoding")
	messageEncodingField       = editions.FeatureSetDescriptor.Fields().ByName("message_encoding")
	enumTypeField              = editions.FeatureSetDescriptor.Fields().ByName("enum_type")
	jsonFormatField            = editions.FeatureSetDescriptor.Fields().ByName("json_format")
)
// init builds the synthetic "no-op.proto" file descriptor and populates the
// noOp* package variables from it. The file contains exactly one of each kind
// of element (message, field, oneof, enum, enum value, extension, service,
// and method) so that every noOp* variable has a real value to embed.
func init() {
	var err error
	noOpFile, err = protodesc.NewFile(
		&descriptorpb.FileDescriptorProto{
			Name:       proto.String("no-op.proto"),
			Syntax:     proto.String("proto2"),
			Dependency: []string{"google/protobuf/descriptor.proto"},
			MessageType: []*descriptorpb.DescriptorProto{
				{
					Name: proto.String("NoOpMsg"),
					Field: []*descriptorpb.FieldDescriptorProto{
						{
							Name:       proto.String("no_op"),
							Type:       descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(),
							Label:      descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
							Number:     proto.Int32(1),
							JsonName:   proto.String("noOp"),
							OneofIndex: proto.Int32(0),
						},
					},
					OneofDecl: []*descriptorpb.OneofDescriptorProto{
						{
							Name: proto.String("no_op_oneof"),
						},
					},
				},
			},
			EnumType: []*descriptorpb.EnumDescriptorProto{
				{
					Name: proto.String("NoOpEnum"),
					Value: []*descriptorpb.EnumValueDescriptorProto{
						{
							Name:   proto.String("NO_OP"),
							Number: proto.Int32(0),
						},
					},
				},
			},
			Extension: []*descriptorpb.FieldDescriptorProto{
				{
					Extendee: proto.String(".google.protobuf.FileOptions"),
					Name:     proto.String("no_op"),
					Type:     descriptorpb.FieldDescriptorProto_TYPE_STRING.Enum(),
					Label:    descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum(),
					Number:   proto.Int32(50000),
				},
			},
			Service: []*descriptorpb.ServiceDescriptorProto{
				{
					Name: proto.String("NoOpService"),
					Method: []*descriptorpb.MethodDescriptorProto{
						{
							Name:       proto.String("NoOp"),
							InputType:  proto.String(".NoOpMsg"),
							OutputType: proto.String(".NoOpMsg"),
						},
					},
				},
			},
		},
		protoregistry.GlobalFiles,
	)
	if err != nil {
		// The descriptor literal above is known-good, so this should be
		// unreachable. Panic with the actual cause rather than letting a
		// nil noOpFile produce an opaque nil-dereference panic below.
		panic(fmt.Sprintf("building no-op file descriptor: %v", err))
	}
	noOpMessage = noOpFile.Messages().Get(0)
	noOpOneof = noOpMessage.Oneofs().Get(0)
	noOpField = noOpMessage.Fields().Get(0)
	noOpEnum = noOpFile.Enums().Get(0)
	noOpEnumValue = noOpEnum.Values().Get(0)
	noOpExtension = noOpFile.Extensions().Get(0)
	noOpService = noOpFile.Services().Get(0)
	noOpMethod = noOpService.Methods().Get(0)
}
// This file contains implementations of protoreflect.Descriptor. Note that
// this is a hack since those interfaces have a "doNotImplement" tag
// interface therein. We do just enough to make dynamicpb happy; constructing
// a regular descriptor would fail because we haven't yet interpreted options
// at the point we need these, and some validations will fail if the options
// aren't present.
// result is the linker's implementation of protoreflect.FileDescriptor,
// layered on top of a parser.Result.
type result struct {
	protoreflect.FileDescriptor
	parser.Result
	// prefix — presumably the package prefix (with trailing dot) used when
	// computing fully-qualified names; set elsewhere in the linker — confirm.
	prefix string
	// deps holds the resolved dependencies, used to materialize imports
	// (see createImports).
	deps Files
	// A map of all descriptors keyed by their fully-qualified name (without
	// any leading dot).
	descriptors map[string]protoreflect.Descriptor
	// A set of imports that have been used in the course of linking and
	// interpreting options.
	usedImports map[string]struct{}
	// A map of AST nodes that represent identifiers in ast.FieldReferenceNodes
	// to their fully-qualified name. The identifiers are for field names in
	// message literals (in option values) that are extension fields. These names
	// are resolved during linking and stored here, to be used to interpret options.
	optionQualifiedNames map[ast.IdentValueNode]string
	imports              fileImports
	messages             msgDescriptors
	enums                enumDescriptors
	extensions           extDescriptors
	services             svcDescriptors
	srcLocations         srcLocs
}

// Compile-time interface satisfaction checks.
var _ protoreflect.FileDescriptor = (*result)(nil)
var _ Result = (*result)(nil)
var _ protoutil.DescriptorProtoWrapper = (*result)(nil)
var _ editions.HasEdition = (*result)(nil)
// RemoveAST discards the parsed AST, retaining only the file descriptor
// proto. optionQualifiedNames is also dropped since its keys are AST nodes
// and it is useless without them.
func (r *result) RemoveAST() {
	r.Result = parser.ResultWithoutAST(r.FileDescriptorProto())
	r.optionQualifiedNames = nil
}

// AsProto returns the underlying FileDescriptorProto.
func (r *result) AsProto() proto.Message {
	return r.FileDescriptorProto()
}

// ParentFile implements protoreflect.Descriptor; a file is its own parent file.
func (r *result) ParentFile() protoreflect.FileDescriptor {
	return r
}

// Parent implements protoreflect.Descriptor; a file has no parent.
func (r *result) Parent() protoreflect.Descriptor {
	return nil
}

// Index implements protoreflect.Descriptor; files always report index zero.
func (r *result) Index() int {
	return 0
}

// Syntax maps the descriptor proto's syntax string to the protoreflect enum.
// An absent syntax string defaults to proto2.
func (r *result) Syntax() protoreflect.Syntax {
	switch r.FileDescriptorProto().GetSyntax() {
	case "proto2", "":
		return protoreflect.Proto2
	case "proto3":
		return protoreflect.Proto3
	case "editions":
		return protoreflect.Editions
	default:
		return 0 // ???
	}
}
// Edition reports the file's edition as an int32 value of the
// descriptorpb.Edition enum. Pre-editions syntaxes map to their legacy
// edition values; unknown syntaxes map to EDITION_UNKNOWN.
func (r *result) Edition() int32 {
	var ed descriptorpb.Edition
	switch r.Syntax() {
	case protoreflect.Proto2:
		ed = descriptorpb.Edition_EDITION_PROTO2
	case protoreflect.Proto3:
		ed = descriptorpb.Edition_EDITION_PROTO3
	case protoreflect.Editions:
		return int32(r.FileDescriptorProto().GetEdition())
	default:
		ed = descriptorpb.Edition_EDITION_UNKNOWN // ???
	}
	return int32(ed)
}
// Name implements protoreflect.Descriptor; a file has no unqualified name.
func (r *result) Name() protoreflect.Name {
	return ""
}

// FullName reports the file's package as its full name.
func (r *result) FullName() protoreflect.FullName {
	return r.Package()
}

// IsPlaceholder reports false: this is a fully-linked descriptor.
func (r *result) IsPlaceholder() bool {
	return false
}

// Options returns the FileOptions message (may be nil).
func (r *result) Options() protoreflect.ProtoMessage {
	return r.FileDescriptorProto().Options
}

// Path returns the file's path as declared in the descriptor proto.
func (r *result) Path() string {
	return r.FileDescriptorProto().GetName()
}

// Package returns the file's declared package name.
func (r *result) Package() protoreflect.FullName {
	return protoreflect.FullName(r.FileDescriptorProto().GetPackage())
}

// Imports returns the file's imports (built by createImports).
func (r *result) Imports() protoreflect.FileImports {
	return &r.imports
}

// Enums returns the file's top-level enums.
func (r *result) Enums() protoreflect.EnumDescriptors {
	return &r.enums
}

// Messages returns the file's top-level messages.
func (r *result) Messages() protoreflect.MessageDescriptors {
	return &r.messages
}

// Extensions returns the file's top-level extensions.
func (r *result) Extensions() protoreflect.ExtensionDescriptors {
	return &r.extensions
}

// Services returns the file's services.
func (r *result) Services() protoreflect.ServiceDescriptors {
	return &r.services
}

// PopulateSourceCodeInfo converts the descriptor proto's source code info
// into the protoreflect form, indexed for lookup by path.
func (r *result) PopulateSourceCodeInfo() {
	srcLocProtos, srcLocIndex := asSourceLocations(r.FileDescriptorProto().GetSourceCodeInfo().GetLocation())
	r.srcLocations = srcLocs{file: r, locs: srcLocProtos, index: srcLocIndex}
}

// SourceLocations returns the source locations populated by
// PopulateSourceCodeInfo (empty if it was never called).
func (r *result) SourceLocations() protoreflect.SourceLocations {
	return &r.srcLocations
}
// asSourceLocations converts SourceCodeInfo_Location protos into
// protoreflect.SourceLocation values. It returns the converted slice along
// with an index that maps each distinct source path to the position of its
// FIRST location; subsequent locations with the same path are chained via
// the Next field of the prior entry (matching protoreflect semantics).
func asSourceLocations(srcInfoProtos []*descriptorpb.SourceCodeInfo_Location) ([]protoreflect.SourceLocation, map[sourcePathKey]int) {
	locs := make([]protoreflect.SourceLocation, len(srcInfoProtos))
	index := make(map[sourcePathKey]int, len(srcInfoProtos))
	// prev tracks the most recent location seen for each path so we can
	// link its Next field when a duplicate path appears.
	prev := make(map[sourcePathKey]*protoreflect.SourceLocation, len(srcInfoProtos))
	for i, loc := range srcInfoProtos {
		var stLin, stCol, enLin, enCol int
		if len(loc.Span) == 3 {
			// Three-element span: start line, start col, end col (same line).
			stLin, stCol, enCol = int(loc.Span[0]), int(loc.Span[1]), int(loc.Span[2])
			enLin = stLin
		} else {
			// Four-element span: start line, start col, end line, end col.
			stLin, stCol, enLin, enCol = int(loc.Span[0]), int(loc.Span[1]), int(loc.Span[2]), int(loc.Span[3])
		}
		locs[i] = protoreflect.SourceLocation{
			Path:                    loc.Path,
			LeadingComments:         loc.GetLeadingComments(),
			LeadingDetachedComments: loc.GetLeadingDetachedComments(),
			TrailingComments:        loc.GetTrailingComments(),
			StartLine:               stLin,
			StartColumn:             stCol,
			EndLine:                 enLin,
			EndColumn:               enCol,
		}
		str := pathKey(loc.Path)
		pr := prev[str]
		if pr == nil {
			// First occurrence of this path: record it in the index.
			index[str] = i
		} else {
			// Duplicate path: chain from the previous occurrence.
			pr.Next = i
		}
		prev[str] = &locs[i]
	}
	return locs, index
}
// fileImports implements protoreflect.FileImports over a plain slice.
type fileImports struct {
	protoreflect.FileImports
	files []protoreflect.FileImport
}

// createImports resolves each dependency path against r.deps and marks the
// public/weak flags using the descriptor proto's index lists.
func (r *result) createImports() fileImports {
	fd := r.FileDescriptorProto()
	imps := make([]protoreflect.FileImport, len(fd.Dependency))
	for i, dep := range fd.Dependency {
		desc := r.deps.FindFileByPath(dep)
		imps[i] = protoreflect.FileImport{FileDescriptor: unwrap(desc)}
	}
	for _, publicIndex := range fd.PublicDependency {
		imps[int(publicIndex)].IsPublic = true
	}
	for _, weakIndex := range fd.WeakDependency {
		//nolint:staticcheck // yes, is_weak is deprecated; but we still have to set it to compile the file
		imps[int(weakIndex)].IsWeak = true
	}
	return fileImports{files: imps}
}
// unwrap returns the underlying file descriptor if descriptor is a wrapper
// exposing an Unwrap method; otherwise it returns descriptor unchanged.
func unwrap(descriptor protoreflect.FileDescriptor) protoreflect.FileDescriptor {
	type unwrapper interface {
		Unwrap() protoreflect.FileDescriptor
	}
	w, ok := descriptor.(unwrapper)
	if !ok {
		return descriptor
	}
	if inner := w.Unwrap(); inner != nil {
		return inner
	}
	return descriptor // shouldn't ever happen
}
// Len returns the number of imports.
func (f *fileImports) Len() int {
	return len(f.files)
}

// Get returns the i-th import.
func (f *fileImports) Get(i int) protoreflect.FileImport {
	return f.files[i]
}
// srcLocs implements protoreflect.SourceLocations for a linked file,
// using the index built by asSourceLocations for path lookups.
type srcLocs struct {
	protoreflect.SourceLocations
	file  *result
	locs  []protoreflect.SourceLocation
	index map[sourcePathKey]int
}

// Len returns the number of source locations.
func (s *srcLocs) Len() int {
	return len(s.locs)
}

// Get returns the i-th source location.
func (s *srcLocs) Get(i int) protoreflect.SourceLocation {
	return s.locs[i]
}
// ByPath returns the first source location recorded for the given path,
// or the zero SourceLocation if the path is unknown.
func (s *srcLocs) ByPath(p protoreflect.SourcePath) protoreflect.SourceLocation {
	if i, ok := s.index[pathKeyNoCopy(p)]; ok {
		return s.locs[i]
	}
	return protoreflect.SourceLocation{}
}
// ByDescriptor returns the source location for the given descriptor, or the
// zero SourceLocation if the descriptor belongs to a different file or its
// source path cannot be computed.
func (s *srcLocs) ByDescriptor(d protoreflect.Descriptor) protoreflect.SourceLocation {
	var zero protoreflect.SourceLocation
	if d.ParentFile() != s.file {
		return zero
	}
	p, ok := internal.ComputePath(d)
	if !ok {
		return zero
	}
	return s.ByPath(p)
}
// msgDescriptors implements protoreflect.MessageDescriptors over a
// flat slice of msgDescriptor values.
type msgDescriptors struct {
	protoreflect.MessageDescriptors
	msgs []msgDescriptor
}

// createMessages builds descriptors for msgProtos, using pool to amortize
// allocations. prefix is prepended to each message name to form its
// fully-qualified name.
func (r *result) createMessages(prefix string, parent protoreflect.Descriptor, msgProtos []*descriptorpb.DescriptorProto, pool *allocPool) msgDescriptors {
	msgs := pool.getMessages(len(msgProtos))
	for i, msgProto := range msgProtos {
		r.createMessageDescriptor(&msgs[i], msgProto, parent, i, prefix+msgProto.GetName(), pool)
	}
	return msgDescriptors{msgs: msgs}
}

// Len returns the number of messages.
func (m *msgDescriptors) Len() int {
	return len(m.msgs)
}

// Get returns the i-th message.
func (m *msgDescriptors) Get(i int) protoreflect.MessageDescriptor {
	return &m.msgs[i]
}

// ByName returns the message with the given simple name, or nil if absent.
// Linear scan; message counts are typically small.
func (m *msgDescriptors) ByName(s protoreflect.Name) protoreflect.MessageDescriptor {
	for i := range m.msgs {
		msg := &m.msgs[i]
		if msg.Name() == s {
			return msg
		}
	}
	return nil
}
// msgDescriptor is the linker's implementation of
// protoreflect.MessageDescriptor, delegating unimplemented methods to the
// embedded no-op descriptor.
type msgDescriptor struct {
	protoreflect.MessageDescriptor
	file             *result
	parent           protoreflect.Descriptor
	index            int
	proto            *descriptorpb.DescriptorProto
	fqn              string
	fields           fldDescriptors
	oneofs           oneofDescriptors
	nestedMessages   msgDescriptors
	nestedEnums      enumDescriptors
	nestedExtensions extDescriptors
	extRanges        fieldRanges
	rsvdRanges       fieldRanges
	rsvdNames        names
}

// Compile-time interface satisfaction checks.
var _ protoreflect.MessageDescriptor = (*msgDescriptor)(nil)
var _ protoutil.DescriptorProtoWrapper = (*msgDescriptor)(nil)

// createMessageDescriptor initializes ret in place from md, registering it
// in r.descriptors under its fully-qualified name and recursively creating
// all nested elements.
func (r *result) createMessageDescriptor(ret *msgDescriptor, md *descriptorpb.DescriptorProto, parent protoreflect.Descriptor, index int, fqn string, pool *allocPool) {
	r.descriptors[fqn] = ret
	ret.MessageDescriptor = noOpMessage
	ret.file = r
	ret.parent = parent
	ret.index = index
	ret.proto = md
	ret.fqn = fqn
	prefix := fqn + "."
	// NB: We MUST create fields before oneofs so that we can populate the
	// set of fields that belong to the oneof
	ret.fields = r.createFields(prefix, ret, md.Field, pool)
	ret.oneofs = r.createOneofs(prefix, ret, md.OneofDecl, pool)
	ret.nestedMessages = r.createMessages(prefix, ret, md.NestedType, pool)
	ret.nestedEnums = r.createEnums(prefix, ret, md.EnumType, pool)
	ret.nestedExtensions = r.createExtensions(prefix, ret, md.Extension, pool)
	ret.extRanges = createFieldRanges(md.ExtensionRange)
	ret.rsvdRanges = createFieldRanges(md.ReservedRange)
	ret.rsvdNames = names{s: md.ReservedName}
}
// MessageDescriptorProto returns the underlying descriptor proto.
func (m *msgDescriptor) MessageDescriptorProto() *descriptorpb.DescriptorProto {
	return m.proto
}

// AsProto returns the underlying descriptor proto as a proto.Message.
func (m *msgDescriptor) AsProto() proto.Message {
	return m.proto
}

// ParentFile returns the file in which this message is declared.
func (m *msgDescriptor) ParentFile() protoreflect.FileDescriptor {
	return m.file
}

// Parent returns the enclosing file or message.
func (m *msgDescriptor) Parent() protoreflect.Descriptor {
	return m.parent
}

// Index returns this message's index within its parent.
func (m *msgDescriptor) Index() int {
	return m.index
}

// Syntax reports the syntax of the enclosing file.
func (m *msgDescriptor) Syntax() protoreflect.Syntax {
	return m.file.Syntax()
}

// Name returns the message's simple name.
func (m *msgDescriptor) Name() protoreflect.Name {
	return protoreflect.Name(m.proto.GetName())
}

// FullName returns the message's fully-qualified name.
func (m *msgDescriptor) FullName() protoreflect.FullName {
	return protoreflect.FullName(m.fqn)
}

// IsPlaceholder reports false: this is a fully-linked descriptor.
func (m *msgDescriptor) IsPlaceholder() bool {
	return false
}

// Options returns the MessageOptions message (may be nil).
func (m *msgDescriptor) Options() protoreflect.ProtoMessage {
	return m.proto.Options
}

// IsMapEntry reports whether this is a synthesized map entry message.
func (m *msgDescriptor) IsMapEntry() bool {
	return m.proto.Options.GetMapEntry()
}

// Fields returns the message's fields.
func (m *msgDescriptor) Fields() protoreflect.FieldDescriptors {
	return &m.fields
}

// Oneofs returns the message's oneofs.
func (m *msgDescriptor) Oneofs() protoreflect.OneofDescriptors {
	return &m.oneofs
}

// ReservedNames returns the message's reserved field names.
func (m *msgDescriptor) ReservedNames() protoreflect.Names {
	return m.rsvdNames
}

// ReservedRanges returns the message's reserved field number ranges.
func (m *msgDescriptor) ReservedRanges() protoreflect.FieldRanges {
	return m.rsvdRanges
}

// RequiredNumbers returns the numbers of all fields labeled required.
// Computed on each call by scanning the descriptor proto's fields.
func (m *msgDescriptor) RequiredNumbers() protoreflect.FieldNumbers {
	var indexes fieldNums
	for _, fld := range m.proto.Field {
		if fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED {
			indexes.s = append(indexes.s, fld.GetNumber())
		}
	}
	return indexes
}

// ExtensionRanges returns the message's extension number ranges.
func (m *msgDescriptor) ExtensionRanges() protoreflect.FieldRanges {
	return m.extRanges
}

// ExtensionRangeOptions returns the options of the i-th extension range.
func (m *msgDescriptor) ExtensionRangeOptions(i int) protoreflect.ProtoMessage {
	return m.proto.ExtensionRange[i].Options
}

// Enums returns the enums nested in this message.
func (m *msgDescriptor) Enums() protoreflect.EnumDescriptors {
	return &m.nestedEnums
}

// Messages returns the messages nested in this message.
func (m *msgDescriptor) Messages() protoreflect.MessageDescriptors {
	return &m.nestedMessages
}

// Extensions returns the extensions nested in this message.
func (m *msgDescriptor) Extensions() protoreflect.ExtensionDescriptors {
	return &m.nestedExtensions
}
// names implements protoreflect.Names over a plain string slice.
type names struct {
	protoreflect.Names
	s []string
}

// Len returns the number of names.
func (n names) Len() int {
	return len(n.s)
}

// Get returns the i-th name.
func (n names) Get(i int) protoreflect.Name {
	return protoreflect.Name(n.s[i])
}
// Has reports whether s is among the contained names.
func (n names) Has(s protoreflect.Name) bool {
	// slices.Contains replaces the previous hand-rolled linear scan;
	// the file already imports "slices".
	return slices.Contains(n.s, string(s))
}
// fieldNums implements protoreflect.FieldNumbers over a plain int32 slice.
type fieldNums struct {
	protoreflect.FieldNumbers
	s []int32
}

// Len returns the number of field numbers.
func (n fieldNums) Len() int {
	return len(n.s)
}

// Get returns the i-th field number.
func (n fieldNums) Get(i int) protoreflect.FieldNumber {
	return protoreflect.FieldNumber(n.s[i])
}
// Has reports whether s is among the contained field numbers.
func (n fieldNums) Has(s protoreflect.FieldNumber) bool {
	// slices.Contains replaces the previous hand-rolled linear scan;
	// the file already imports "slices".
	return slices.Contains(n.s, int32(s))
}
// fieldRanges implements protoreflect.FieldRanges over [start, end) pairs.
type fieldRanges struct {
	protoreflect.FieldRanges
	ranges [][2]protoreflect.FieldNumber
}

// fieldRange abstracts over the proto range types (extension ranges and
// reserved ranges) that expose start/end accessors.
type fieldRange interface {
	GetStart() int32
	GetEnd() int32
}

// createFieldRanges converts proto range messages into start/end pairs.
func createFieldRanges[T fieldRange](rangeProtos []T) fieldRanges {
	ranges := make([][2]protoreflect.FieldNumber, len(rangeProtos))
	for i, r := range rangeProtos {
		ranges[i] = [2]protoreflect.FieldNumber{
			protoreflect.FieldNumber(r.GetStart()),
			protoreflect.FieldNumber(r.GetEnd()),
		}
	}
	return fieldRanges{ranges: ranges}
}

// Len returns the number of ranges.
func (f fieldRanges) Len() int {
	return len(f.ranges)
}

// Get returns the i-th range as a [start, end) pair.
func (f fieldRanges) Get(i int) [2]protoreflect.FieldNumber {
	return f.ranges[i]
}

// Has reports whether n falls in any range. Note that the end bound is
// treated as exclusive (r[1] > n), matching the end-exclusive encoding of
// message field ranges in descriptor protos.
func (f fieldRanges) Has(n protoreflect.FieldNumber) bool {
	for _, r := range f.ranges {
		if r[0] <= n && r[1] > n {
			return true
		}
	}
	return false
}
// enumDescriptors implements protoreflect.EnumDescriptors over a flat
// slice of enumDescriptor values.
type enumDescriptors struct {
	protoreflect.EnumDescriptors
	enums []enumDescriptor
}

// createEnums builds descriptors for enumProtos, using pool to amortize
// allocations. prefix is prepended to each enum name to form its
// fully-qualified name.
func (r *result) createEnums(prefix string, parent protoreflect.Descriptor, enumProtos []*descriptorpb.EnumDescriptorProto, pool *allocPool) enumDescriptors {
	enums := pool.getEnums(len(enumProtos))
	for i, enumProto := range enumProtos {
		r.createEnumDescriptor(&enums[i], enumProto, parent, i, prefix+enumProto.GetName(), pool)
	}
	return enumDescriptors{enums: enums}
}

// Len returns the number of enums.
func (e *enumDescriptors) Len() int {
	return len(e.enums)
}

// Get returns the i-th enum.
func (e *enumDescriptors) Get(i int) protoreflect.EnumDescriptor {
	return &e.enums[i]
}

// ByName returns the enum with the given simple name, or nil if absent.
func (e *enumDescriptors) ByName(s protoreflect.Name) protoreflect.EnumDescriptor {
	for i := range e.enums {
		enum := &e.enums[i]
		if enum.Name() == s {
			return enum
		}
	}
	return nil
}
// enumDescriptor is the linker's implementation of
// protoreflect.EnumDescriptor, delegating unimplemented methods to the
// embedded no-op descriptor.
type enumDescriptor struct {
	protoreflect.EnumDescriptor
	file       *result
	parent     protoreflect.Descriptor
	index      int
	proto      *descriptorpb.EnumDescriptorProto
	fqn        string
	values     enValDescriptors
	rsvdRanges enumRanges
	rsvdNames  names
}

// Compile-time interface satisfaction checks.
var _ protoreflect.EnumDescriptor = (*enumDescriptor)(nil)
var _ protoutil.DescriptorProtoWrapper = (*enumDescriptor)(nil)

// createEnumDescriptor initializes ret in place from ed, registering it in
// r.descriptors under its fully-qualified name and creating its values and
// reserved names/ranges.
func (r *result) createEnumDescriptor(ret *enumDescriptor, ed *descriptorpb.EnumDescriptorProto, parent protoreflect.Descriptor, index int, fqn string, pool *allocPool) {
	r.descriptors[fqn] = ret
	ret.EnumDescriptor = noOpEnum
	ret.file = r
	ret.parent = parent
	ret.index = index
	ret.proto = ed
	ret.fqn = fqn
	// Unlike all other elements, the fully-qualified names of enum values
	// are NOT scoped to their parent element (the enum), but rather to
	// the enum's parent element. This follows C++ scoping rules for
	// enum values.
	prefix := strings.TrimSuffix(fqn, ed.GetName())
	ret.values = r.createEnumValues(prefix, ret, ed.Value, pool)
	ret.rsvdRanges = createEnumRanges(ed.ReservedRange)
	ret.rsvdNames = names{s: ed.ReservedName}
}
// EnumDescriptorProto returns the underlying descriptor proto.
func (e *enumDescriptor) EnumDescriptorProto() *descriptorpb.EnumDescriptorProto {
	return e.proto
}

// AsProto returns the underlying descriptor proto as a proto.Message.
func (e *enumDescriptor) AsProto() proto.Message {
	return e.proto
}

// ParentFile returns the file in which this enum is declared.
func (e *enumDescriptor) ParentFile() protoreflect.FileDescriptor {
	return e.file
}

// Parent returns the enclosing file or message.
func (e *enumDescriptor) Parent() protoreflect.Descriptor {
	return e.parent
}

// Index returns this enum's index within its parent.
func (e *enumDescriptor) Index() int {
	return e.index
}

// Syntax reports the syntax of the enclosing file.
func (e *enumDescriptor) Syntax() protoreflect.Syntax {
	return e.file.Syntax()
}

// Name returns the enum's simple name.
func (e *enumDescriptor) Name() protoreflect.Name {
	return protoreflect.Name(e.proto.GetName())
}

// FullName returns the enum's fully-qualified name.
func (e *enumDescriptor) FullName() protoreflect.FullName {
	return protoreflect.FullName(e.fqn)
}

// IsPlaceholder reports false: this is a fully-linked descriptor.
func (e *enumDescriptor) IsPlaceholder() bool {
	return false
}

// Options returns the EnumOptions message (may be nil).
func (e *enumDescriptor) Options() protoreflect.ProtoMessage {
	return e.proto.Options
}

// Values returns the enum's values.
func (e *enumDescriptor) Values() protoreflect.EnumValueDescriptors {
	return &e.values
}

// ReservedNames returns the enum's reserved value names.
func (e *enumDescriptor) ReservedNames() protoreflect.Names {
	return e.rsvdNames
}

// ReservedRanges returns the enum's reserved number ranges.
func (e *enumDescriptor) ReservedRanges() protoreflect.EnumRanges {
	return e.rsvdRanges
}

// IsClosed reports whether the enum is closed (rejects unknown values),
// as determined by the resolved enum_type editions feature.
func (e *enumDescriptor) IsClosed() bool {
	enumType := resolveFeature(e, enumTypeField)
	return descriptorpb.FeatureSet_EnumType(enumType.Enum()) == descriptorpb.FeatureSet_CLOSED
}
// enumRanges implements protoreflect.EnumRanges over [start, end] pairs.
type enumRanges struct {
	protoreflect.EnumRanges
	ranges [][2]protoreflect.EnumNumber
}

// createEnumRanges converts enum reserved range protos into start/end pairs.
func createEnumRanges(rangeProtos []*descriptorpb.EnumDescriptorProto_EnumReservedRange) enumRanges {
	ranges := make([][2]protoreflect.EnumNumber, len(rangeProtos))
	for i, r := range rangeProtos {
		ranges[i] = [2]protoreflect.EnumNumber{
			protoreflect.EnumNumber(r.GetStart()),
			protoreflect.EnumNumber(r.GetEnd()),
		}
	}
	return enumRanges{ranges: ranges}
}

// Len returns the number of ranges.
func (e enumRanges) Len() int {
	return len(e.ranges)
}

// Get returns the i-th range as a [start, end] pair.
func (e enumRanges) Get(i int) [2]protoreflect.EnumNumber {
	return e.ranges[i]
}

// Has reports whether n falls in any range. Note that the end bound is
// treated as INCLUSIVE (r[1] >= n), matching the end-inclusive encoding of
// enum reserved ranges in descriptor protos.
func (e enumRanges) Has(n protoreflect.EnumNumber) bool {
	for _, r := range e.ranges {
		if r[0] <= n && r[1] >= n {
			return true
		}
	}
	return false
}
// enValDescriptors implements protoreflect.EnumValueDescriptors over a
// flat slice of enValDescriptor values.
type enValDescriptors struct {
	protoreflect.EnumValueDescriptors
	vals []enValDescriptor
}

// createEnumValues builds descriptors for enValProtos. Note that prefix is
// the ENUM's parent scope, not the enum itself (C++ scoping rules; see
// createEnumDescriptor).
func (r *result) createEnumValues(prefix string, parent *enumDescriptor, enValProtos []*descriptorpb.EnumValueDescriptorProto, pool *allocPool) enValDescriptors {
	vals := pool.getEnumValues(len(enValProtos))
	for i, enValProto := range enValProtos {
		r.createEnumValueDescriptor(&vals[i], enValProto, parent, i, prefix+enValProto.GetName())
	}
	return enValDescriptors{vals: vals}
}

// Len returns the number of values.
func (e *enValDescriptors) Len() int {
	return len(e.vals)
}

// Get returns the i-th value.
func (e *enValDescriptors) Get(i int) protoreflect.EnumValueDescriptor {
	return &e.vals[i]
}

// ByName returns the value with the given simple name, or nil if absent.
func (e *enValDescriptors) ByName(s protoreflect.Name) protoreflect.EnumValueDescriptor {
	for i := range e.vals {
		val := &e.vals[i]
		if val.Name() == s {
			return val
		}
	}
	return nil
}

// ByNumber returns the FIRST value with the given number (aliases may
// share a number), or nil if absent.
func (e *enValDescriptors) ByNumber(n protoreflect.EnumNumber) protoreflect.EnumValueDescriptor {
	for i := range e.vals {
		val := &e.vals[i]
		if val.Number() == n {
			return val
		}
	}
	return nil
}
// enValDescriptor is the linker's implementation of
// protoreflect.EnumValueDescriptor, delegating unimplemented methods to the
// embedded no-op descriptor.
type enValDescriptor struct {
	protoreflect.EnumValueDescriptor
	file   *result
	parent *enumDescriptor
	index  int
	proto  *descriptorpb.EnumValueDescriptorProto
	fqn    string
}

// Compile-time interface satisfaction checks.
var _ protoreflect.EnumValueDescriptor = (*enValDescriptor)(nil)
var _ protoutil.DescriptorProtoWrapper = (*enValDescriptor)(nil)

// createEnumValueDescriptor initializes ret in place from ed and registers
// it in r.descriptors under its fully-qualified name.
func (r *result) createEnumValueDescriptor(ret *enValDescriptor, ed *descriptorpb.EnumValueDescriptorProto, parent *enumDescriptor, index int, fqn string) {
	r.descriptors[fqn] = ret
	ret.EnumValueDescriptor = noOpEnumValue
	ret.file = r
	ret.parent = parent
	ret.index = index
	ret.proto = ed
	ret.fqn = fqn
}
// EnumValueDescriptorProto returns the underlying descriptor proto.
func (e *enValDescriptor) EnumValueDescriptorProto() *descriptorpb.EnumValueDescriptorProto {
	return e.proto
}

// AsProto returns the underlying descriptor proto as a proto.Message.
func (e *enValDescriptor) AsProto() proto.Message {
	return e.proto
}

// ParentFile returns the file in which this value is declared.
func (e *enValDescriptor) ParentFile() protoreflect.FileDescriptor {
	return e.file
}

// Parent returns the enclosing enum.
func (e *enValDescriptor) Parent() protoreflect.Descriptor {
	return e.parent
}

// Index returns this value's index within its enum.
func (e *enValDescriptor) Index() int {
	return e.index
}

// Syntax reports the syntax of the enclosing file.
func (e *enValDescriptor) Syntax() protoreflect.Syntax {
	return e.file.Syntax()
}

// Name returns the value's simple name.
func (e *enValDescriptor) Name() protoreflect.Name {
	return protoreflect.Name(e.proto.GetName())
}

// FullName returns the value's fully-qualified name (scoped to the enum's
// PARENT, per C++ scoping rules).
func (e *enValDescriptor) FullName() protoreflect.FullName {
	return protoreflect.FullName(e.fqn)
}

// IsPlaceholder reports false: this is a fully-linked descriptor.
func (e *enValDescriptor) IsPlaceholder() bool {
	return false
}

// Options returns the EnumValueOptions message (may be nil).
func (e *enValDescriptor) Options() protoreflect.ProtoMessage {
	return e.proto.Options
}

// Number returns the value's numeric value.
func (e *enValDescriptor) Number() protoreflect.EnumNumber {
	return protoreflect.EnumNumber(e.proto.GetNumber())
}
// extDescriptors implements protoreflect.ExtensionDescriptors over a flat
// slice of extTypeDescriptor values.
type extDescriptors struct {
	protoreflect.ExtensionDescriptors
	exts []extTypeDescriptor
}

// createExtensions builds descriptors for extProtos, using pool to amortize
// allocations. prefix is prepended to each extension name to form its
// fully-qualified name.
func (r *result) createExtensions(prefix string, parent protoreflect.Descriptor, extProtos []*descriptorpb.FieldDescriptorProto, pool *allocPool) extDescriptors {
	exts := pool.getExtensions(len(extProtos))
	for i, extProto := range extProtos {
		r.createExtTypeDescriptor(&exts[i], extProto, parent, i, prefix+extProto.GetName())
	}
	return extDescriptors{exts: exts}
}

// Len returns the number of extensions.
func (e *extDescriptors) Len() int {
	return len(e.exts)
}

// Get returns the i-th extension.
func (e *extDescriptors) Get(i int) protoreflect.ExtensionDescriptor {
	return &e.exts[i]
}

// ByName returns the extension with the given simple name, or nil if absent.
func (e *extDescriptors) ByName(s protoreflect.Name) protoreflect.ExtensionDescriptor {
	for i := range e.exts {
		ext := &e.exts[i]
		if ext.Name() == s {
			return ext
		}
	}
	return nil
}
// extTypeDescriptor wraps a fldDescriptor with a dynamicpb extension type,
// implementing protoreflect.ExtensionTypeDescriptor.
type extTypeDescriptor struct {
	protoreflect.ExtensionTypeDescriptor
	field fldDescriptor
}

// Compile-time interface satisfaction check.
var _ protoutil.DescriptorProtoWrapper = &extTypeDescriptor{}

// createExtTypeDescriptor initializes ret in place from fd, registering it
// in r.descriptors and constructing the dynamicpb extension type around it.
func (r *result) createExtTypeDescriptor(ret *extTypeDescriptor, fd *descriptorpb.FieldDescriptorProto, parent protoreflect.Descriptor, index int, fqn string) {
	r.descriptors[fqn] = ret
	ret.field = fldDescriptor{FieldDescriptor: noOpExtension, file: r, parent: parent, index: index, proto: fd, fqn: fqn}
	ret.ExtensionTypeDescriptor = dynamicpb.NewExtensionType(&ret.field).TypeDescriptor()
}

// FieldDescriptorProto returns the underlying field descriptor proto.
func (e *extTypeDescriptor) FieldDescriptorProto() *descriptorpb.FieldDescriptorProto {
	return e.field.proto
}

// AsProto returns the underlying field descriptor proto as a proto.Message.
func (e *extTypeDescriptor) AsProto() proto.Message {
	return e.field.proto
}
// fldDescriptors implements protoreflect.FieldDescriptors.
type fldDescriptors struct {
	protoreflect.FieldDescriptors
	// We use pointers here, instead of flattened slice, because oneofs
	// also have fields, but need to point to values in the parent
	// message's fields. Even though they are pointers, in the containing
	// message, we always allocate a flattened slice and then point into
	// that, so we're still doing fewer allocations (2 per set of fields
	// instead of 1 per each field).
	fields []*fldDescriptor
}

// createFields builds descriptors for fldProtos, using pool for the backing
// array and returning a slice of pointers into it (see comment above).
func (r *result) createFields(prefix string, parent *msgDescriptor, fldProtos []*descriptorpb.FieldDescriptorProto, pool *allocPool) fldDescriptors {
	fields := pool.getFields(len(fldProtos))
	fieldPtrs := make([]*fldDescriptor, len(fldProtos))
	for i, fldProto := range fldProtos {
		r.createFieldDescriptor(&fields[i], fldProto, parent, i, prefix+fldProto.GetName())
		fieldPtrs[i] = &fields[i]
	}
	return fldDescriptors{fields: fieldPtrs}
}
// Len returns the number of fields.
func (f *fldDescriptors) Len() int {
	return len(f.fields)
}

// Get returns the i-th field.
func (f *fldDescriptors) Get(i int) protoreflect.FieldDescriptor {
	return f.fields[i]
}

// ByName returns the field with the given name, or nil if absent.
func (f *fldDescriptors) ByName(s protoreflect.Name) protoreflect.FieldDescriptor {
	for _, fld := range f.fields {
		if fld.Name() == s {
			return fld
		}
	}
	return nil
}

// ByJSONName returns the field with the given JSON name, or nil if absent.
func (f *fldDescriptors) ByJSONName(s string) protoreflect.FieldDescriptor {
	for _, fld := range f.fields {
		if fld.JSONName() == s {
			return fld
		}
	}
	return nil
}

// ByTextName returns the field with the given text-format name, trying the
// plain name first and falling back to a scan over TextName.
func (f *fldDescriptors) ByTextName(s string) protoreflect.FieldDescriptor {
	fld := f.ByName(protoreflect.Name(s))
	if fld != nil {
		return fld
	}
	// Groups use type name instead, so we fallback to slow search
	for _, fld := range f.fields {
		if fld.TextName() == s {
			return fld
		}
	}
	return nil
}

// ByNumber returns the field with the given number, or nil if absent.
func (f *fldDescriptors) ByNumber(n protoreflect.FieldNumber) protoreflect.FieldDescriptor {
	for _, fld := range f.fields {
		if fld.Number() == n {
			return fld
		}
	}
	return nil
}
// fldDescriptor is the linker's implementation of
// protoreflect.FieldDescriptor, delegating unimplemented methods to the
// embedded no-op descriptor. The msgType/extendee/enumType/oneof cross
// references are resolved elsewhere during linking.
type fldDescriptor struct {
	protoreflect.FieldDescriptor
	file     *result
	parent   protoreflect.Descriptor
	index    int
	proto    *descriptorpb.FieldDescriptorProto
	fqn      string
	msgType  protoreflect.MessageDescriptor
	extendee protoreflect.MessageDescriptor
	enumType protoreflect.EnumDescriptor
	oneof    protoreflect.OneofDescriptor
}

// Compile-time interface satisfaction checks.
var _ protoreflect.FieldDescriptor = (*fldDescriptor)(nil)
var _ protoutil.DescriptorProtoWrapper = (*fldDescriptor)(nil)

// createFieldDescriptor initializes ret in place from fd and registers it
// in r.descriptors under its fully-qualified name.
func (r *result) createFieldDescriptor(ret *fldDescriptor, fd *descriptorpb.FieldDescriptorProto, parent *msgDescriptor, index int, fqn string) {
	r.descriptors[fqn] = ret
	ret.FieldDescriptor = noOpField
	ret.file = r
	ret.parent = parent
	ret.index = index
	ret.proto = fd
	ret.fqn = fqn
}

// FieldDescriptorProto returns the underlying descriptor proto.
func (f *fldDescriptor) FieldDescriptorProto() *descriptorpb.FieldDescriptorProto {
	return f.proto
}

// AsProto returns the underlying descriptor proto as a proto.Message.
func (f *fldDescriptor) AsProto() proto.Message {
	return f.proto
}

// ParentFile returns the file in which this field is declared.
func (f *fldDescriptor) ParentFile() protoreflect.FileDescriptor {
	return f.file
}

// Parent returns the enclosing message (or file, for extensions).
func (f *fldDescriptor) Parent() protoreflect.Descriptor {
	return f.parent
}

// Index returns this field's index within its parent.
func (f *fldDescriptor) Index() int {
	return f.index
}

// Syntax reports the syntax of the enclosing file.
func (f *fldDescriptor) Syntax() protoreflect.Syntax {
	return f.file.Syntax()
}

// Name returns the field's simple name.
func (f *fldDescriptor) Name() protoreflect.Name {
	return protoreflect.Name(f.proto.GetName())
}

// FullName returns the field's fully-qualified name.
func (f *fldDescriptor) FullName() protoreflect.FullName {
	return protoreflect.FullName(f.fqn)
}

// IsPlaceholder reports false: this is a fully-linked descriptor.
func (f *fldDescriptor) IsPlaceholder() bool {
	return false
}

// Options returns the FieldOptions message (may be nil).
func (f *fldDescriptor) Options() protoreflect.ProtoMessage {
	return f.proto.Options
}

// Number returns the field's number.
func (f *fldDescriptor) Number() protoreflect.FieldNumber {
	return protoreflect.FieldNumber(f.proto.GetNumber())
}
// Cardinality reports whether the field is optional, required, or repeated.
// For editions files, a label of OPTIONAL with the LEGACY_REQUIRED field
// presence feature is reported as Required.
func (f *fldDescriptor) Cardinality() protoreflect.Cardinality {
	switch f.proto.GetLabel() {
	case descriptorpb.FieldDescriptorProto_LABEL_REPEATED:
		return protoreflect.Repeated
	case descriptorpb.FieldDescriptorProto_LABEL_REQUIRED:
		return protoreflect.Required
	case descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL:
		if f.Syntax() == protoreflect.Editions {
			// Editions does not use label to indicate required. It instead
			// uses a feature, and label is always optional.
			fieldPresence := descriptorpb.FeatureSet_FieldPresence(resolveFeature(f, fieldPresenceField).Enum())
			if fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED {
				return protoreflect.Required
			}
		}
		return protoreflect.Optional
	default:
		// Unrecognized label; zero is an invalid cardinality.
		return 0
	}
}
// Kind reports the field's kind. For editions files, a message field whose
// resolved message_encoding feature is DELIMITED is reported as GroupKind.
// Map fields and map-entry fields are excluded: they never use delimited
// encoding.
func (f *fldDescriptor) Kind() protoreflect.Kind {
	if f.proto.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE && f.Syntax() == protoreflect.Editions &&
		!f.IsMap() && !f.parentIsMap() {
		// In editions, "group encoding" (aka "delimited encoding") is toggled
		// via a feature. So we report group kind when that feature is enabled.
		messageEncoding := resolveFeature(f, messageEncodingField)
		if descriptorpb.FeatureSet_MessageEncoding(messageEncoding.Enum()) == descriptorpb.FeatureSet_DELIMITED {
			return protoreflect.GroupKind
		}
	}
	return protoreflect.Kind(f.proto.GetType())
}
// HasJSONName reports whether an explicit JSON name was recorded in the
// descriptor proto.
func (f *fldDescriptor) HasJSONName() bool {
	return f.proto.JsonName != nil
}

// JSONName returns the field's JSON name. Extensions use their text-format
// name ("[full.name]") instead of the recorded JSON name.
func (f *fldDescriptor) JSONName() string {
	if f.IsExtension() {
		return f.TextName()
	}
	return f.proto.GetJsonName()
}

// TextName returns the field's name as used by the text format: extensions
// are bracketed full names, group-like fields use their message type's
// name, and everything else uses the plain field name.
func (f *fldDescriptor) TextName() string {
	if f.IsExtension() {
		return fmt.Sprintf("[%s]", f.FullName())
	}
	if f.looksLikeGroup() {
		// groups use the type name
		return string(protoreflect.FullName(f.proto.GetTypeName()).Name())
	}
	return string(f.Name())
}
// looksLikeGroup reports whether this field resembles a proto2-style group:
// it uses group/delimited encoding (per f.Kind), its message type is a
// sibling declared in the same scope, and the field name is the lowercased
// form of the message type's name.
func (f *fldDescriptor) looksLikeGroup() bool {
	if f.Kind() != protoreflect.GroupKind {
		return false
	}
	msg := f.Message()
	if msg.FullName().Parent() != f.FullName().Parent() {
		return false
	}
	return string(f.Name()) == strings.ToLower(string(msg.Name()))
}
// HasPresence reports whether the field distinguishes set-but-zero from
// unset. Repeated fields never track presence; extensions, message/group
// fields, and oneof members always do. Everything else is governed by the
// resolved field_presence editions feature.
func (f *fldDescriptor) HasPresence() bool {
	if f.proto.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED {
		return false
	}
	if f.IsExtension() ||
		f.Kind() == protoreflect.MessageKind || f.Kind() == protoreflect.GroupKind ||
		f.proto.OneofIndex != nil {
		return true
	}
	fieldPresence := descriptorpb.FeatureSet_FieldPresence(resolveFeature(f, fieldPresenceField).Enum())
	return fieldPresence == descriptorpb.FeatureSet_EXPLICIT || fieldPresence == descriptorpb.FeatureSet_LEGACY_REQUIRED
}

// IsExtension reports whether this field is an extension (has an extendee).
func (f *fldDescriptor) IsExtension() bool {
	return f.proto.GetExtendee() != ""
}
// HasOptionalKeyword reports whether the "optional" keyword appeared in
// source, mirroring the Go protobuf runtime's (somewhat surprising)
// behavior for proto3 optional extensions.
func (f *fldDescriptor) HasOptionalKeyword() bool {
	if f.proto.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL {
		return false
	}
	if f.proto.GetProto3Optional() {
		// NB: This smells weird to return false here. If the proto3_optional field
		// is set, it's because the keyword WAS present. However, the Go runtime
		// returns false for this case, so we mirror that behavior.
		return !f.IsExtension()
	}
	// If it's optional, but not a proto3 optional, then the keyword is only
	// present for proto2 files, for fields that are not part of a oneof.
	return f.file.Syntax() == protoreflect.Proto2 && f.proto.OneofIndex == nil
}

// IsWeak reports whether the deprecated weak option is set.
func (f *fldDescriptor) IsWeak() bool {
	return f.proto.Options.GetWeak() //nolint:staticcheck // yes, is_weak is deprecated; but we still have to query it to implement this interface
}
// IsPacked reports whether this repeated field uses packed encoding. An
// explicitly set packed option wins; otherwise the resolved
// repeated_field_encoding feature decides. Fields that are not repeated, or
// whose kind cannot be packed, always return false.
func (f *fldDescriptor) IsPacked() bool {
	if f.Cardinality() != protoreflect.Repeated {
		return false
	}
	if !internal.CanPack(f.Kind()) {
		return false
	}
	// An explicit packed option on the field takes precedence.
	if opts := f.proto.GetOptions(); opts != nil && opts.Packed != nil {
		return *opts.Packed
	}
	// Otherwise, consult the resolved repeated_field_encoding feature.
	encoding := resolveFeature(f, repeatedFieldEncodingField)
	return descriptorpb.FeatureSet_RepeatedFieldEncoding(encoding.Enum()) == descriptorpb.FeatureSet_PACKED
}
// IsList reports whether this field is a repeated field that is not a map
// (i.e. its type is not a map-entry message).
func (f *fldDescriptor) IsList() bool {
	isRepeated := f.proto.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED
	return isRepeated && !f.isMapEntry()
}
// IsMap reports whether this field is a map field: a repeated,
// non-extension field whose type is a map-entry message.
func (f *fldDescriptor) IsMap() bool {
	return f.proto.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED &&
		!f.IsExtension() &&
		f.isMapEntry()
}
// isMapEntry reports whether this field's type is a map-entry message (the
// synthetic message type generated for map fields).
func (f *fldDescriptor) isMapEntry() bool {
	if f.proto.GetType() != descriptorpb.FieldDescriptorProto_TYPE_MESSAGE {
		return false
	}
	return f.Message().IsMapEntry()
}

// parentIsMap reports whether this field is itself declared inside a
// map-entry message (i.e. it is the key or value field of a map).
func (f *fldDescriptor) parentIsMap() bool {
	parent, ok := f.parent.(protoreflect.MessageDescriptor)
	return ok && parent.IsMapEntry()
}
// MapKey returns the key field of a map field, or nil if this field is not
// a map. By map-entry convention, the key is field number 1 of the entry
// message.
func (f *fldDescriptor) MapKey() protoreflect.FieldDescriptor {
	if !f.IsMap() {
		return nil
	}
	return f.Message().Fields().ByNumber(1)
}

// MapValue returns the value field of a map field, or nil if this field is
// not a map. By map-entry convention, the value is field number 2 of the
// entry message.
func (f *fldDescriptor) MapValue() protoreflect.FieldDescriptor {
	if !f.IsMap() {
		return nil
	}
	return f.Message().Fields().ByNumber(2)
}

// HasDefault reports whether an explicit default value was declared for
// this field in source.
func (f *fldDescriptor) HasDefault() bool {
	return f.proto.DefaultValue != nil
}
// Default returns the effective default value for this field: the declared
// default if one was given and parses, otherwise the zero value for the
// field's kind. Repeated and message/group-typed fields have no default and
// return an invalid (zero) Value.
func (f *fldDescriptor) Default() protoreflect.Value {
	// We only return a valid value for scalar fields
	if f.proto.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED ||
		f.Kind() == protoreflect.GroupKind || f.Kind() == protoreflect.MessageKind {
		return protoreflect.Value{}
	}
	if f.proto.DefaultValue != nil {
		defVal := f.parseDefaultValue(f.proto.GetDefaultValue())
		if defVal.IsValid() {
			return defVal
		}
		// if we cannot parse a valid value, fall back to zero value below
	}
	// No custom default value, so return the zero value for the type
	switch f.Kind() {
	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
		return protoreflect.ValueOfInt32(0)
	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
		return protoreflect.ValueOfInt64(0)
	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
		return protoreflect.ValueOfUint32(0)
	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
		return protoreflect.ValueOfUint64(0)
	case protoreflect.FloatKind:
		return protoreflect.ValueOfFloat32(0)
	case protoreflect.DoubleKind:
		return protoreflect.ValueOfFloat64(0)
	case protoreflect.BoolKind:
		return protoreflect.ValueOfBool(false)
	case protoreflect.BytesKind:
		return protoreflect.ValueOfBytes(nil)
	case protoreflect.StringKind:
		return protoreflect.ValueOfString("")
	case protoreflect.EnumKind:
		// The zero value for an enum field is its first declared value.
		return protoreflect.ValueOfEnum(f.Enum().Values().Get(0).Number())
	case protoreflect.GroupKind, protoreflect.MessageKind:
		// NOTE: unreachable in practice; message/group kinds already returned
		// an invalid Value at the top of this function.
		return protoreflect.ValueOfMessage(dynamicpb.NewMessage(f.Message()))
	default:
		panic(fmt.Sprintf("unknown kind: %v", f.Kind()))
	}
}
// parseDefaultValue parses the string form of a declared default value
// according to this field's kind. It returns an invalid (zero) Value when
// the string cannot be parsed for the kind, or when the kind does not
// support defaults.
func (f *fldDescriptor) parseDefaultValue(val string) protoreflect.Value {
	switch f.Kind() {
	case protoreflect.EnumKind:
		// Enum defaults are spelled as the value's simple name.
		vd := f.Enum().Values().ByName(protoreflect.Name(val))
		if vd != nil {
			return protoreflect.ValueOfEnum(vd.Number())
		}
		return protoreflect.Value{}
	case protoreflect.BoolKind:
		switch val {
		case "true":
			return protoreflect.ValueOfBool(true)
		case "false":
			return protoreflect.ValueOfBool(false)
		default:
			return protoreflect.Value{}
		}
	case protoreflect.BytesKind:
		// Bytes defaults are stored with C-style escaping; undo it.
		return protoreflect.ValueOfBytes([]byte(unescape(val)))
	case protoreflect.StringKind:
		return protoreflect.ValueOfString(val)
	case protoreflect.FloatKind:
		// NB: the local f below shadows the method receiver.
		if f, err := strconv.ParseFloat(val, 32); err == nil {
			return protoreflect.ValueOfFloat32(float32(f))
		}
		return protoreflect.Value{}
	case protoreflect.DoubleKind:
		if f, err := strconv.ParseFloat(val, 64); err == nil {
			return protoreflect.ValueOfFloat64(f)
		}
		return protoreflect.Value{}
	case protoreflect.Int32Kind, protoreflect.Sint32Kind, protoreflect.Sfixed32Kind:
		if i, err := strconv.ParseInt(val, 10, 32); err == nil {
			return protoreflect.ValueOfInt32(int32(i))
		}
		return protoreflect.Value{}
	case protoreflect.Uint32Kind, protoreflect.Fixed32Kind:
		if i, err := strconv.ParseUint(val, 10, 32); err == nil {
			return protoreflect.ValueOfUint32(uint32(i))
		}
		return protoreflect.Value{}
	case protoreflect.Int64Kind, protoreflect.Sint64Kind, protoreflect.Sfixed64Kind:
		if i, err := strconv.ParseInt(val, 10, 64); err == nil {
			return protoreflect.ValueOfInt64(i)
		}
		return protoreflect.Value{}
	case protoreflect.Uint64Kind, protoreflect.Fixed64Kind:
		if i, err := strconv.ParseUint(val, 10, 64); err == nil {
			return protoreflect.ValueOfUint64(i)
		}
		return protoreflect.Value{}
	default:
		// Message/group kinds (and anything unexpected) have no parseable default.
		return protoreflect.Value{}
	}
}
// unescape reverses the C-style escaping that protoc applies when encoding
// default values for 'bytes' fields. It understands the simple escapes
// (\a \b \f \n \r \t \v \\ \' \" \?), octal (\o, \oo, \ooo), hex (\xh,
// \xhh), and Unicode (\uXXXX, \UXXXXXXXX) sequences. Malformed or
// truncated escapes are copied through verbatim rather than dropped.
func unescape(s string) string {
	isOctalDigit := func(b byte) bool { return b >= '0' && b <= '7' }
	isHexDigit := func(b byte) bool {
		return (b >= '0' && b <= '9') || (b >= 'a' && b <= 'f') || (b >= 'A' && b <= 'F')
	}
	var runeBuf [4]byte
	result := make([]byte, 0, len(s))
	for len(s) > 0 {
		if s[0] != '\\' || len(s) < 2 {
			// Ordinary byte, or a trailing backslash with nothing after it.
			result = append(result, s[0])
			s = s[1:]
			continue
		}
		skip := 2 // by default, consume '\' plus the escaped character
		switch s[1] {
		case 'a':
			result = append(result, '\a')
		case 'b':
			result = append(result, '\b')
		case 'f':
			result = append(result, '\f')
		case 'n':
			result = append(result, '\n')
		case 'r':
			result = append(result, '\r')
		case 't':
			result = append(result, '\t')
		case 'v':
			result = append(result, '\v')
		case '\\', '\'', '"', '?':
			result = append(result, s[1])
		case 'x', 'X':
			// \x followed by one or two hex digits.
			digits := 0
			for digits < 2 && 2+digits < len(s) && isHexDigit(s[2+digits]) {
				digits++
			}
			if digits == 0 {
				// No hex digits at all: copy the bad escape through.
				result = append(result, s[:2]...)
			} else {
				v, err := strconv.ParseUint(s[2:2+digits], 16, 8)
				if err != nil {
					// Shouldn't happen for at most two hex digits; copy through.
					result = append(result, s[:2+digits]...)
				} else {
					result = append(result, byte(v))
				}
				skip = 2 + digits
			}
		case '0', '1', '2', '3', '4', '5', '6', '7':
			// One to three octal digits, starting at s[1].
			digits := 1
			for digits < 3 && 1+digits < len(s) && isOctalDigit(s[1+digits]) {
				digits++
			}
			if v, err := strconv.ParseUint(s[1:1+digits], 8, 8); err == nil {
				result = append(result, byte(v))
			} else {
				// Value does not fit in one byte: copy the bad escape through.
				result = append(result, s[:1+digits]...)
			}
			skip = 1 + digits
		case 'u':
			if len(s) < 6 {
				// Truncated escape: copy the remainder through.
				result = append(result, s...)
				skip = len(s)
			} else {
				if v, err := strconv.ParseUint(s[2:6], 16, 16); err == nil {
					n := utf8.EncodeRune(runeBuf[:], rune(v))
					result = append(result, runeBuf[:n]...)
				} else {
					// Bad escape: copy through.
					result = append(result, s[:6]...)
				}
				skip = 6
			}
		case 'U':
			if len(s) < 10 {
				// Truncated escape: copy the remainder through.
				result = append(result, s...)
				skip = len(s)
			} else {
				if v, err := strconv.ParseUint(s[2:10], 16, 32); err == nil && v <= 0x10ffff {
					n := utf8.EncodeRune(runeBuf[:], rune(v))
					result = append(result, runeBuf[:n]...)
				} else {
					// Bad escape or out-of-range code point: copy through.
					result = append(result, s[:10]...)
				}
				skip = 10
			}
		default:
			// Unknown escape: copy it through as-is.
			result = append(result, s[:2]...)
		}
		s = s[skip:]
	}
	return string(result)
}
// isOctal reports whether b is an ASCII octal digit ('0' through '7').
func isOctal(b byte) bool { return b >= '0' && b <= '7' }

// isHex reports whether b is an ASCII hexadecimal digit (either case).
func isHex(b byte) bool {
	return (b >= '0' && b <= '9') || (b >= 'a' && b <= 'f') || (b >= 'A' && b <= 'F')
}
func matchPrefix(s string, limit int, fn func(byte) bool) int {
l := len(s)
if l > limit {
l = limit
}
i := 0
for ; i < l; i++ {
if !fn(s[i]) {
return i
}
}
return i
}
// DefaultEnumValue returns the enum value descriptor named by this field's
// declared default, or nil if the field is not an enum field or no default
// was declared (or the named value does not exist).
func (f *fldDescriptor) DefaultEnumValue() protoreflect.EnumValueDescriptor {
	ed := f.Enum()
	if ed == nil {
		return nil
	}
	if f.proto.DefaultValue != nil {
		if val := ed.Values().ByName(protoreflect.Name(f.proto.GetDefaultValue())); val != nil {
			return val
		}
	}
	// if no default specified in source, return nil
	return nil
}
// ContainingOneof returns the oneof this field belongs to, or nil if it is
// not a member of a oneof.
func (f *fldDescriptor) ContainingOneof() protoreflect.OneofDescriptor {
	return f.oneof
}

// ContainingMessage returns the message this field belongs to. For an
// extension, that is the extended message (the extendee), not the scope in
// which the extension was declared.
func (f *fldDescriptor) ContainingMessage() protoreflect.MessageDescriptor {
	if f.extendee != nil {
		return f.extendee
	}
	return f.parent.(protoreflect.MessageDescriptor) //nolint:errcheck
}

// Enum returns this field's enum type (nil for non-enum fields).
func (f *fldDescriptor) Enum() protoreflect.EnumDescriptor {
	return f.enumType
}

// Message returns this field's message type (nil for fields that are not
// message- or group-typed).
func (f *fldDescriptor) Message() protoreflect.MessageDescriptor {
	return f.msgType
}
// oneofDescriptors implements protoreflect.OneofDescriptors over a slice of
// oneofDescriptor values. The embedded interface supplies any methods not
// overridden below.
type oneofDescriptors struct {
	protoreflect.OneofDescriptors
	oneofs []oneofDescriptor
}
// createOneofs builds descriptors for all oneofs declared in the given
// parent message. Each oneof's fully-qualified name is prefix plus its
// simple name; backing storage is taken from the given pool.
func (r *result) createOneofs(prefix string, parent *msgDescriptor, ooProtos []*descriptorpb.OneofDescriptorProto, pool *allocPool) oneofDescriptors {
	allocated := pool.getOneofs(len(ooProtos))
	for i, ooProto := range ooProtos {
		fqn := prefix + ooProto.GetName()
		r.createOneofDescriptor(&allocated[i], ooProto, parent, i, fqn)
	}
	return oneofDescriptors{oneofs: allocated}
}
// Len returns the number of oneofs in the collection.
func (o *oneofDescriptors) Len() int {
	return len(o.oneofs)
}

// Get returns the i-th oneof (panics if i is out of range).
func (o *oneofDescriptors) Get(i int) protoreflect.OneofDescriptor {
	return &o.oneofs[i]
}

// ByName returns the oneof with the given simple name, or nil if there is
// none. Linear scan; oneof counts per message are small.
func (o *oneofDescriptors) ByName(s protoreflect.Name) protoreflect.OneofDescriptor {
	for i := range o.oneofs {
		oo := &o.oneofs[i]
		if oo.Name() == s {
			return oo
		}
	}
	return nil
}
// oneofDescriptor implements protoreflect.OneofDescriptor for a oneof
// declared in this file. The embedded no-op interface supplies any methods
// not overridden.
type oneofDescriptor struct {
	protoreflect.OneofDescriptor
	file   *result                              // enclosing file
	parent *msgDescriptor                       // message that declares this oneof
	index  int                                  // position within the parent's oneof list
	proto  *descriptorpb.OneofDescriptorProto   // underlying descriptor proto
	fqn    string                               // fully-qualified name
	fields fldDescriptors                       // fields that belong to this oneof
}

// Compile-time interface conformance checks.
var _ protoreflect.OneofDescriptor = (*oneofDescriptor)(nil)
var _ protoutil.DescriptorProtoWrapper = (*oneofDescriptor)(nil)
// createOneofDescriptor initializes ret in place for the given oneof proto
// and registers it in the result's descriptor pool under fqn. It also
// collects, from the parent's already-created fields, those whose
// oneof_index refers to this oneof.
func (r *result) createOneofDescriptor(ret *oneofDescriptor, ood *descriptorpb.OneofDescriptorProto, parent *msgDescriptor, index int, fqn string) {
	r.descriptors[fqn] = ret
	ret.OneofDescriptor = noOpOneof
	ret.file = r
	ret.parent = parent
	ret.index = index
	ret.proto = ood
	ret.fqn = fqn
	// Gather the parent's fields that belong to this oneof.
	var fields []*fldDescriptor
	for _, fld := range parent.fields.fields {
		if fld.proto.OneofIndex != nil && int(fld.proto.GetOneofIndex()) == index {
			fields = append(fields, fld)
		}
	}
	ret.fields = fldDescriptors{fields: fields}
}
// OneofDescriptorProto returns the underlying descriptor proto.
func (o *oneofDescriptor) OneofDescriptorProto() *descriptorpb.OneofDescriptorProto {
	return o.proto
}

// AsProto returns the underlying descriptor proto as a proto.Message.
func (o *oneofDescriptor) AsProto() proto.Message {
	return o.proto
}

// ParentFile returns the file in which this oneof is declared.
func (o *oneofDescriptor) ParentFile() protoreflect.FileDescriptor {
	return o.file
}

// Parent returns the message that declares this oneof.
func (o *oneofDescriptor) Parent() protoreflect.Descriptor {
	return o.parent
}

// Index returns this oneof's position within its parent's oneof list.
func (o *oneofDescriptor) Index() int {
	return o.index
}

// Syntax returns the syntax of the enclosing file.
func (o *oneofDescriptor) Syntax() protoreflect.Syntax {
	return o.file.Syntax()
}

// Name returns the oneof's simple (unqualified) name.
func (o *oneofDescriptor) Name() protoreflect.Name {
	return protoreflect.Name(o.proto.GetName())
}

// FullName returns the oneof's fully-qualified name.
func (o *oneofDescriptor) FullName() protoreflect.FullName {
	return protoreflect.FullName(o.fqn)
}

// IsPlaceholder reports false: this descriptor is fully defined.
func (o *oneofDescriptor) IsPlaceholder() bool {
	return false
}

// Options returns the oneof's options (may be nil).
func (o *oneofDescriptor) Options() protoreflect.ProtoMessage {
	return o.proto.Options
}

// IsSynthetic reports whether this oneof was synthesized for a proto3
// optional field. It inspects the first field in the parent whose
// oneof_index refers to this oneof; a synthetic oneof's sole member is the
// proto3 optional field itself.
func (o *oneofDescriptor) IsSynthetic() bool {
	for _, fld := range o.parent.proto.GetField() {
		if fld.OneofIndex != nil && int(fld.GetOneofIndex()) == o.index {
			return fld.GetProto3Optional()
		}
	}
	return false // NB: we should never get here
}

// Fields returns the fields that belong to this oneof.
func (o *oneofDescriptor) Fields() protoreflect.FieldDescriptors {
	return &o.fields
}
// svcDescriptors implements protoreflect.ServiceDescriptors over a slice of
// svcDescriptor values.
type svcDescriptors struct {
	protoreflect.ServiceDescriptors
	svcs []svcDescriptor
}

// createServices builds descriptors for all services declared in the file.
// Each service's fully-qualified name is prefix plus its simple name;
// backing storage is taken from the given pool.
func (r *result) createServices(prefix string, svcProtos []*descriptorpb.ServiceDescriptorProto, pool *allocPool) svcDescriptors {
	svcs := pool.getServices(len(svcProtos))
	for i, svcProto := range svcProtos {
		r.createServiceDescriptor(&svcs[i], svcProto, i, prefix+svcProto.GetName(), pool)
	}
	return svcDescriptors{svcs: svcs}
}
// Len returns the number of services in the collection.
func (s *svcDescriptors) Len() int {
	return len(s.svcs)
}

// Get returns the i-th service (panics if i is out of range).
func (s *svcDescriptors) Get(i int) protoreflect.ServiceDescriptor {
	return &s.svcs[i]
}

// ByName returns the service with the given simple name, or nil if there is
// none. Linear scan; service counts per file are small.
func (s *svcDescriptors) ByName(n protoreflect.Name) protoreflect.ServiceDescriptor {
	for i := range s.svcs {
		svc := &s.svcs[i]
		if svc.Name() == n {
			return svc
		}
	}
	return nil
}
// svcDescriptor implements protoreflect.ServiceDescriptor for a service
// declared in this file.
type svcDescriptor struct {
	protoreflect.ServiceDescriptor
	file    *result                               // enclosing file
	index   int                                   // position within the file's service list
	proto   *descriptorpb.ServiceDescriptorProto  // underlying descriptor proto
	fqn     string                                // fully-qualified name
	methods mtdDescriptors                        // the service's methods
}

// Compile-time interface conformance checks.
var _ protoreflect.ServiceDescriptor = (*svcDescriptor)(nil)
var _ protoutil.DescriptorProtoWrapper = (*svcDescriptor)(nil)
// createServiceDescriptor initializes ret in place for the given service
// proto, registers it in the result's descriptor pool under fqn, and builds
// descriptors for the service's methods.
func (r *result) createServiceDescriptor(ret *svcDescriptor, sd *descriptorpb.ServiceDescriptorProto, index int, fqn string, pool *allocPool) {
	r.descriptors[fqn] = ret
	ret.ServiceDescriptor = noOpService
	ret.file = r
	ret.index = index
	ret.proto = sd
	ret.fqn = fqn
	// Method names are qualified with the service's full name.
	prefix := fqn + "."
	ret.methods = r.createMethods(prefix, ret, sd.Method, pool)
}
// ServiceDescriptorProto returns the underlying descriptor proto.
func (s *svcDescriptor) ServiceDescriptorProto() *descriptorpb.ServiceDescriptorProto {
	return s.proto
}

// AsProto returns the underlying descriptor proto as a proto.Message.
func (s *svcDescriptor) AsProto() proto.Message {
	return s.proto
}

// ParentFile returns the file in which this service is declared.
func (s *svcDescriptor) ParentFile() protoreflect.FileDescriptor {
	return s.file
}

// Parent returns the enclosing file (services are always top-level).
func (s *svcDescriptor) Parent() protoreflect.Descriptor {
	return s.file
}

// Index returns this service's position within the file's service list.
func (s *svcDescriptor) Index() int {
	return s.index
}

// Syntax returns the syntax of the enclosing file.
func (s *svcDescriptor) Syntax() protoreflect.Syntax {
	return s.file.Syntax()
}

// Name returns the service's simple (unqualified) name.
func (s *svcDescriptor) Name() protoreflect.Name {
	return protoreflect.Name(s.proto.GetName())
}

// FullName returns the service's fully-qualified name.
func (s *svcDescriptor) FullName() protoreflect.FullName {
	return protoreflect.FullName(s.fqn)
}

// IsPlaceholder reports false: this descriptor is fully defined.
func (s *svcDescriptor) IsPlaceholder() bool {
	return false
}

// Options returns the service's options (may be nil).
func (s *svcDescriptor) Options() protoreflect.ProtoMessage {
	return s.proto.Options
}

// Methods returns the service's method collection.
func (s *svcDescriptor) Methods() protoreflect.MethodDescriptors {
	return &s.methods
}
// mtdDescriptors implements protoreflect.MethodDescriptors over a slice of
// mtdDescriptor values.
type mtdDescriptors struct {
	protoreflect.MethodDescriptors
	mtds []mtdDescriptor
}

// createMethods builds descriptors for all methods of the given service.
// Each method's fully-qualified name is prefix plus its simple name;
// backing storage is taken from the given pool.
func (r *result) createMethods(prefix string, parent *svcDescriptor, mtdProtos []*descriptorpb.MethodDescriptorProto, pool *allocPool) mtdDescriptors {
	mtds := pool.getMethods(len(mtdProtos))
	for i, mtdProto := range mtdProtos {
		r.createMethodDescriptor(&mtds[i], mtdProto, parent, i, prefix+mtdProto.GetName())
	}
	return mtdDescriptors{mtds: mtds}
}
// Len returns the number of methods in the collection.
func (m *mtdDescriptors) Len() int {
	return len(m.mtds)
}

// Get returns the i-th method (panics if i is out of range).
func (m *mtdDescriptors) Get(i int) protoreflect.MethodDescriptor {
	return &m.mtds[i]
}

// ByName returns the method with the given simple name, or nil if there is
// none. Linear scan; method counts per service are small.
func (m *mtdDescriptors) ByName(n protoreflect.Name) protoreflect.MethodDescriptor {
	for i := range m.mtds {
		mtd := &m.mtds[i]
		if mtd.Name() == n {
			return mtd
		}
	}
	return nil
}
// mtdDescriptor implements protoreflect.MethodDescriptor for a method
// declared in this file.
type mtdDescriptor struct {
	protoreflect.MethodDescriptor
	file   *result                              // enclosing file
	parent *svcDescriptor                       // service that declares this method
	index  int                                  // position within the service's method list
	proto  *descriptorpb.MethodDescriptorProto  // underlying descriptor proto
	fqn    string                               // fully-qualified name
	// Request and response message types; not set at creation time but
	// populated later, once type references are resolved.
	inputType, outputType protoreflect.MessageDescriptor
}

// Compile-time interface conformance checks.
var _ protoreflect.MethodDescriptor = (*mtdDescriptor)(nil)
var _ protoutil.DescriptorProtoWrapper = (*mtdDescriptor)(nil)
// createMethodDescriptor initializes ret in place for the given method
// proto and registers it in the result's descriptor pool under fqn. The
// input and output types are left unset here; they are filled in when type
// references are resolved.
func (r *result) createMethodDescriptor(ret *mtdDescriptor, mtd *descriptorpb.MethodDescriptorProto, parent *svcDescriptor, index int, fqn string) {
	r.descriptors[fqn] = ret
	ret.MethodDescriptor = noOpMethod
	ret.file = r
	ret.parent = parent
	ret.index = index
	ret.proto = mtd
	ret.fqn = fqn
}
// MethodDescriptorProto returns the underlying descriptor proto.
func (m *mtdDescriptor) MethodDescriptorProto() *descriptorpb.MethodDescriptorProto {
	return m.proto
}

// AsProto returns the underlying descriptor proto as a proto.Message.
func (m *mtdDescriptor) AsProto() proto.Message {
	return m.proto
}

// ParentFile returns the file in which this method is declared.
func (m *mtdDescriptor) ParentFile() protoreflect.FileDescriptor {
	return m.file
}

// Parent returns the service that declares this method.
func (m *mtdDescriptor) Parent() protoreflect.Descriptor {
	return m.parent
}

// Index returns this method's position within its service's method list.
func (m *mtdDescriptor) Index() int {
	return m.index
}

// Syntax returns the syntax of the enclosing file.
func (m *mtdDescriptor) Syntax() protoreflect.Syntax {
	return m.file.Syntax()
}

// Name returns the method's simple (unqualified) name.
func (m *mtdDescriptor) Name() protoreflect.Name {
	return protoreflect.Name(m.proto.GetName())
}

// FullName returns the method's fully-qualified name.
func (m *mtdDescriptor) FullName() protoreflect.FullName {
	return protoreflect.FullName(m.fqn)
}

// IsPlaceholder reports false: this descriptor is fully defined.
func (m *mtdDescriptor) IsPlaceholder() bool {
	return false
}

// Options returns the method's options (may be nil).
func (m *mtdDescriptor) Options() protoreflect.ProtoMessage {
	return m.proto.Options
}

// Input returns the request message type (nil until references are resolved).
func (m *mtdDescriptor) Input() protoreflect.MessageDescriptor {
	return m.inputType
}

// Output returns the response message type (nil until references are resolved).
func (m *mtdDescriptor) Output() protoreflect.MessageDescriptor {
	return m.outputType
}

// IsStreamingClient reports whether the client streams requests.
func (m *mtdDescriptor) IsStreamingClient() bool {
	return m.proto.GetClientStreaming()
}

// IsStreamingServer reports whether the server streams responses.
func (m *mtdDescriptor) IsStreamingServer() bool {
	return m.proto.GetServerStreaming()
}
// FindImportByPath returns the direct dependency with the given path, or
// nil if this file does not import it.
func (r *result) FindImportByPath(path string) File {
	return r.deps.FindFileByPath(path)
}

// FindExtensionByNumber returns the extension with the given tag that
// extends the given message, searching this file's own declarations.
func (r *result) FindExtensionByNumber(msg protoreflect.FullName, tag protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor {
	return findExtension(r, msg, tag)
}

// FindDescriptorByName returns the element with the given fully-qualified
// name defined in this file, or nil if there is none. A leading dot (used
// to mark fully-qualified references) is tolerated and stripped.
func (r *result) FindDescriptorByName(name protoreflect.FullName) protoreflect.Descriptor {
	fqn := strings.TrimPrefix(string(name), ".")
	return r.descriptors[fqn]
}

// hasSource reports whether this result retains a full source AST (its
// FileNode is an *ast.FileNode).
func (r *result) hasSource() bool {
	n := r.FileNode()
	_, ok := n.(*ast.FileNode)
	return ok
}
// resolveFeature resolves a feature for the given descriptor. If the given element
// is in a proto2 or proto3 syntax file, this skips resolution and just returns the
// relevant default (since such files are not allowed to override features).
//
// If neither the given element nor any of its ancestors override the given feature,
// the relevant default is returned.
func resolveFeature(element protoreflect.Descriptor, feature protoreflect.FieldDescriptor) protoreflect.Value {
	edition := editions.GetEdition(element)
	if edition == descriptorpb.Edition_EDITION_PROTO2 || edition == descriptorpb.Edition_EDITION_PROTO3 {
		// these syntax levels can't specify features, so we can short-circuit the search
		// through the descriptor hierarchy for feature overrides
		defaults := editions.GetEditionDefaults(edition)
		return defaults.ProtoReflect().Get(feature) // returns default value if field is not present
	}
	val, err := editions.ResolveFeature(element, feature)
	if err == nil && val.IsValid() {
		return val
	}
	// No override found anywhere in the hierarchy (or resolution failed):
	// fall back to the edition's default for this feature.
	defaults := editions.GetEditionDefaults(edition)
	return defaults.ProtoReflect().Get(feature)
}
// isJSONCompliant reports whether the resolved json_format feature for the
// given element allows JSON (FeatureSet_ALLOW).
func isJSONCompliant(d protoreflect.Descriptor) bool {
	jsonFormat := resolveFeature(d, jsonFormatField)
	return descriptorpb.FeatureSet_JsonFormat(jsonFormat.Enum()) == descriptorpb.FeatureSet_ALLOW
}
// sourcePathKey is a comparable map key derived from a protoreflect.SourcePath.
type sourcePathKey string

// pathKey converts a source path to a map key. It clones the path first so
// the key does not alias the caller's (possibly reused) slice.
func pathKey(p protoreflect.SourcePath) sourcePathKey {
	return pathKeyNoCopy(slices.Clone(p))
}

// pathKeyNoCopy converts a source path to a map key without copying (via
// unsafex.StringAlias). The caller must not modify p afterwards, since the
// resulting key aliases its memory.
func pathKeyNoCopy(p protoreflect.SourcePath) sourcePathKey {
	return sourcePathKey(unsafex.StringAlias(p))
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package linker
import (
"fmt"
"strings"
"google.golang.org/protobuf/reflect/protodesc"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/dynamicpb"
"github.com/bufbuild/protocompile/walk"
)
// File is like a super-powered protoreflect.FileDescriptor. It includes helpful
// methods for looking up elements in the descriptor and can be used to create a
// resolver for the entire transitive closure of the file's dependencies. (See
// ResolverFromFile.)
type File interface {
	protoreflect.FileDescriptor
	// FindDescriptorByName returns the given named element that is defined in
	// this file. If no such element exists, nil is returned.
	FindDescriptorByName(name protoreflect.FullName) protoreflect.Descriptor
	// FindImportByPath returns the File corresponding to the given import path.
	// If this file does not import the given path, nil is returned.
	FindImportByPath(path string) File
	// FindExtensionByNumber returns the extension descriptor for the given tag
	// that extends the given message name. If no such extension is defined in this
	// file, nil is returned.
	FindExtensionByNumber(message protoreflect.FullName, tag protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor
}
// NewFile converts a protoreflect.FileDescriptor to a File. The given deps
// must contain all dependencies/imports of f; if any import cannot be found
// in deps, an error is returned. If f already implements File, it is
// returned unchanged. Also see NewFileRecursive.
func NewFile(f protoreflect.FileDescriptor, deps Files) (File, error) {
	if already, ok := f.(File); ok {
		return already, nil
	}
	// Verify that every import of f is present in deps, collecting them in
	// import order.
	resolved := make(Files, f.Imports().Len())
	for i := range f.Imports().Len() {
		path := f.Imports().Get(i).Path()
		dep := deps.FindFileByPath(path)
		if dep == nil {
			return nil, fmt.Errorf("cannot create File for %q: missing dependency for %q", f.Path(), path)
		}
		resolved[i] = dep
	}
	return newFile(f, resolved)
}
// newFile wraps f in a file value, building an index of every descriptor
// declared in f (messages, enums, fields, etc.) keyed by fully-qualified
// name. It returns an error if the file declares two elements with the same
// full name.
func newFile(f protoreflect.FileDescriptor, deps Files) (File, error) {
	descs := map[protoreflect.FullName]protoreflect.Descriptor{}
	err := walk.Descriptors(f, func(d protoreflect.Descriptor) error {
		if _, ok := descs[d.FullName()]; ok {
			return fmt.Errorf("file %q contains multiple elements with the name %s", f.Path(), d.FullName())
		}
		descs[d.FullName()] = d
		return nil
	})
	if err != nil {
		return nil, err
	}
	return &file{
		FileDescriptor: f,
		descs:          descs,
		deps:           deps,
	}, nil
}
// NewFileRecursive recursively converts a protoreflect.FileDescriptor to a File.
// If f has any dependencies/imports, they are converted, too, including any and
// all transitive dependencies.
//
// If f already implements File, it is returned unchanged.
func NewFileRecursive(f protoreflect.FileDescriptor) (File, error) {
	if asFile, ok := f.(File); ok {
		return asFile, nil
	}
	// The seen map caches converted files and detects import cycles.
	return newFileRecursive(f, map[protoreflect.FileDescriptor]File{})
}
// newFileRecursive converts fd and its transitive imports to Files. The
// seen map serves two purposes: it memoizes already-converted files, and a
// nil entry marks a file whose conversion is in progress, which lets us
// detect import cycles.
func newFileRecursive(fd protoreflect.FileDescriptor, seen map[protoreflect.FileDescriptor]File) (File, error) {
	if res, ok := seen[fd]; ok {
		if res == nil {
			// A nil entry means fd is currently being converted further up
			// the call stack, so we've come back around to it: a cycle.
			return nil, fmt.Errorf("import cycle encountered: file %s transitively imports itself", fd.Path())
		}
		return res, nil
	}
	if f, ok := fd.(File); ok {
		seen[fd] = f
		return f, nil
	}
	// Mark fd as in-progress before recursing into its imports.
	seen[fd] = nil
	deps := make([]File, fd.Imports().Len())
	for i := range fd.Imports().Len() {
		imprt := fd.Imports().Get(i)
		dep, err := newFileRecursive(imprt, seen)
		if err != nil {
			return nil, err
		}
		deps[i] = dep
	}
	f, err := newFile(fd, deps)
	if err != nil {
		return nil, err
	}
	seen[fd] = f
	return f, nil
}
// file is the default File implementation: a protoreflect.FileDescriptor
// plus a by-name index of its declared descriptors and its resolved
// dependencies.
type file struct {
	protoreflect.FileDescriptor
	descs map[protoreflect.FullName]protoreflect.Descriptor // all descriptors declared in this file, by full name
	deps  Files                                              // resolved direct imports
}

// Compile-time interface conformance check.
var _ File = (*file)(nil)

// FindDescriptorByName returns the element declared in this file with the
// given fully-qualified name, or nil if there is none.
func (f *file) FindDescriptorByName(name protoreflect.FullName) protoreflect.Descriptor {
	return f.descs[name]
}

// FindImportByPath returns the direct dependency with the given path, or
// nil if this file does not import it.
func (f *file) FindImportByPath(path string) File {
	return f.deps.FindFileByPath(path)
}

// FindExtensionByNumber returns the extension with the given tag that
// extends the given message, searching this file's own declarations.
func (f *file) FindExtensionByNumber(msg protoreflect.FullName, tag protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor {
	return findExtension(f, msg, tag)
}

// Unwrap returns the wrapped protoreflect.FileDescriptor.
func (f *file) Unwrap() protoreflect.FileDescriptor {
	return f.FileDescriptor
}
// Files represents a set of protobuf files. It is a slice of File values, but
// also provides a method for easily looking up files by path and name.
type Files []File

// FindFileByPath finds a file in f that has the given path and name. If f
// contains no such file, nil is returned. Linear scan over the slice.
func (f Files) FindFileByPath(path string) File {
	for _, file := range f {
		if file.Path() == path {
			return file
		}
	}
	return nil
}
// AsResolver returns a Resolver that uses f as the source of descriptors. If
// a given query cannot be answered with the files in f, the query will fail
// with a protoregistry.NotFound error. The implementation just delegates calls
// to each file until a result is found.
//
// Also see ResolverFromFile.
func (f Files) AsResolver() Resolver {
	return filesResolver(f)
}

// Resolver is an interface that can resolve various kinds of queries about
// descriptors. It satisfies the resolver interfaces defined in protodesc
// and protoregistry packages.
type Resolver interface {
	protodesc.Resolver
	protoregistry.MessageTypeResolver
	protoregistry.ExtensionTypeResolver
}
// ResolverFromFile returns a Resolver that can resolve any element that is
// visible to the given file. It will search the given file, its imports, and
// any transitive public imports.
//
// Note that this function does not compute any additional indexes for efficient
// search, so queries generally take linear time, O(n) where n is the number of
// files whose elements are visible to the given file. Queries for an extension
// by number have runtime complexity that is linear with the number of messages
// and extensions defined across those files.
func ResolverFromFile(f File) Resolver {
	return fileResolver{f: f}
}

// fileResolver implements Resolver by searching a single file and
// everything visible to it (see ResolverFromFile).
type fileResolver struct {
	f File
}
// FindFileByPath returns the file with the given path among the files
// visible to r.f, or protoregistry.NotFound if none matches.
func (r fileResolver) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
	return resolveInFile(r.f, false, nil, func(f File) (protoreflect.FileDescriptor, error) {
		if f.Path() == path {
			return f, nil
		}
		return nil, protoregistry.NotFound
	})
}

// FindDescriptorByName returns the element with the given fully-qualified
// name among the files visible to r.f, or protoregistry.NotFound.
func (r fileResolver) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
	return resolveInFile(r.f, false, nil, func(f File) (protoreflect.Descriptor, error) {
		if d := f.FindDescriptorByName(name); d != nil {
			return d, nil
		}
		return nil, protoregistry.NotFound
	})
}
// FindMessageByName returns a dynamic message type for the named message
// among the files visible to r.f. If the name resolves to a non-message
// element, a descriptive error is returned; if it resolves to nothing,
// protoregistry.NotFound.
func (r fileResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
	return resolveInFile(r.f, false, nil, func(f File) (protoreflect.MessageType, error) {
		d := f.FindDescriptorByName(message)
		if d != nil {
			md, ok := d.(protoreflect.MessageDescriptor)
			if !ok {
				return nil, fmt.Errorf("%q is %s, not a message", message, descriptorTypeWithArticle(d))
			}
			return dynamicpb.NewMessageType(md), nil
		}
		return nil, protoregistry.NotFound
	})
}

// FindMessageByURL resolves a type URL (e.g. from a google.protobuf.Any)
// by extracting the message name after the last slash and delegating to
// FindMessageByName.
func (r fileResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
	fullName := messageNameFromURL(url)
	return r.FindMessageByName(protoreflect.FullName(fullName))
}
// messageNameFromURL extracts the message name from a type URL: everything
// after the final '/'. If the URL contains no slash, the whole string is
// the name.
func messageNameFromURL(url string) string {
	if i := strings.LastIndexByte(url, '/'); i >= 0 {
		return url[i+1:]
	}
	return url
}
// FindExtensionByName returns an extension type for the named extension
// field among the files visible to r.f. Non-extension elements with that
// name produce a descriptive error; unresolvable names produce
// protoregistry.NotFound.
func (r fileResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
	return resolveInFile(r.f, false, nil, func(f File) (protoreflect.ExtensionType, error) {
		d := f.FindDescriptorByName(field)
		if d != nil {
			fld, ok := d.(protoreflect.FieldDescriptor)
			if !ok || !fld.IsExtension() {
				return nil, fmt.Errorf("%q is %s, not an extension", field, descriptorTypeWithArticle(d))
			}
			// Prefer the descriptor's own type when it carries one; otherwise
			// wrap it in a dynamic extension type.
			if extd, ok := fld.(protoreflect.ExtensionTypeDescriptor); ok {
				return extd.Type(), nil
			}
			return dynamicpb.NewExtensionType(fld), nil
		}
		return nil, protoregistry.NotFound
	})
}

// FindExtensionByNumber returns the extension type with the given tag that
// extends the given message, among the files visible to r.f, or
// protoregistry.NotFound.
func (r fileResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
	return resolveInFile(r.f, false, nil, func(f File) (protoreflect.ExtensionType, error) {
		ext := findExtension(f, message, field)
		if ext != nil {
			return ext.Type(), nil
		}
		return nil, protoregistry.NotFound
	})
}
// filesResolver implements Resolver by querying each file in the slice in
// order until one answers (see Files.AsResolver).
type filesResolver []File

// FindFileByPath returns the file with the given path, or
// protoregistry.NotFound if no file in the set matches.
func (r filesResolver) FindFileByPath(path string) (protoreflect.FileDescriptor, error) {
	for _, f := range r {
		if f.Path() == path {
			return f, nil
		}
	}
	return nil, protoregistry.NotFound
}

// FindDescriptorByName returns the first element with the given
// fully-qualified name found in any file, or protoregistry.NotFound.
func (r filesResolver) FindDescriptorByName(name protoreflect.FullName) (protoreflect.Descriptor, error) {
	for _, f := range r {
		result := f.FindDescriptorByName(name)
		if result != nil {
			return result, nil
		}
	}
	return nil, protoregistry.NotFound
}
// FindMessageByName returns a dynamic message type for the named message.
// Unlike fileResolver, if the name resolves to a non-message element, this
// returns protoregistry.NotFound rather than a descriptive error.
func (r filesResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
	for _, f := range r {
		d := f.FindDescriptorByName(message)
		if d != nil {
			if md, ok := d.(protoreflect.MessageDescriptor); ok {
				return dynamicpb.NewMessageType(md), nil
			}
			return nil, protoregistry.NotFound
		}
	}
	return nil, protoregistry.NotFound
}

// FindMessageByURL resolves a type URL by extracting the message name after
// the last slash and delegating to FindMessageByName.
func (r filesResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
	name := messageNameFromURL(url)
	return r.FindMessageByName(protoreflect.FullName(name))
}
// FindExtensionByName returns an extension type for the named extension
// field, searching each file in order. Names that resolve to something
// other than an extension produce protoregistry.NotFound.
func (r filesResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
	for _, f := range r {
		d := f.FindDescriptorByName(field)
		if d != nil {
			// Prefer the descriptor's own type when it carries one; otherwise
			// wrap the field in a dynamic extension type.
			if extd, ok := d.(protoreflect.ExtensionTypeDescriptor); ok {
				return extd.Type(), nil
			}
			if fld, ok := d.(protoreflect.FieldDescriptor); ok && fld.IsExtension() {
				return dynamicpb.NewExtensionType(fld), nil
			}
			return nil, protoregistry.NotFound
		}
	}
	return nil, protoregistry.NotFound
}

// FindExtensionByNumber returns the extension type with the given tag that
// extends the given message, searching each file in order, or
// protoregistry.NotFound.
func (r filesResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
	for _, f := range r {
		ext := findExtension(f, message, field)
		if ext != nil {
			return ext.Type(), nil
		}
	}
	return nil, protoregistry.NotFound
}
// hasExtensionsAndMessages abstracts over descriptors that declare both
// messages and extensions (files and messages), so findExtension can
// recurse through either.
type hasExtensionsAndMessages interface {
	Messages() protoreflect.MessageDescriptors
	Extensions() protoreflect.ExtensionDescriptors
}

// findExtension searches d's extensions, and then recursively the
// extensions of all nested messages, for one that extends the given message
// with the given field number. Returns nil if no match is found.
func findExtension(d hasExtensionsAndMessages, message protoreflect.FullName, field protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor {
	for i := range d.Extensions().Len() {
		if extType := isExtensionMatch(d.Extensions().Get(i), message, field); extType != nil {
			return extType
		}
	}
	for i := range d.Messages().Len() {
		if extType := findExtension(d.Messages().Get(i), message, field); extType != nil {
			return extType
		}
	}
	return nil // could not be found
}
// isExtensionMatch returns ext as an ExtensionTypeDescriptor if it extends
// the given message with the given field number, wrapping it in a dynamic
// type if the descriptor does not already carry one. Returns nil on no
// match.
func isExtensionMatch(ext protoreflect.ExtensionDescriptor, message protoreflect.FullName, field protoreflect.FieldNumber) protoreflect.ExtensionTypeDescriptor {
	if ext.Number() != field || ext.ContainingMessage().FullName() != message {
		return nil
	}
	if extType, ok := ext.(protoreflect.ExtensionTypeDescriptor); ok {
		return extType
	}
	return dynamicpb.NewExtensionType(ext).TypeDescriptor()
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package linker
import (
"fmt"
"google.golang.org/protobuf/reflect/protoreflect"
"github.com/bufbuild/protocompile/ast"
"github.com/bufbuild/protocompile/parser"
"github.com/bufbuild/protocompile/reporter"
)
// Link handles linking a parsed descriptor proto into a fully-linked descriptor.
// If the given parser.Result has imports, they must all be present in the given
// dependencies.
//
// The symbols value is optional and may be nil. If it is not nil, it must be the
// same instance used to create and link all of the given result's dependencies
// (or otherwise already have all dependencies imported). Otherwise, linking may
// fail with spurious errors resolving symbols.
//
// The handler value is used to report any link errors. If any such errors are
// reported, this function returns a non-nil error. The Result value returned
// also implements protoreflect.FileDescriptor.
//
// Note that linking does NOT interpret options. So options messages in the
// returned value have all values stored in UninterpretedOptions fields.
func Link(parsed parser.Result, dependencies Files, symbols *Symbols, handler *reporter.Handler) (Result, error) {
	if symbols == nil {
		// No shared symbol table was supplied; use a fresh one.
		symbols = &Symbols{}
	}
	// The package name, if any, prefixes all fully-qualified names in the file.
	prefix := parsed.FileDescriptorProto().GetPackage()
	if prefix != "" {
		prefix += "."
	}
	// Verify that every import is present in dependencies, and register each
	// dependency's symbols into the symbol table.
	for _, imp := range parsed.FileDescriptorProto().Dependency {
		dep := dependencies.FindFileByPath(imp)
		if dep == nil {
			return nil, fmt.Errorf("dependencies is missing import %q", imp)
		}
		if err := symbols.Import(dep, handler); err != nil {
			return nil, err
		}
	}
	r := &result{
		FileDescriptor:       noOpFile,
		Result:               parsed,
		deps:                 dependencies,
		descriptors:          map[string]protoreflect.Descriptor{},
		usedImports:          map[string]struct{}{},
		prefix:               prefix,
		optionQualifiedNames: map[ast.IdentValueNode]string{},
	}
	// First, we create the hierarchy of descendant descriptors.
	r.createDescendants()
	// Then we can put all symbols into a single pool, which lets us ensure there
	// are no duplicate symbols and will also let us resolve and revise all type
	// references in next step.
	if err := symbols.importResult(r, handler); err != nil {
		return nil, err
	}
	// After we've populated the pool, we can now try to resolve all type
	// references. All references must be checked for correct type, any fields
	// with enum types must be corrected (since we parse them as if they are
	// message references since we don't actually know message or enum until
	// link time), and references will be re-written to be fully-qualified
	// references (e.g. start with a dot ".").
	if err := r.resolveReferences(handler, symbols); err != nil {
		return nil, err
	}
	// handler.Error() surfaces any errors that were reported (but not
	// returned) during the steps above.
	return r, handler.Error()
}
// Result is the result of linking. This is a protoreflect.FileDescriptor, but
// with some additional methods for exposing additional information, such as
// for accessing the input AST or file descriptor.
//
// It also provides Resolve* methods, for looking up enums, messages, and
// extensions that are available to the protobuf source file this result
// represents. An element is "available" if it meets any of the following
// criteria:
//  1. The element is defined in this file itself.
//  2. The element is defined in a file that is directly imported by this file.
//  3. The element is "available" to a file that is directly imported by this
//     file as a public import.
//
// Other elements, even if in the transitive closure of this file, are not
// available and thus won't be returned by these methods.
type Result interface {
	File
	parser.Result
	// ResolveMessageLiteralExtensionName returns the fully qualified name for
	// an identifier for extension field names in message literals.
	ResolveMessageLiteralExtensionName(ast.IdentValueNode) string
	// ValidateOptions runs some validation checks on the descriptor that can only
	// be done after options are interpreted. Any errors or warnings encountered
	// will be reported via the given handler. If any error is reported, this
	// function returns a non-nil error.
	ValidateOptions(handler *reporter.Handler, symbols *Symbols) error
	// CheckForUnusedImports is used to report warnings for unused imports. This
	// should be called after options have been interpreted. Otherwise, the logic
	// could incorrectly report imports as unused if the only symbol used were a
	// custom option.
	CheckForUnusedImports(handler *reporter.Handler)
	// PopulateSourceCodeInfo is used to populate source code info for the file
	// descriptor. This step requires that the underlying descriptor proto have
	// its `source_code_info` field populated. This is typically a post-process
	// step separate from linking, because computing source code info requires
	// interpreting options (which is done after linking).
	PopulateSourceCodeInfo()
	// RemoveAST drops the AST information from this result.
	RemoveAST()
}
// ErrorUnusedImport may be passed to a warning reporter when an unused
// import is detected. The error the reporter receives will be wrapped
// with source position that indicates the file and line where the import
// statement appeared.
type ErrorUnusedImport interface {
	error
	// UnusedImport returns the path of the import that went unused.
	UnusedImport() string
}
// errUnusedImport is the concrete ErrorUnusedImport implementation; the
// underlying string holds the path of the import that went unused.
type errUnusedImport string

// Error implements the error interface.
func (e errUnusedImport) Error() string {
	return fmt.Sprintf("import %q not used", string(e))
}

// UnusedImport returns the path of the import that went unused.
func (e errUnusedImport) UnusedImport() string {
	return string(e)
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package linker
import "google.golang.org/protobuf/types/descriptorpb"
// allocPool helps allocate descriptor instances. Instead of allocating
// them one at a time, we allocate a pool -- a large, flat slice to hold
// all descriptors of a particular kind for a file. We then use capacity
// in the pool when we need space for individual descriptors.
type allocPool struct {
	// Counts of each kind of element in the file, populated up front by
	// countElements.
	numMessages   int
	numFields     int
	numOneofs     int
	numEnums      int
	numEnumValues int
	numExtensions int
	numServices   int
	numMethods    int

	// Flat backing slices, one per descriptor kind, sized from the counts
	// above. The get* methods carve sub-slices off the front of these.
	messages   []msgDescriptor
	fields     []fldDescriptor
	oneofs     []oneofDescriptor
	enums      []enumDescriptor
	enumVals   []enValDescriptor
	extensions []extTypeDescriptor
	services   []svcDescriptor
	methods    []mtdDescriptor
}
func newAllocPool(file *descriptorpb.FileDescriptorProto) *allocPool {
var pool allocPool
pool.countElements(file)
pool.messages = make([]msgDescriptor, pool.numMessages)
pool.fields = make([]fldDescriptor, pool.numFields)
pool.oneofs = make([]oneofDescriptor, pool.numOneofs)
pool.enums = make([]enumDescriptor, pool.numEnums)
pool.enumVals = make([]enValDescriptor, pool.numEnumValues)
pool.extensions = make([]extTypeDescriptor, pool.numExtensions)
pool.services = make([]svcDescriptor, pool.numServices)
pool.methods = make([]mtdDescriptor, pool.numMethods)
return &pool
}
// takeFromPool removes the first count elements from the front of *pool and
// returns them. It is the shared implementation behind the allocPool get*
// methods, replacing eight identical hand-written bodies.
func takeFromPool[T any](pool *[]T, count int) []T {
	allocated := (*pool)[:count]
	*pool = (*pool)[count:]
	return allocated
}

// getMessages returns storage for count message descriptors.
func (p *allocPool) getMessages(count int) []msgDescriptor {
	return takeFromPool(&p.messages, count)
}

// getFields returns storage for count field descriptors.
func (p *allocPool) getFields(count int) []fldDescriptor {
	return takeFromPool(&p.fields, count)
}

// getOneofs returns storage for count oneof descriptors.
func (p *allocPool) getOneofs(count int) []oneofDescriptor {
	return takeFromPool(&p.oneofs, count)
}

// getEnums returns storage for count enum descriptors.
func (p *allocPool) getEnums(count int) []enumDescriptor {
	return takeFromPool(&p.enums, count)
}

// getEnumValues returns storage for count enum value descriptors.
func (p *allocPool) getEnumValues(count int) []enValDescriptor {
	return takeFromPool(&p.enumVals, count)
}

// getExtensions returns storage for count extension descriptors.
func (p *allocPool) getExtensions(count int) []extTypeDescriptor {
	return takeFromPool(&p.extensions, count)
}

// getServices returns storage for count service descriptors.
func (p *allocPool) getServices(count int) []svcDescriptor {
	return takeFromPool(&p.services, count)
}

// getMethods returns storage for count method descriptors.
func (p *allocPool) getMethods(count int) []mtdDescriptor {
	return takeFromPool(&p.methods, count)
}
func (p *allocPool) countElements(file *descriptorpb.FileDescriptorProto) {
p.countElementsInMessages(file.MessageType)
p.countElementsInEnums(file.EnumType)
p.numExtensions += len(file.Extension)
p.numServices += len(file.Service)
for _, svc := range file.Service {
p.numMethods += len(svc.Method)
}
}
func (p *allocPool) countElementsInMessages(msgs []*descriptorpb.DescriptorProto) {
p.numMessages += len(msgs)
for _, msg := range msgs {
p.numFields += len(msg.Field)
p.numOneofs += len(msg.OneofDecl)
p.countElementsInMessages(msg.NestedType)
p.countElementsInEnums(msg.EnumType)
p.numExtensions += len(msg.Extension)
}
}
func (p *allocPool) countElementsInEnums(enums []*descriptorpb.EnumDescriptorProto) {
p.numEnums += len(enums)
for _, enum := range enums {
p.numEnumValues += len(enum.Value)
}
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package linker
import (
"errors"
"fmt"
"strings"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/descriptorpb"
"github.com/bufbuild/protocompile/ast"
"github.com/bufbuild/protocompile/internal"
"github.com/bufbuild/protocompile/reporter"
"github.com/bufbuild/protocompile/walk"
)
// ResolveMessageLiteralExtensionName returns the fully-qualified name that
// was recorded for the given extension-name node during option resolution,
// or the empty string if none was recorded.
func (r *result) ResolveMessageLiteralExtensionName(node ast.IdentValueNode) string {
	fqn, ok := r.optionQualifiedNames[node]
	if !ok {
		return ""
	}
	return fqn
}
// resolveElement finds the named element among this file's symbols and the
// symbols available via its imports. A leading dot (fully-qualified marker)
// is stripped before searching. Returns nil if not found.
func (r *result) resolveElement(name protoreflect.FullName, checkedCache []string) protoreflect.Descriptor {
	if strings.HasPrefix(string(name), ".") {
		name = name[1:]
	}
	found, _ := resolveInFile(r, false, checkedCache[:0], func(f File) (protoreflect.Descriptor, error) {
		if d := resolveElementInFile(name, f); d != nil {
			return d, nil
		}
		return nil, protoregistry.NotFound
	})
	return found
}
// resolveInFile invokes fn on the given file and, when fn reports
// protoregistry.NotFound, recursively on the file's imports. Beyond the
// first hop only public imports are searched (matching protobuf visibility
// rules). The checked slice tracks file paths already visited so diamond
// import graphs are not re-examined; appending to it may share backing
// storage with the caller's slice, which is intentional (callers pass a
// reusable cache).
func resolveInFile[T any](f File, publicImportsOnly bool, checked []string, fn func(File) (T, error)) (T, error) {
	var zero T
	path := f.Path()
	for _, str := range checked {
		if str == path {
			// already checked
			return zero, protoregistry.NotFound
		}
	}
	checked = append(checked, path)
	res, err := fn(f)
	if err == nil {
		// found it
		return res, nil
	}
	if !errors.Is(err, protoregistry.NotFound) {
		// any error other than "not found" aborts the search
		return zero, err
	}
	imports := f.Imports()
	for i, l := 0, imports.Len(); i < l; i++ {
		imp := imports.Get(i)
		if publicImportsOnly && !imp.IsPublic {
			continue
		}
		// Recursive calls always pass publicImportsOnly=true: only symbols
		// re-exported via public imports are visible transitively.
		res, err := resolveInFile(f.FindImportByPath(imp.Path()), true, checked, fn)
		if errors.Is(err, protoregistry.NotFound) {
			continue
		}
		if err != nil {
			return zero, err
		}
		if !imp.IsPublic {
			// The symbol was found via a direct (non-public) import; record
			// that import as used for later unused-import diagnostics.
			if r, ok := f.(*result); ok {
				r.markUsed(imp.Path())
			}
		}
		return res, nil
	}
	// err is still protoregistry.NotFound here.
	return zero, err
}
// markUsed records that the given import path supplied a symbol that this
// file actually references; CheckForUnusedImports consults this set.
func (r *result) markUsed(importPath string) {
	r.usedImports[importPath] = struct{}{}
}
// CheckForUnusedImports reports a warning, via the given handler, for each
// non-public import whose symbols were never referenced by this file.
func (r *result) CheckForUnusedImports(handler *reporter.Handler) {
	fdProto := r.FileDescriptorProto()
	fileNode, _ := r.FileNode().(*ast.FileNode)
	for depIndex, depPath := range fdProto.Dependency {
		if _, used := r.usedImports[depPath]; used {
			continue
		}
		// Public imports re-export their symbols to importers of this file,
		// so it's fine for this file itself not to use them.
		isPublic := false
		for _, publicIndex := range fdProto.PublicDependency {
			if depIndex == int(publicIndex) {
				isPublic = true
				break
			}
		}
		if isPublic {
			continue
		}
		// Point the warning at the import statement when an AST is available;
		// otherwise fall back to an unknown position in this file.
		span := ast.UnknownSpan(fdProto.GetName())
		if fileNode != nil {
			for _, decl := range fileNode.Decls {
				if imp, ok := decl.(*ast.ImportNode); ok && imp.Name.AsString() == depPath {
					span = fileNode.NodeInfo(imp)
				}
			}
		}
		handler.HandleWarningWithPos(span, errUnusedImport(depPath))
	}
}
// descriptorTypeWithArticle returns a human-readable description of the kind
// of descriptor, preceded by an indefinite article (e.g. "a message",
// "an enum"), for interpolation into error messages.
func descriptorTypeWithArticle(d protoreflect.Descriptor) string {
	switch d := d.(type) {
	case protoreflect.MessageDescriptor:
		return "a message"
	case protoreflect.FieldDescriptor:
		// a FieldDescriptor covers both plain fields and extensions
		if d.IsExtension() {
			return "an extension"
		}
		return "a field"
	case protoreflect.OneofDescriptor:
		return "a oneof"
	case protoreflect.EnumDescriptor:
		return "an enum"
	case protoreflect.EnumValueDescriptor:
		return "an enum value"
	case protoreflect.ServiceDescriptor:
		return "a service"
	case protoreflect.MethodDescriptor:
		return "a method"
	case protoreflect.FileDescriptor:
		return "a file"
	default:
		// shouldn't be possible
		return fmt.Sprintf("a %T", d)
	}
}
// createDescendants builds the full tree of descriptors declared in this
// file (imports, messages, enums, extensions, and services), allocating
// them from a single exactly-sized pool.
func (r *result) createDescendants() {
	fdProto := r.FileDescriptorProto()
	pool := newAllocPool(fdProto)
	prefix := fdProto.GetPackage()
	if prefix != "" {
		prefix += "."
	}
	r.imports = r.createImports()
	r.messages = r.createMessages(prefix, r, fdProto.MessageType, pool)
	r.enums = r.createEnums(prefix, r, fdProto.EnumType, pool)
	r.extensions = r.createExtensions(prefix, r, fdProto.Extension, pool)
	r.services = r.createServices(prefix, fdProto.Service, pool)
}
// resolveReferences resolves all type references in the file -- field types,
// extendees, and method request/response types -- as well as any extension
// names appearing in uninterpreted options. Lexical scopes are pushed as the
// walk enters messages and services and popped on exit.
func (r *result) resolveReferences(handler *reporter.Handler, s *Symbols) error {
	fd := r.FileDescriptorProto()
	// checkedCache is reused across all resolution calls to avoid
	// re-allocating the visited-files list for every lookup.
	checkedCache := make([]string, 0, 16)
	scopes := []scope{fileScope(r, checkedCache)}
	if fd.Options != nil {
		if err := r.resolveOptions(handler, "file", protoreflect.FullName(fd.GetName()), fd.Options.UninterpretedOption, scopes, checkedCache); err != nil {
			return err
		}
	}
	// This is to de-dupe extendee-related error messages when the same
	// extendee is referenced from multiple extension field definitions.
	// We leave it nil if there's no AST.
	var extendeeNodes map[ast.Node]struct{}
	return walk.DescriptorsEnterAndExit(r,
		func(d protoreflect.Descriptor) error {
			fqn := d.FullName()
			switch d := d.(type) {
			case *msgDescriptor:
				// Strangely, when protoc resolves extension names, it uses the *enclosing* scope
				// instead of the message's scope. So if the message contains an extension named "i",
				// an option cannot refer to it as simply "i" but must qualify it (at a minimum "Msg.i").
				// So we don't add this messages scope to our scopes slice until *after* we do options.
				if d.proto.Options != nil {
					if err := r.resolveOptions(handler, "message", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil {
						return err
					}
				}
				scopes = append(scopes, messageScope(r, fqn)) // push new scope on entry
				// walk only visits descriptors, so we need to loop over extension ranges ourselves
				for _, er := range d.proto.ExtensionRange {
					if er.Options != nil {
						// Extension ranges have no name of their own; synthesize one
						// from the message name and the (inclusive) range bounds.
						erName := protoreflect.FullName(fmt.Sprintf("%s:%d-%d", fqn, er.GetStart(), er.GetEnd()-1))
						if err := r.resolveOptions(handler, "extension range", erName, er.Options.UninterpretedOption, scopes, checkedCache); err != nil {
							return err
						}
					}
				}
			case *extTypeDescriptor:
				if d.field.proto.Options != nil {
					if err := r.resolveOptions(handler, "extension", fqn, d.field.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil {
						return err
					}
				}
				// Lazily create the de-dupe set the first time we see an
				// extension, but only when there is an AST to key it by.
				if extendeeNodes == nil && r.AST() != nil {
					extendeeNodes = map[ast.Node]struct{}{}
				}
				if err := resolveFieldTypes(&d.field, handler, extendeeNodes, s, scopes, checkedCache); err != nil {
					return err
				}
				// In proto3, extend blocks may only target the descriptor
				// options messages (i.e. define custom options).
				if r.Syntax() == protoreflect.Proto3 && !allowedProto3Extendee(d.field.proto.GetExtendee()) {
					file := r.FileNode()
					node := r.FieldNode(d.field.proto).FieldExtendee()
					if err := handler.HandleErrorf(file.NodeInfo(node), "extend blocks in proto3 can only be used to define custom options"); err != nil {
						return err
					}
				}
			case *fldDescriptor:
				if d.proto.Options != nil {
					if err := r.resolveOptions(handler, "field", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil {
						return err
					}
				}
				if err := resolveFieldTypes(d, handler, nil, s, scopes, checkedCache); err != nil {
					return err
				}
			case *oneofDescriptor:
				if d.proto.Options != nil {
					if err := r.resolveOptions(handler, "oneof", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil {
						return err
					}
				}
			case *enumDescriptor:
				if d.proto.Options != nil {
					if err := r.resolveOptions(handler, "enum", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil {
						return err
					}
				}
			case *enValDescriptor:
				if d.proto.Options != nil {
					if err := r.resolveOptions(handler, "enum value", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil {
						return err
					}
				}
			case *svcDescriptor:
				if d.proto.Options != nil {
					if err := r.resolveOptions(handler, "service", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil {
						return err
					}
				}
				// not a message, but same scoping rules for nested elements as if it were
				scopes = append(scopes, messageScope(r, fqn)) // push new scope on entry
			case *mtdDescriptor:
				if d.proto.Options != nil {
					if err := r.resolveOptions(handler, "method", fqn, d.proto.Options.UninterpretedOption, scopes, checkedCache); err != nil {
						return err
					}
				}
				if err := resolveMethodTypes(d, handler, scopes, checkedCache); err != nil {
					return err
				}
			}
			return nil
		},
		func(d protoreflect.Descriptor) error {
			switch d.(type) {
			case protoreflect.MessageDescriptor, protoreflect.ServiceDescriptor:
				// pop message scope on exit
				scopes = scopes[:len(scopes)-1]
			}
			return nil
		})
}
// allowedProto3Extendees enumerates the descriptor options messages that an
// extend block in a proto3 file may target (i.e. custom options).
var allowedProto3Extendees = makeAllowedProto3Extendees()

// makeAllowedProto3Extendees builds the set of permitted extendee names, all
// of which live in the google.protobuf package.
func makeAllowedProto3Extendees() map[string]struct{} {
	names := []string{
		"FileOptions",
		"MessageOptions",
		"FieldOptions",
		"OneofOptions",
		"ExtensionRangeOptions",
		"EnumOptions",
		"EnumValueOptions",
		"ServiceOptions",
		"MethodOptions",
	}
	set := make(map[string]struct{}, len(names))
	for _, name := range names {
		set[".google.protobuf."+name] = struct{}{}
	}
	return set
}

// allowedProto3Extendee reports whether the given extendee type name may be
// extended in a proto3 file. An empty name (not an extension) is allowed.
func allowedProto3Extendee(n string) bool {
	if n == "" {
		// not an extension, allowed
		return true
	}
	_, ok := allowedProto3Extendees[n]
	return ok
}
// resolveFieldTypes resolves the extendee (for extensions) and the type of
// the given field, rewriting both references to fully-qualified form and
// validating them. For extensions it also checks that the tag is inside one
// of the extendee's extension ranges and is not a duplicate. The extendees
// map, when non-nil, de-dupes extendee errors reported from multiple
// extension fields in the same extend block.
func resolveFieldTypes(f *fldDescriptor, handler *reporter.Handler, extendees map[ast.Node]struct{}, s *Symbols, scopes []scope, checkedCache []string) error {
	r := f.file
	fld := f.proto
	file := r.FileNode()
	node := r.FieldNode(fld)
	kind := "field"
	if fld.GetExtendee() != "" {
		kind = "extension"
		// Record whether this extendee node has already produced an error, so
		// later fields in the same extend block don't repeat it.
		var alreadyReported bool
		if extendees != nil {
			_, alreadyReported = extendees[node.FieldExtendee()]
			if !alreadyReported {
				extendees[node.FieldExtendee()] = struct{}{}
			}
		}
		dsc := r.resolve(fld.GetExtendee(), false, scopes, checkedCache)
		if dsc == nil {
			if alreadyReported {
				return nil
			}
			// When de-duping is off (extendees == nil), include the field name
			// in the message since the error cannot be tied to a shared node.
			var extendeePrefix string
			if extendees == nil {
				extendeePrefix = kind + " " + f.fqn + ": "
			}
			return handler.HandleErrorf(file.NodeInfo(node.FieldExtendee()), "%sunknown extendee type %s", extendeePrefix, fld.GetExtendee())
		}
		if isSentinelDescriptor(dsc) {
			if alreadyReported {
				return nil
			}
			var extendeePrefix string
			if extendees == nil {
				extendeePrefix = kind + " " + f.fqn + ": "
			}
			return handler.HandleErrorf(file.NodeInfo(node.FieldExtendee()), "%sunknown extendee type %s; resolved to %s which is not defined; consider using a leading dot", extendeePrefix, fld.GetExtendee(), dsc.FullName())
		}
		extd, ok := dsc.(protoreflect.MessageDescriptor)
		if !ok {
			if alreadyReported {
				return nil
			}
			var extendeePrefix string
			if extendees == nil {
				extendeePrefix = kind + " " + f.fqn + ": "
			}
			return handler.HandleErrorf(file.NodeInfo(node.FieldExtendee()), "%sextendee is invalid: %s is %s, not a message", extendeePrefix, dsc.FullName(), descriptorTypeWithArticle(dsc))
		}
		f.extendee = extd
		// Canonicalize the extendee reference to fully-qualified form.
		extendeeName := "." + string(dsc.FullName())
		if fld.GetExtendee() != extendeeName {
			fld.Extendee = proto.String(extendeeName)
		}
		// make sure the tag number is in range
		found := false
		tag := protoreflect.FieldNumber(fld.GetNumber())
		for i := range extd.ExtensionRanges().Len() {
			rng := extd.ExtensionRanges().Get(i)
			// rng[1] is exclusive
			if tag >= rng[0] && tag < rng[1] {
				found = true
				break
			}
		}
		if !found {
			if err := handler.HandleErrorf(file.NodeInfo(node.FieldTag()), "%s %s: tag %d is not in valid range for extended type %s", kind, f.fqn, tag, dsc.FullName()); err != nil {
				return err
			}
		} else {
			// make sure tag is not a duplicate
			if err := s.AddExtension(packageFor(dsc), dsc.FullName(), tag, file.NodeInfo(node.FieldTag()), handler); err != nil {
				return err
			}
		}
	} else if f.proto.OneofIndex != nil {
		// Non-extension field inside a oneof: link to the parent's oneof.
		parent := f.parent.(protoreflect.MessageDescriptor) //nolint:errcheck
		index := int(f.proto.GetOneofIndex())
		f.oneof = parent.Oneofs().Get(index)
	}
	if fld.GetTypeName() == "" {
		// scalar type; no further resolution required
		return nil
	}
	dsc := r.resolve(fld.GetTypeName(), true, scopes, checkedCache)
	if dsc == nil {
		return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: unknown type %s", kind, f.fqn, fld.GetTypeName())
	}
	if isSentinelDescriptor(dsc) {
		return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: unknown type %s; resolved to %s which is not defined; consider using a leading dot", kind, f.fqn, fld.GetTypeName(), dsc.FullName())
	}
	switch dsc := dsc.(type) {
	case protoreflect.MessageDescriptor:
		if dsc.IsMapEntry() {
			// Synthetic map-entry messages may only be referenced implicitly,
			// by the map field that generated them.
			isValid := false
			switch node.(type) {
			case *ast.MapFieldNode:
				// We have an AST for this file and can see this field is from a map declaration
				isValid = true
			case *ast.NoSourceNode:
				// We don't have an AST for the file (it came from a provided descriptor). So we
				// need to validate that it's not an illegal reference. To be valid, the field
				// must be repeated and the entry type must be nested in the same enclosing
				// message as the field.
				isValid = isValidMap(f, dsc)
				if isValid && f.index > 0 {
					// also make sure there are no earlier fields that are valid for this map entry
					flds := f.Parent().(protoreflect.MessageDescriptor).Fields() //nolint:errcheck
					for i := range f.index {
						if isValidMap(flds.Get(i), dsc) {
							isValid = false
							break
						}
					}
				}
			}
			if !isValid {
				return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: %s is a synthetic map entry and may not be referenced explicitly", kind, f.fqn, dsc.FullName())
			}
		}
		// Canonicalize the type reference to fully-qualified form.
		typeName := "." + string(dsc.FullName())
		if fld.GetTypeName() != typeName {
			fld.TypeName = proto.String(typeName)
		}
		if fld.Type == nil {
			// if type was tentatively unset, we now know it's actually a message
			fld.Type = descriptorpb.FieldDescriptorProto_TYPE_MESSAGE.Enum()
		} else if fld.GetType() != descriptorpb.FieldDescriptorProto_TYPE_MESSAGE && fld.GetType() != descriptorpb.FieldDescriptorProto_TYPE_GROUP {
			return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: descriptor proto indicates type %v but should be %v", kind, f.fqn, fld.GetType(), descriptorpb.FieldDescriptorProto_TYPE_MESSAGE)
		}
		f.msgType = dsc
	case protoreflect.EnumDescriptor:
		typeName := "." + string(dsc.FullName())
		if fld.GetTypeName() != typeName {
			fld.TypeName = proto.String(typeName)
		}
		if fld.Type == nil {
			// the type was tentatively unset, but now we know it's actually an enum
			fld.Type = descriptorpb.FieldDescriptorProto_TYPE_ENUM.Enum()
		} else if fld.GetType() != descriptorpb.FieldDescriptorProto_TYPE_ENUM {
			return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: descriptor proto indicates type %v but should be %v", kind, f.fqn, fld.GetType(), descriptorpb.FieldDescriptorProto_TYPE_ENUM)
		}
		f.enumType = dsc
	default:
		return handler.HandleErrorf(file.NodeInfo(node.FieldType()), "%s %s: invalid type: %s is %s, not a message or enum", kind, f.fqn, dsc.FullName(), descriptorTypeWithArticle(dsc))
	}
	return nil
}
// packageFor returns the protobuf package containing the given descriptor,
// falling back to the parent of its full name when no parent file is
// available.
func packageFor(dsc protoreflect.Descriptor) protoreflect.FullName {
	if parentFile := dsc.ParentFile(); parentFile != nil {
		return parentFile.Package()
	}
	// Can't access package? Make a best effort guess.
	return dsc.FullName().Parent()
}
// isValidMap reports whether mapField could legally be the field that
// generated the synthetic map-entry message mapEntry: it must be a repeated,
// non-extension field whose containing message directly encloses the entry,
// and the entry's name must be the field's JSON name in InitCap form with an
// "Entry" suffix.
func isValidMap(mapField protoreflect.FieldDescriptor, mapEntry protoreflect.MessageDescriptor) bool {
	if mapField.IsExtension() {
		return false
	}
	if mapEntry.Parent() != mapField.ContainingMessage() {
		return false
	}
	if mapField.Cardinality() != protoreflect.Repeated {
		return false
	}
	expectedName := internal.InitCap(internal.JSONName(string(mapField.Name()))) + "Entry"
	return string(mapEntry.Name()) == expectedName
}
// resolveMethodTypes resolves a method's request and response type
// references, rewriting them to fully-qualified form. Resolution problems
// are reported via the handler; processing continues (returning nil) unless
// the handler itself demands a stop.
func resolveMethodTypes(m *mtdDescriptor, handler *reporter.Handler, scopes []scope, checkedCache []string) error {
	scope := "method " + m.fqn
	r := m.file
	mtd := m.proto
	node := r.MethodNode(mtd)

	msg, fqn, err := resolveMethodType(r, handler, scope, "request", mtd.GetInputType(), node.GetInputType(), scopes, checkedCache)
	if err != nil {
		return err
	}
	if msg != nil {
		if mtd.GetInputType() != fqn {
			mtd.InputType = proto.String(fqn)
		}
		m.inputType = msg
	}

	msg, fqn, err = resolveMethodType(r, handler, scope, "response", mtd.GetOutputType(), node.GetOutputType(), scopes, checkedCache)
	if err != nil {
		return err
	}
	if msg != nil {
		if mtd.GetOutputType() != fqn {
			mtd.OutputType = proto.String(fqn)
		}
		m.outputType = msg
	}
	return nil
}

// resolveMethodType resolves a single method type reference, where kind is
// "request" or "response" (used verbatim in error messages). On success it
// returns the resolved message and its canonical fully-qualified name (with
// leading dot). When resolution fails but the handler tolerates the error,
// it returns a nil message and nil error so the caller can continue.
func resolveMethodType(r *result, handler *reporter.Handler, scope, kind, typeName string, typeNode ast.Node, scopes []scope, checkedCache []string) (protoreflect.MessageDescriptor, string, error) {
	file := r.FileNode()
	dsc := r.resolve(typeName, false, scopes, checkedCache)
	if dsc == nil {
		return nil, "", handler.HandleErrorf(file.NodeInfo(typeNode), "%s: unknown %s type %s", scope, kind, typeName)
	}
	if isSentinelDescriptor(dsc) {
		return nil, "", handler.HandleErrorf(file.NodeInfo(typeNode), "%s: unknown %s type %s; resolved to %s which is not defined; consider using a leading dot", scope, kind, typeName, dsc.FullName())
	}
	msg, ok := dsc.(protoreflect.MessageDescriptor)
	if !ok {
		return nil, "", handler.HandleErrorf(file.NodeInfo(typeNode), "%s: invalid %s type: %s is %s, not a message", scope, kind, dsc.FullName(), descriptorTypeWithArticle(dsc))
	}
	return msg, "." + string(dsc.FullName()), nil
}
// resolveOptions resolves extension names appearing in the given
// uninterpreted options, both in option name components and inside
// message-literal option values. elemType and elemName identify the element
// that owns the options, for error message context.
func (r *result) resolveOptions(handler *reporter.Handler, elemType string, elemName protoreflect.FullName, opts []*descriptorpb.UninterpretedOption, scopes []scope, checkedCache []string) error {
	mc := &internal.MessageContext{
		File:        r,
		ElementName: string(elemName),
		ElementType: elemType,
	}
	file := r.FileNode()
opts:
	for _, opt := range opts {
		// resolve any extension names found in option names
		for _, nm := range opt.Name {
			if nm.GetIsExtension() {
				node := r.OptionNamePartNode(nm)
				fqn, err := r.resolveExtensionName(nm.GetNamePart(), scopes, checkedCache)
				if err != nil {
					if err := handler.HandleErrorf(file.NodeInfo(node), "%v%v", mc, err); err != nil {
						return err
					}
					// This option's name could not be resolved; skip the rest
					// of it but keep processing the remaining options.
					continue opts
				}
				// Rewrite the name part to its fully-qualified form.
				nm.NamePart = proto.String(fqn)
			}
		}
		// also resolve any extension names found inside message literals in option values
		mc.Option = opt
		optVal := r.OptionNode(opt).GetValue()
		if err := r.resolveOptionValue(handler, mc, optVal, scopes, checkedCache); err != nil {
			return err
		}
		mc.Option = nil
	}
	return nil
}
// resolveOptionValue recursively resolves extension names that appear inside
// an option value: elements of array literals and field names of message
// literals. Resolved names are recorded in r.optionQualifiedNames for later
// lookup via ResolveMessageLiteralExtensionName. mc.OptAggPath tracks the
// path within the aggregate value for error messages and is restored on
// return via defer.
func (r *result) resolveOptionValue(handler *reporter.Handler, mc *internal.MessageContext, val ast.ValueNode, scopes []scope, checkedCache []string) error {
	optVal := val.Value()
	switch optVal := optVal.(type) {
	case []ast.ValueNode:
		// array literal: recurse into each element
		origPath := mc.OptAggPath
		defer func() {
			mc.OptAggPath = origPath
		}()
		for i, v := range optVal {
			mc.OptAggPath = fmt.Sprintf("%s[%d]", origPath, i)
			if err := r.resolveOptionValue(handler, mc, v, scopes, checkedCache); err != nil {
				return err
			}
		}
	case []*ast.MessageFieldNode:
		// message literal: resolve extension field names, then recurse into values
		origPath := mc.OptAggPath
		defer func() {
			mc.OptAggPath = origPath
		}()
		for _, fld := range optVal {
			// check for extension name
			if fld.Name.IsExtension() {
				// Confusingly, an extension reference inside a message literal cannot refer to
				// elements in the same enclosing message without a qualifier. Basically, we
				// treat this as if there were no message scopes, so only the package name is
				// used for resolving relative references. (Inconsistent protoc behavior, but
				// likely due to how it re-uses C++ text format implementation, and normal text
				// format doesn't expect that kind of relative reference.)
				scopes := scopes[:1] // first scope is file, the rest are enclosing messages
				fqn, err := r.resolveExtensionName(string(fld.Name.Name.AsIdentifier()), scopes, checkedCache)
				if err != nil {
					if err := handler.HandleErrorf(r.FileNode().NodeInfo(fld.Name.Name), "%v%v", mc, err); err != nil {
						return err
					}
				} else {
					// Remember the resolution so option interpretation can use it.
					r.optionQualifiedNames[fld.Name.Name] = fqn
				}
			}
			// recurse into value
			mc.OptAggPath = origPath
			if origPath != "" {
				mc.OptAggPath += "."
			}
			if fld.Name.IsExtension() {
				// extension names are bracketed in the path, e.g. "a.[pkg.ext]"
				mc.OptAggPath = fmt.Sprintf("%s[%s]", mc.OptAggPath, string(fld.Name.Name.AsIdentifier()))
			} else {
				mc.OptAggPath = fmt.Sprintf("%s%s", mc.OptAggPath, string(fld.Name.Name.AsIdentifier()))
			}
			if err := r.resolveOptionValue(handler, mc, fld.Val, scopes, checkedCache); err != nil {
				return err
			}
		}
	}
	return nil
}
// resolveExtensionName resolves the given name to an extension field and
// returns its fully-qualified name with a leading dot. It returns an error
// if the name cannot be resolved or resolves to something that is not an
// extension.
func (r *result) resolveExtensionName(name string, scopes []scope, checkedCache []string) (string, error) {
	dsc := r.resolve(name, false, scopes, checkedCache)
	if dsc == nil {
		return "", fmt.Errorf("unknown extension %s", name)
	}
	if isSentinelDescriptor(dsc) {
		return "", fmt.Errorf("unknown extension %s; resolved to %s which is not defined; consider using a leading dot", name, dsc.FullName())
	}
	ext, ok := dsc.(protoreflect.FieldDescriptor)
	if !ok {
		return "", fmt.Errorf("invalid extension: %s is %s, not an extension", name, descriptorTypeWithArticle(dsc))
	}
	if !ext.IsExtension() {
		return "", fmt.Errorf("invalid extension: %s is a field but not an extension", name)
	}
	return "." + string(dsc.FullName()), nil
}
// resolve finds the descriptor for the given (possibly relative) name,
// searching scopes from innermost to outermost. When onlyTypes is true, an
// unqualified reference must resolve to a message or enum; a match of the
// wrong kind is remembered and returned as a fallback so callers can
// produce a better error.
func (r *result) resolve(name string, onlyTypes bool, scopes []scope, checkedCache []string) protoreflect.Descriptor {
	if strings.HasPrefix(name, ".") {
		// Already fully-qualified: no scope search needed.
		return r.resolveElement(protoreflect.FullName(name[1:]), checkedCache)
	}
	// Only the first component of a relative name selects the scope; the
	// rest is then resolved within that scope.
	firstName := name
	if head, _, ok := strings.Cut(name, "."); ok && head != "" {
		firstName = head
	}
	var bestGuess protoreflect.Descriptor
	for i := len(scopes) - 1; i >= 0; i-- {
		d := scopes[i](firstName, name)
		if d == nil {
			continue
		}
		// Mirror protoc's behavior: a match of the wrong kind is skipped
		// (but remembered) only when the reference is unqualified, and the
		// search then continues in the next outer scope.
		if !onlyTypes || isType(d) || firstName != name {
			return d
		}
		if bestGuess == nil {
			bestGuess = d
		}
	}
	// No match of the right kind: return the wrong-kind match (if any) so
	// the caller can report that the name was found but is the wrong type.
	return bestGuess
}
// isType reports whether d describes a type (a message or an enum), as
// opposed to some other kind of element.
func isType(d protoreflect.Descriptor) bool {
	if _, ok := d.(protoreflect.MessageDescriptor); ok {
		return true
	}
	_, ok := d.(protoreflect.EnumDescriptor)
	return ok
}
// scope represents a lexical scope in a proto file in which messages and enums
// can be declared. It is queried with the first component of a (possibly
// qualified) name along with the full name, and returns the matching
// descriptor, a sentinel, or nil if this scope has no such element.
type scope func(firstName, fullName string) protoreflect.Descriptor
// fileScope returns the outermost scope for the file: it searches symbols in
// this file and in other files sharing this file's package or any ancestor
// package (protobuf packages nest hierarchically, like C++ namespaces).
func fileScope(r *result, checkedCache []string) scope {
	// Prefixes run from the file's own package down to the empty prefix,
	// which represents the unqualified name itself.
	prefixes := internal.CreatePrefixList(r.FileDescriptorProto().GetPackage())
	query := func(name string) protoreflect.Descriptor {
		return r.resolveElement(protoreflect.FullName(name), checkedCache)
	}
	return func(firstName, fullName string) protoreflect.Descriptor {
		for _, prefix := range prefixes {
			qualifiedFirst, qualifiedFull := fullName, fullName
			if prefix != "" {
				qualifiedFirst = prefix + "." + firstName
				qualifiedFull = prefix + "." + fullName
			}
			if d := resolveElementRelative(qualifiedFirst, qualifiedFull, query); d != nil {
				return d
			}
		}
		return nil
	}
}
// messageScope returns a scope for elements declared inside the named
// message; lookups are qualified with the message name and confined to this
// file.
func messageScope(r *result, messageName protoreflect.FullName) scope {
	query := func(name string) protoreflect.Descriptor {
		return resolveElementInFile(protoreflect.FullName(name), r)
	}
	prefix := string(messageName) + "."
	return func(firstName, fullName string) protoreflect.Descriptor {
		return resolveElementRelative(prefix+firstName, prefix+fullName, query)
	}
}
// resolveElementRelative resolves a qualified name in two steps: first the
// leading component, then (if that succeeds and names an aggregate) the full
// name. When the first component resolves but the full name does not, a
// sentinel is returned so callers stop searching outer scopes.
func resolveElementRelative(firstName, fullName string, query func(name string) protoreflect.Descriptor) protoreflect.Descriptor {
	found := query(firstName)
	switch {
	case found == nil:
		return nil
	case firstName == fullName:
		// The name had a single component; we're done.
		return found
	case !isAggregateDescriptor(found):
		// The first component named a leaf, so the remaining components
		// cannot possibly resolve beneath it.
		return nil
	}
	if d := query(fullName); d != nil {
		return d
	}
	return newSentinelDescriptor(fullName)
}
// resolveElementInFile looks up the named element in the given file. When
// the name is not a declared element but is a valid namespace within the
// file's package, a sentinel descriptor is returned instead of nil so the
// caller stops searching.
func resolveElementInFile(name protoreflect.FullName, f File) protoreflect.Descriptor {
	if d := f.FindDescriptorByName(name); d != nil {
		return d
	}
	if !matchesPkgNamespace(name, f.Package()) {
		return nil
	}
	return newSentinelDescriptor(string(name))
}
// matchesPkgNamespace reports whether fqn equals pkg or is a dot-delimited
// ancestor namespace of it (e.g. "foo.bar" is a namespace of "foo.bar.baz").
func matchesPkgNamespace(fqn, pkg protoreflect.FullName) bool {
	if pkg == "" {
		return false
	}
	if fqn == pkg {
		return true
	}
	// fqn is a namespace of pkg iff pkg begins with "<fqn>." — check the
	// prefix and that the next character is a dot.
	return len(pkg) > len(fqn) &&
		strings.HasPrefix(string(pkg), string(fqn)) &&
		pkg[len(fqn)] == '.'
}
// isAggregateDescriptor reports whether d can contain nested elements:
// messages, enums, services, and sentinels (which stand in for packages).
func isAggregateDescriptor(d protoreflect.Descriptor) bool {
	if isSentinelDescriptor(d) {
		// A sentinel indicates the name matched a package rather than a
		// declared element, and packages are aggregates.
		return true
	}
	switch d.(type) {
	case protoreflect.MessageDescriptor, protoreflect.EnumDescriptor, protoreflect.ServiceDescriptor:
		return true
	}
	return false
}
// isSentinelDescriptor reports whether d is the placeholder returned when a
// name prefix resolved but the full name did not.
func isSentinelDescriptor(d protoreflect.Descriptor) bool {
	switch d.(type) {
	case *sentinelDescriptor:
		return true
	default:
		return false
	}
}
// newSentinelDescriptor returns a placeholder descriptor carrying the given
// fully-qualified name; see sentinelDescriptor for its meaning.
func newSentinelDescriptor(name string) protoreflect.Descriptor {
	return &sentinelDescriptor{name: name}
}
// sentinelDescriptor is a placeholder descriptor. It is used instead of nil to
// distinguish between two situations:
//  1. The given name could not be found.
//  2. The given name *cannot* be a valid result so stop searching.
//
// In these cases, attempts to resolve an element name will return nil for the
// first case and will return a sentinelDescriptor in the second. The sentinel
// contains the fully-qualified name which caused the search to stop (which may
// be a prefix of the actual name being resolved).
type sentinelDescriptor struct {
	// Embedded only to satisfy the interface; it is left nil, so calling any
	// method not explicitly overridden below would panic.
	protoreflect.Descriptor
	name string
}

// The overrides below return inert zero values so a sentinel can safely be
// inspected by generic descriptor-handling code.

func (p *sentinelDescriptor) ParentFile() protoreflect.FileDescriptor {
	return nil
}

func (p *sentinelDescriptor) Parent() protoreflect.Descriptor {
	return nil
}

func (p *sentinelDescriptor) Index() int {
	return 0
}

func (p *sentinelDescriptor) Syntax() protoreflect.Syntax {
	return 0
}

// Name returns the stored name. Note that this is the full stopping name,
// not just its final component.
func (p *sentinelDescriptor) Name() protoreflect.Name {
	return protoreflect.Name(p.name)
}

// FullName returns the fully-qualified name which caused the search to stop.
func (p *sentinelDescriptor) FullName() protoreflect.FullName {
	return protoreflect.FullName(p.name)
}

// IsPlaceholder deliberately returns false so sentinels are not mistaken for
// protoreflect placeholder descriptors.
func (p *sentinelDescriptor) IsPlaceholder() bool {
	return false
}

func (p *sentinelDescriptor) Options() protoreflect.ProtoMessage {
	return nil
}

// Compile-time check that sentinelDescriptor satisfies the interface.
var _ protoreflect.Descriptor = (*sentinelDescriptor)(nil)
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package linker
import (
"strings"
"sync"
"google.golang.org/protobuf/reflect/protoreflect"
"github.com/bufbuild/protocompile/ast"
"github.com/bufbuild/protocompile/internal"
"github.com/bufbuild/protocompile/protoutil"
"github.com/bufbuild/protocompile/reporter"
"github.com/bufbuild/protocompile/walk"
)
// unknownFilePath is used in source spans for descriptors that have no
// associated file.
const unknownFilePath = "<unknown file>"

// Symbols is a symbol table that maps names for all program elements to their
// location in source. It also tracks extension tag numbers. This can be used
// to enforce uniqueness for symbol names and tag numbers across many files and
// many link operations.
//
// This type is thread-safe.
type Symbols struct {
    // pkgTrie is the root of a trie of per-package symbol tables, keyed by
    // successive package name prefixes.
    pkgTrie packageSymbols
    // We don't know the packages for these symbols, so we can't
    // keep them in the pkgTrie. In vast majority of cases, this
    // will always be empty/unused. When used, it ensures that
    // multiple extension declarations don't refer to the same
    // extension.
    extDeclsMu sync.Mutex
    extDecls map[protoreflect.FullName]extDecl
}

// packageSymbols is one node of the package trie: the files, symbols, and
// extension tags registered under a single package.
type packageSymbols struct {
    mu sync.RWMutex
    // children maps a sub-package's full name to its trie node.
    children map[protoreflect.FullName]*packageSymbols
    // files records which descriptors have been imported, making Import
    // idempotent per file.
    files map[protoreflect.FileDescriptor]struct{}
    // symbols maps each element's fully-qualified name to where (and as
    // what) it was declared.
    symbols map[protoreflect.FullName]symbolEntry
    // exts maps an (extendee, tag) pair to the span of the extension that
    // claimed that tag number.
    exts map[extNumber]ast.SourceSpan
}

// extNumber identifies one extension tag of a particular extended message.
type extNumber struct {
    extendee protoreflect.FullName
    tag protoreflect.FieldNumber
}

// symbolEntry records where a symbol was declared and whether it is an enum
// value or a package (both get special treatment in collision reporting).
type symbolEntry struct {
    span ast.SourceSpan
    isEnumValue bool
    isPackage bool
}

// extDecl records a single extension declaration: where it appears, the
// extended message, and the declared tag number.
type extDecl struct {
    span ast.SourceSpan
    extendee protoreflect.FullName
    tag protoreflect.FieldNumber
}
// Import populates the symbol table with all symbols/elements and extension
// tags present in the given file descriptor. If s is nil or if fd has already
// been imported into s, this returns immediately without doing anything. If any
// collisions in symbol names or extension tags are identified, an error will be
// returned and the symbol table will not be updated.
func (s *Symbols) Import(fd protoreflect.FileDescriptor, handler *reporter.Handler) error {
    if s == nil {
        return nil
    }
    if f, ok := fd.(protoreflect.FileImport); ok {
        // unwrap any import instance
        fd = f.FileDescriptor
    }
    if f, ok := fd.(*file); ok {
        // unwrap any file instance
        fd = f.FileDescriptor
    }
    // Prefer the AST span of the package declaration when fd was parsed
    // from source; otherwise fall back to source code info.
    var pkgSpan ast.SourceSpan
    if res, ok := fd.(*result); ok {
        pkgSpan = packageNameSpan(res)
    } else {
        pkgSpan = sourceSpanForPackage(fd)
    }
    pkg, err := s.importPackages(pkgSpan, fd.Package(), handler)
    if err != nil || pkg == nil {
        return err
    }
    // Cheap read-locked check; importFile/importResult re-check under the
    // write lock before committing.
    pkg.mu.RLock()
    _, alreadyImported := pkg.files[fd]
    pkg.mu.RUnlock()
    if alreadyImported {
        return nil
    }
    // Import transitive dependencies first so their symbols are present.
    for i := range fd.Imports().Len() {
        if err := s.Import(fd.Imports().Get(i).FileDescriptor, handler); err != nil {
            return err
        }
    }
    // Results with source use the AST for more precise extension spans.
    if res, ok := fd.(*result); ok && res.hasSource() {
        return s.importResultWithExtensions(pkg, res, handler)
    }
    return s.importFileWithExtensions(pkg, fd, handler)
}
// importFileWithExtensions imports fd into pkg and then registers the tag
// number of every extension fd defines, so conflicting extension numbers
// across files can be detected.
func (s *Symbols) importFileWithExtensions(pkg *packageSymbols, fd protoreflect.FileDescriptor, handler *reporter.Handler) error {
    imported, err := pkg.importFile(fd, handler)
    if err != nil {
        return err
    }
    if !imported {
        // Already present; its extensions were registered at that time.
        return nil
    }
    return walk.Descriptors(fd, func(d protoreflect.Descriptor) error {
        fld, isField := d.(protoreflect.FieldDescriptor)
        if !isField || !fld.IsExtension() {
            return nil
        }
        extendee := fld.ContainingMessage()
        span := sourceSpanForNumber(fld)
        return s.AddExtension(packageFor(extendee), extendee.FullName(), fld.Number(), span, handler)
    })
}
// importFile records all of fd's symbols in this package's table. It returns
// (false, nil) if the file was already imported. The conflict check and the
// commit both happen under the write lock, so a file is never partially
// imported.
func (s *packageSymbols) importFile(fd protoreflect.FileDescriptor, handler *reporter.Handler) (bool, error) {
    s.mu.Lock()
    defer s.mu.Unlock()
    if _, ok := s.files[fd]; ok {
        // have to double-check if it's already imported, in case
        // it was added after above read-locked check
        return false, nil
    }
    // first pass: check for conflicts
    if err := s.checkFileLocked(fd, handler); err != nil {
        return false, err
    }
    // The handler may have accumulated non-aborting errors; don't commit
    // any symbols in that case either.
    if err := handler.Error(); err != nil {
        return false, err
    }
    // second pass: commit all symbols
    s.commitFileLocked(fd)
    return true, nil
}
// importPackages ensures every ancestor namespace of pkg is registered in the
// trie and returns the packageSymbols node for pkg itself. It returns a nil
// node (with nil error) when a name collision was reported to handler but
// the handler chose not to abort.
func (s *Symbols) importPackages(pkgSpan ast.SourceSpan, pkg protoreflect.FullName, handler *reporter.Handler) (*packageSymbols, error) {
    if pkg == "" {
        return &s.pkgTrie, nil
    }
    cur := &s.pkgTrie
    enumerator := nameEnumerator{name: pkg}
    for prefix, ok := enumerator.next(); ok; prefix, ok = enumerator.next() {
        child, err := cur.importPackage(pkgSpan, prefix, handler)
        if err != nil {
            return nil, err
        }
        if child == nil {
            return nil, nil
        }
        cur = child
    }
    return cur, nil
}
// importPackage registers pkg as a child of this trie node, returning the
// child's symbol table. It returns (nil, nil) when pkg collides with an
// existing non-package symbol and the handler did not abort. This uses a
// double-checked locking pattern: an optimistic read-locked lookup, then a
// re-check under the write lock before mutating.
func (s *packageSymbols) importPackage(pkgSpan ast.SourceSpan, pkg protoreflect.FullName, handler *reporter.Handler) (*packageSymbols, error) {
    s.mu.RLock()
    existing, ok := s.symbols[pkg]
    var child *packageSymbols
    if ok && existing.isPackage {
        child = s.children[pkg]
    }
    s.mu.RUnlock()
    if ok && existing.isPackage {
        // package already exists
        return child, nil
    } else if ok {
        // name already used by something that is not a package
        return nil, reportSymbolCollision(pkgSpan, pkg, false, existing, handler)
    }
    s.mu.Lock()
    defer s.mu.Unlock()
    // have to double-check in case it was added while upgrading to write lock
    existing, ok = s.symbols[pkg]
    if ok && existing.isPackage {
        // package already exists
        return s.children[pkg], nil
    } else if ok {
        return nil, reportSymbolCollision(pkgSpan, pkg, false, existing, handler)
    }
    if s.symbols == nil {
        s.symbols = map[protoreflect.FullName]symbolEntry{}
    }
    s.symbols[pkg] = symbolEntry{span: pkgSpan, isPackage: true}
    child = &packageSymbols{}
    if s.children == nil {
        s.children = map[protoreflect.FullName]*packageSymbols{}
    }
    s.children[pkg] = child
    return child, nil
}
// getPackage returns the trie node for the given package name. When exact is
// true and some component of pkg is unregistered, nil is returned; otherwise
// the deepest existing ancestor node is returned, so the result is never nil
// when exact is false.
func (s *Symbols) getPackage(pkg protoreflect.FullName, exact bool) *packageSymbols {
    cur := &s.pkgTrie
    if pkg == "" {
        return cur
    }
    enumerator := nameEnumerator{name: pkg}
    for {
        prefix, ok := enumerator.next()
        if !ok {
            return cur
        }
        cur.mu.RLock()
        child := cur.children[prefix]
        cur.mu.RUnlock()
        if child == nil {
            if exact {
                return nil
            }
            return cur
        }
        cur = child
    }
}
// reportSymbolCollision reports (via handler) that fqn is defined in two
// places. The earlier of the two spans is named in the message while the
// later one is used as the error's position. Additional context is included
// when the conflict involves an enum value or a package name.
func reportSymbolCollision(span ast.SourceSpan, fqn protoreflect.FullName, additionIsEnumVal bool, existing symbolEntry, handler *reporter.Handler) error {
    // Because of the unusual scoping rules for enum values, give the user
    // more context when the conflict involves one.
    var suffix string
    if additionIsEnumVal || existing.isEnumValue {
        suffix = "; protobuf uses C++ scoping rules for enum values, so they exist in the scope enclosing the enum"
    }
    var isPkg string
    if existing.isPackage {
        isPkg = " as a package"
    }
    orig, conflict := existing.span, span
    if posLess(conflict.Start(), orig.Start()) {
        orig, conflict = conflict, orig
    }
    return handler.HandleErrorf(conflict, "symbol %q already defined%s at %v%s", fqn, isPkg, orig.Start(), suffix)
}
// posLess reports whether a comes strictly before b. Positions in different
// files are not comparable, in which case this returns false.
func posLess(a, b ast.SourcePos) bool {
    if a.Filename != b.Filename {
        return false
    }
    if a.Line != b.Line {
        return a.Line < b.Line
    }
    return a.Col < b.Col
}
// checkFileLocked reports a collision for every symbol in f that already
// exists in this package's table. The caller must hold s.mu.
func (s *packageSymbols) checkFileLocked(f protoreflect.FileDescriptor, handler *reporter.Handler) error {
    return walk.Descriptors(f, func(d protoreflect.Descriptor) error {
        existing, clash := s.symbols[d.FullName()]
        if !clash {
            return nil
        }
        _, isEnumVal := d.(protoreflect.EnumValueDescriptor)
        // A nil return here means the handler chose to keep going.
        return reportSymbolCollision(sourceSpanFor(d), d.FullName(), isEnumVal, existing, handler)
    })
}
// sourceSpanForPackage returns the span of fd's package declaration, or an
// unknown span when the file carries no source location info for it.
func sourceSpanForPackage(fd protoreflect.FileDescriptor) ast.SourceSpan {
    loc := fd.SourceLocations().ByPath([]int32{internal.FilePackageTag})
    if internal.IsZeroLocation(loc) {
        return ast.UnknownSpan(fd.Path())
    }
    start := ast.SourcePos{Filename: fd.Path(), Line: loc.StartLine, Col: loc.StartColumn}
    end := ast.SourcePos{Filename: fd.Path(), Line: loc.EndLine, Col: loc.EndColumn}
    return ast.NewSourceSpan(start, end)
}
// sourceSpanFor returns the span of d's name (or of d itself when no more
// specific location is recorded). For results parsed from source the AST is
// used; otherwise the file's source code info is consulted.
func sourceSpanFor(d protoreflect.Descriptor) ast.SourceSpan {
    file := d.ParentFile()
    if file == nil {
        return ast.UnknownSpan(unknownFilePath)
    }
    if res, ok := file.(*result); ok {
        return nameSpan(res.FileNode(), res.Node(protoutil.ProtoFromDescriptor(d)))
    }
    path, ok := internal.ComputePath(d)
    if !ok {
        return ast.UnknownSpan(file.Path())
    }
    // Prefer the span of the element's name field. Determine which tag
    // holds the name for this kind of descriptor; zero means "unknown
    // kind", in which case we fall back to the whole element's path.
    var nameTag int32
    switch d.(type) {
    case protoreflect.FieldDescriptor:
        nameTag = internal.FieldNameTag
    case protoreflect.MessageDescriptor:
        nameTag = internal.MessageNameTag
    case protoreflect.OneofDescriptor:
        nameTag = internal.OneofNameTag
    case protoreflect.EnumDescriptor:
        nameTag = internal.EnumNameTag
    case protoreflect.EnumValueDescriptor:
        nameTag = internal.EnumValNameTag
    case protoreflect.ServiceDescriptor:
        nameTag = internal.ServiceNameTag
    case protoreflect.MethodDescriptor:
        nameTag = internal.MethodNameTag
    }
    namePath := path
    if nameTag != 0 {
        namePath = append(namePath, nameTag)
    }
    loc := file.SourceLocations().ByPath(namePath)
    if internal.IsZeroLocation(loc) {
        // No location for the name; try the whole element.
        loc = file.SourceLocations().ByPath(path)
        if internal.IsZeroLocation(loc) {
            return ast.UnknownSpan(file.Path())
        }
    }
    return ast.NewSourceSpan(
        ast.SourcePos{Filename: file.Path(), Line: loc.StartLine, Col: loc.StartColumn},
        ast.SourcePos{Filename: file.Path(), Line: loc.EndLine, Col: loc.EndColumn},
    )
}
// sourceSpanForNumber returns the span of fd's tag number, falling back to
// the span of the whole field, or an unknown span when no location exists.
func sourceSpanForNumber(fd protoreflect.FieldDescriptor) ast.SourceSpan {
    file := fd.ParentFile()
    if file == nil {
        return ast.UnknownSpan(unknownFilePath)
    }
    path, ok := internal.ComputePath(fd)
    if !ok {
        return ast.UnknownSpan(file.Path())
    }
    numberPath := append(path, internal.FieldNumberTag)
    loc := file.SourceLocations().ByPath(numberPath)
    if internal.IsZeroLocation(loc) {
        // No location for the number itself; try the whole field.
        loc = file.SourceLocations().ByPath(path)
        if internal.IsZeroLocation(loc) {
            return ast.UnknownSpan(file.Path())
        }
    }
    start := ast.SourcePos{Filename: file.Path(), Line: loc.StartLine, Col: loc.StartColumn}
    end := ast.SourcePos{Filename: file.Path(), Line: loc.EndLine, Col: loc.EndColumn}
    return ast.NewSourceSpan(start, end)
}
// commitFileLocked records all of f's symbols and marks f as imported. The
// caller must hold s.mu and must already have verified there are no
// conflicts (so the walk callback below can never fail).
func (s *packageSymbols) commitFileLocked(f protoreflect.FileDescriptor) {
    if s.symbols == nil {
        s.symbols = map[protoreflect.FullName]symbolEntry{}
    }
    if s.exts == nil {
        s.exts = map[extNumber]ast.SourceSpan{}
    }
    if s.files == nil {
        s.files = map[protoreflect.FileDescriptor]struct{}{}
    }
    _ = walk.Descriptors(f, func(d protoreflect.Descriptor) error {
        _, isEnumValue := d.(protoreflect.EnumValueDescriptor)
        s.symbols[d.FullName()] = symbolEntry{span: sourceSpanFor(d), isEnumValue: isEnumValue}
        return nil
    })
    s.files[f] = struct{}{}
}
// importResultWithExtensions imports a parse/link result into pkg and then
// registers the tag number of every extension the result defines, using the
// AST for precise source spans.
func (s *Symbols) importResultWithExtensions(pkg *packageSymbols, r *result, handler *reporter.Handler) error {
    imported, err := pkg.importResult(r, handler)
    if err != nil || !imported {
        // Either the import failed, or the result was already imported (in
        // which case its extensions were registered then).
        return err
    }
    return walk.Descriptors(r, func(d protoreflect.Descriptor) error {
        fd, isExt := d.(*extTypeDescriptor)
        if !isExt {
            return nil
        }
        node := r.FieldNode(fd.FieldDescriptorProto())
        span := r.FileNode().NodeInfo(node.FieldTag())
        extendee := fd.ContainingMessage()
        return s.AddExtension(packageFor(extendee), extendee.FullName(), fd.Number(), span, handler)
    })
}
// importResult imports the symbols of a parse/link result into the table,
// creating its package hierarchy as needed. Unlike Import, this does not
// register the result's extension tag numbers.
func (s *Symbols) importResult(r *result, handler *reporter.Handler) error {
    pkg, err := s.importPackages(packageNameSpan(r), r.Package(), handler)
    if pkg == nil || err != nil {
        return err
    }
    if _, err := pkg.importResult(r, handler); err != nil {
        return err
    }
    return nil
}
// importResult records all of r's symbols in this package's table. It
// returns (false, nil) if r was already imported. This mirrors importFile
// but uses checkResultLocked, which reports collisions with AST-precise
// spans.
func (s *packageSymbols) importResult(r *result, handler *reporter.Handler) (bool, error) {
    s.mu.Lock()
    defer s.mu.Unlock()
    if _, ok := s.files[r]; ok {
        // already imported
        return false, nil
    }
    // first pass: check for conflicts
    if err := s.checkResultLocked(r, handler); err != nil {
        return false, err
    }
    // The handler may have accumulated non-aborting errors; don't commit
    // any symbols in that case either.
    if err := handler.Error(); err != nil {
        return false, err
    }
    // second pass: commit all symbols
    s.commitFileLocked(r)
    return true, nil
}
// checkResultLocked reports a collision for each symbol in r that conflicts
// either with a symbol already committed to this package or with an earlier
// symbol from r itself. The caller must hold s.mu.
func (s *packageSymbols) checkResultLocked(r *result, handler *reporter.Handler) error {
    resultSyms := map[protoreflect.FullName]symbolEntry{}
    return walk.Descriptors(r, func(d protoreflect.Descriptor) error {
        _, isEnumVal := d.(protoreflect.EnumValueDescriptor)
        name := d.FullName()
        span := nameSpan(r.FileNode(), r.Node(protoutil.ProtoFromDescriptor(d)))
        // Check against symbols already committed to this symbol table.
        if existing, ok := s.symbols[name]; ok {
            if err := reportSymbolCollision(span, name, isEnumVal, existing, handler); err != nil {
                return err
            }
        }
        // Also check against earlier symbols in this same result, which
        // have not yet been committed to the table.
        if existing, ok := resultSyms[name]; ok {
            if err := reportSymbolCollision(span, name, isEnumVal, existing, handler); err != nil {
                return err
            }
        }
        resultSyms[name] = symbolEntry{span: span, isEnumValue: isEnumVal}
        return nil
    })
}
// packageNameSpan returns the span of r's package declaration name, or an
// unknown span when r has no AST or no package statement.
func packageNameSpan(r *result) ast.SourceSpan {
    fileNode, ok := r.FileNode().(*ast.FileNode)
    if !ok {
        return ast.UnknownSpan(r.Path())
    }
    for _, decl := range fileNode.Decls {
        if pkgNode, isPkg := decl.(*ast.PackageNode); isPkg {
            return r.FileNode().NodeInfo(pkgNode.Name)
        }
    }
    return ast.UnknownSpan(r.Path())
}
// nameSpan returns the span of the name token of the given AST node, falling
// back to the span of the whole node when it has no distinguished name.
//
// NOTE(review): the case order looks significant — a node type may satisfy
// more than one of these interfaces, in which case the first matching case
// wins. Preserve the order when modifying; confirm against the ast package.
func nameSpan(file ast.FileDeclNode, n ast.Node) ast.SourceSpan {
    // TODO: maybe ast package needs a NamedNode interface to simplify this?
    switch n := n.(type) {
    case ast.FieldDeclNode:
        return file.NodeInfo(n.FieldName())
    case ast.MessageDeclNode:
        return file.NodeInfo(n.MessageName())
    case ast.OneofDeclNode:
        return file.NodeInfo(n.OneofName())
    case ast.EnumValueDeclNode:
        return file.NodeInfo(n.GetName())
    case *ast.EnumNode:
        return file.NodeInfo(n.Name)
    case *ast.ServiceNode:
        return file.NodeInfo(n.Name)
    case ast.RPCDeclNode:
        return file.NodeInfo(n.GetName())
    default:
        // No name accessor; use the whole node's span.
        return file.NodeInfo(n)
    }
}
// AddExtension records the given extension, which is used to ensure that no two files
// attempt to extend the same message using the same tag. The given pkg should be the
// package that defines extendee.
func (s *Symbols) AddExtension(pkg, extendee protoreflect.FullName, tag protoreflect.FieldNumber, span ast.SourceSpan, handler *reporter.Handler) error {
    if pkg != "" && !strings.HasPrefix(string(extendee), string(pkg)+".") {
        return handler.HandleErrorf(span, "could not register extension: extendee %q does not match package %q", extendee, pkg)
    }
    pkgSyms := s.getPackage(pkg, true)
    if pkgSyms == nil {
        // Should never happen: pkg was registered when the file defining
        // extendee was imported.
        return handler.HandleErrorf(span, "could not register extension: missing package symbols for %q", pkg)
    }
    return pkgSyms.addExtension(extendee, tag, span, handler)
}
// addExtension records a single extension tag, reporting an error when
// another extension already claimed the same (extendee, tag) pair.
func (s *packageSymbols) addExtension(extendee protoreflect.FullName, tag protoreflect.FieldNumber, span ast.SourceSpan, handler *reporter.Handler) error {
    s.mu.Lock()
    defer s.mu.Unlock()
    key := extNumber{extendee: extendee, tag: tag}
    if existing, taken := s.exts[key]; taken {
        return handler.HandleErrorf(span, "extension with tag %d for message %s already defined at %v", tag, extendee, existing.Start())
    }
    if s.exts == nil {
        s.exts = map[extNumber]ast.SourceSpan{}
    }
    s.exts[key] = span
    return nil
}
// AddExtensionDeclaration records the given extension declaration, which is used to
// ensure that no two declarations refer to the same extension.
func (s *Symbols) AddExtensionDeclaration(extension, extendee protoreflect.FullName, tag protoreflect.FieldNumber, span ast.SourceSpan, handler *reporter.Handler) error {
    s.extDeclsMu.Lock()
    defer s.extDeclsMu.Unlock()
    if existing, ok := s.extDecls[extension]; ok {
        if existing.extendee == extendee && existing.tag == tag {
            // Identical declaration already recorded; ignore the duplicate.
            return nil
        }
        return handler.HandleErrorf(span, "extension %s already declared as extending %s with tag %d at %v", extension, existing.extendee, existing.tag, existing.span.Start())
    }
    if s.extDecls == nil {
        s.extDecls = map[protoreflect.FullName]extDecl{}
    }
    s.extDecls[extension] = extDecl{span: span, extendee: extendee, tag: tag}
    return nil
}
// Lookup finds the registered location of the given name. If the given name has
// not been seen/registered, nil is returned.
func (s *Symbols) Lookup(name protoreflect.FullName) ast.SourceSpan {
    // getPackage never returns nil when exact is false.
    pkgSyms := s.getPackage(name, false)
    entry, found := pkgSyms.symbols[name]
    if !found {
        return nil
    }
    return entry.span
}
// LookupExtension finds the registered location of the given extension. If the given
// extension has not been seen/registered, nil is returned.
func (s *Symbols) LookupExtension(messageName protoreflect.FullName, extensionNumber protoreflect.FieldNumber) ast.SourceSpan {
    // getPackage never returns nil when exact is false; a missing map entry
    // yields a nil span.
    pkgSyms := s.getPackage(messageName, false)
    return pkgSyms.exts[extNumber{extendee: messageName, tag: extensionNumber}]
}
// nameEnumerator yields successive dotted prefixes of a fully-qualified
// name: for "a.b.c" it produces "a", then "a.b", then "a.b.c".
type nameEnumerator struct {
    name protoreflect.FullName
    // start is the index just past the last consumed dot; it is set to -1
    // once the enumeration is exhausted.
    start int
}
// next returns the next prefix of the enumerated name. The second result is
// false once all prefixes (ending with the full name) have been produced.
func (e *nameEnumerator) next() (protoreflect.FullName, bool) {
    if e.start < 0 {
        return "", false
    }
    rest := string(e.name[e.start:])
    if dot := strings.IndexByte(rest, '.'); dot >= 0 {
        end := e.start + dot
        e.start = end + 1
        return e.name[:end], true
    }
    // No more dots: the final prefix is the entire name.
    e.start = -1
    return e.name, true
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package linker
import (
"fmt"
"math"
"strings"
"unicode"
"unicode/utf8"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/descriptorpb"
"github.com/bufbuild/protocompile/ast"
"github.com/bufbuild/protocompile/internal"
"github.com/bufbuild/protocompile/protoutil"
"github.com/bufbuild/protocompile/reporter"
"github.com/bufbuild/protocompile/walk"
)
// ValidateOptions runs some validation checks on the result that can only
// be done after options are interpreted.
func (r *result) ValidateOptions(handler *reporter.Handler, symbols *Symbols) error {
    if err := r.validateFile(handler); err != nil {
        return err
    }
    return walk.Descriptors(r, func(d protoreflect.Descriptor) error {
        switch d := d.(type) {
        case protoreflect.FieldDescriptor:
            return r.validateField(d, handler)
        case protoreflect.MessageDescriptor:
            // Message validation needs a symbol table; fall back to an
            // empty one when the caller did not supply any.
            if symbols == nil {
                symbols = &Symbols{}
            }
            return r.validateMessage(d, handler, symbols)
        case protoreflect.EnumDescriptor:
            return r.validateEnum(d, handler)
        default:
            return nil
        }
    })
}
// validateFile checks file-wide option constraints: a non-lite file may not
// import a lite file, and certain file options are disallowed when the file
// uses editions.
func (r *result) validateFile(handler *reporter.Handler) error {
    opts := r.FileDescriptorProto().GetOptions()
    if opts.GetOptimizeFor() != descriptorpb.FileOptions_LITE_RUNTIME {
        // Non-lite files may not import lite files.
        imports := r.Imports()
        for i := range imports.Len() {
            dep := imports.Get(i)
            depOpts, ok := dep.Options().(*descriptorpb.FileOptions)
            if !ok {
                continue // what else to do?
            }
            if depOpts.GetOptimizeFor() != descriptorpb.FileOptions_LITE_RUNTIME {
                continue
            }
            if err := handler.HandleErrorf(r.getImportLocation(dep.Path()), "a file that does not use optimize_for=LITE_RUNTIME may not import file %q that does", dep.Path()); err != nil {
                return err
            }
        }
    }
    if isEditions(r) {
        // Validate features that may not be set at file scope.
        if opts.GetFeatures().GetFieldPresence() == descriptorpb.FeatureSet_LEGACY_REQUIRED {
            span := r.findOptionSpan(r, internal.FileOptionsFeaturesTag, internal.FeatureSetFieldPresenceTag)
            if err := handler.HandleErrorf(span, "LEGACY_REQUIRED field presence cannot be set as the default for a file"); err != nil {
                return err
            }
        }
        if opts != nil && opts.JavaStringCheckUtf8 != nil {
            span := r.findOptionSpan(r, internal.FileOptionsJavaStringCheckUTF8Tag)
            if err := handler.HandleErrorf(span, `file option java_string_check_utf8 is not allowed with editions; import "google/protobuf/java_features.proto" and use (pb.java).utf8_validation instead`); err != nil {
                return err
            }
        }
    }
    return nil
}
// validateField checks per-field option constraints that can only be
// verified after linking and option interpretation: packed usage, closed
// enums in implicit-presence fields, defaults, ctype, lazy, jstype,
// editions features, and (for extensions) extension-specific rules.
// Each check reports via handler and keeps going unless the handler aborts.
func (r *result) validateField(fld protoreflect.FieldDescriptor, handler *reporter.Handler) error {
    // Unwrap an extension type wrapper to get at the underlying descriptor.
    if xtd, ok := fld.(protoreflect.ExtensionTypeDescriptor); ok {
        fld = xtd.Descriptor()
    }
    fd, ok := fld.(*fldDescriptor)
    if !ok {
        // should not be possible
        return fmt.Errorf("field descriptor is wrong type: expecting %T, got %T", (*fldDescriptor)(nil), fld)
    }
    if err := r.validatePacked(fd, handler); err != nil {
        return err
    }
    if fd.Kind() == protoreflect.EnumKind {
        requiresOpen := !fd.IsList() && !fd.HasPresence()
        if requiresOpen && fd.Enum().IsClosed() {
            // Fields in a proto3 message cannot refer to proto2 enums.
            // In editions, this translates to implicit presence fields
            // not being able to refer to closed enums.
            // TODO: This really should be based solely on whether the enum's first
            // value is zero, NOT based on if it's open vs closed.
            // https://github.com/protocolbuffers/protobuf/issues/16249
            file := r.FileNode()
            info := file.NodeInfo(r.FieldNode(fd.proto).FieldType())
            if err := handler.HandleErrorf(info, "cannot use closed enum %s in a field with implicit presence", fd.Enum().FullName()); err != nil {
                return err
            }
        }
    }
    // Explicit defaults only make sense for fields that track presence.
    if fd.HasDefault() && !fd.HasPresence() {
        span := r.findScalarOptionSpan(r.FieldNode(fd.proto), "default")
        err := handler.HandleErrorf(span, "default value is not allowed on fields with implicit presence")
        if err != nil {
            return err
        }
    }
    if fd.proto.Options != nil && fd.proto.Options.Ctype != nil {
        if descriptorpb.Edition(r.Edition()) >= descriptorpb.Edition_EDITION_2024 {
            // We don't support edition 2024 yet, but we went ahead and mimic'ed this check
            // from protoc, which currently has experimental support for 2024.
            span := r.findOptionSpan(fd, internal.FieldOptionsCTypeTag)
            if err := handler.HandleErrorf(span, "ctype option cannot be used as of edition 2024; use features.string_type instead"); err != nil {
                return err
            }
        }
    }
    // lazy/unverified_lazy only apply to message fields (Get* methods are
    // nil-safe, so a nil Options is fine here).
    if (fd.proto.Options.GetLazy() || fd.proto.Options.GetUnverifiedLazy()) && fd.Kind() != protoreflect.MessageKind {
        var span ast.SourceSpan
        var optionName string
        if fd.proto.Options.GetLazy() {
            span = r.findOptionSpan(fd, internal.FieldOptionsLazyTag)
            optionName = "lazy"
        } else {
            span = r.findOptionSpan(fd, internal.FieldOptionsUnverifiedLazyTag)
            optionName = "unverified_lazy"
        }
        // Tailor the message for group fields, whose wording differs
        // between editions and pre-editions syntax.
        var suffix string
        if fd.Kind() == protoreflect.GroupKind {
            if isEditions(r) {
                suffix = " that use length-prefixed encoding"
            } else {
                suffix = ", not groups"
            }
        }
        if err := handler.HandleErrorf(span, "%s option can only be used with message fields%s", optionName, suffix); err != nil {
            return err
        }
    }
    if fd.proto.Options.GetJstype() != descriptorpb.FieldOptions_JS_NORMAL {
        switch fd.Kind() {
        case protoreflect.Int64Kind, protoreflect.Uint64Kind, protoreflect.Sint64Kind,
            protoreflect.Fixed64Kind, protoreflect.Sfixed64Kind:
            // allowed only for 64-bit integer types
        default:
            span := r.findOptionSpan(fd, internal.FieldOptionsJSTypeTag)
            err := handler.HandleErrorf(span, "only 64-bit integer fields (int64, uint64, sint64, fixed64, and sfixed64) can specify a jstype other than JS_NORMAL")
            if err != nil {
                return err
            }
        }
    }
    if isEditions(r) {
        if err := r.validateFieldFeatures(fd, handler); err != nil {
            return err
        }
    }
    if fld.IsExtension() {
        // More checks if this is an extension field.
        if err := r.validateExtension(fd, handler); err != nil {
            return err
        }
    }
    return nil
}
// validateExtension enforces constraints on an extension field that require
// a resolved extendee: message-set wire format restrictions, the tighter tag
// limit for non-message-set messages, lite/non-lite import rules, and
// conformance with any extension declarations on the targeted range. Errors
// are reported via handler; a non-nil return means the handler aborted.
//
// NB: It's a little gross that we don't enforce these in validateBasic().
// But it requires linking to resolve the extendee, so we can interrogate
// its descriptor.
func (r *result) validateExtension(fd *fldDescriptor, handler *reporter.Handler) error {
    msg := fd.ContainingMessage()
    if msg.Options().(*descriptorpb.MessageOptions).GetMessageSetWireFormat() { //nolint:errcheck
        // Message set wire format requires that all extensions be messages
        // themselves (no scalar extensions)
        if fd.Kind() != protoreflect.MessageKind {
            file := r.FileNode()
            info := file.NodeInfo(r.FieldNode(fd.proto).FieldType())
            err := handler.HandleErrorf(info, "messages with message-set wire format cannot contain scalar extensions, only messages")
            if err != nil {
                return err
            }
        }
        if fd.Cardinality() == protoreflect.Repeated {
            file := r.FileNode()
            info := file.NodeInfo(r.FieldNode(fd.proto).FieldLabel())
            err := handler.HandleErrorf(info, "messages with message-set wire format cannot contain repeated extensions, only optional")
            if err != nil {
                return err
            }
        }
    } else if fd.Number() > internal.MaxNormalTag {
        // In validateBasic() we just made sure these were within bounds for any message. But
        // now that things are linked, we can check if the extendee is messageset wire format
        // and, if not, enforce tighter limit.
        file := r.FileNode()
        info := file.NodeInfo(r.FieldNode(fd.proto).FieldTag())
        err := handler.HandleErrorf(info, "tag number %d is higher than max allowed tag number (%d)", fd.Number(), internal.MaxNormalTag)
        if err != nil {
            return err
        }
    }
    // A lite file may only extend messages defined in other lite files.
    fileOpts := r.FileDescriptorProto().GetOptions()
    if fileOpts.GetOptimizeFor() == descriptorpb.FileOptions_LITE_RUNTIME {
        extendeeFileOpts, _ := msg.ParentFile().Options().(*descriptorpb.FileOptions)
        if extendeeFileOpts.GetOptimizeFor() != descriptorpb.FileOptions_LITE_RUNTIME {
            file := r.FileNode()
            info := file.NodeInfo(r.FieldNode(fd.proto))
            err := handler.HandleErrorf(info, "extensions in a file that uses optimize_for=LITE_RUNTIME may not extend messages in file %q which does not", msg.ParentFile().Path())
            if err != nil {
                return err
            }
        }
    }
    // If the extendee uses extension declarations, make sure this extension matches.
    md := protoutil.ProtoFromMessageDescriptor(msg)
    for i, extRange := range md.ExtensionRange {
        if int32(fd.Number()) < extRange.GetStart() || int32(fd.Number()) >= extRange.GetEnd() {
            // Not the range that contains this extension's tag.
            continue
        }
        extRangeOpts := extRange.GetOptions()
        if extRangeOpts == nil {
            break
        }
        // Declarations are only enforced when present or when the range
        // explicitly requires declaration verification.
        if len(extRangeOpts.Declaration) == 0 && extRangeOpts.GetVerification() != descriptorpb.ExtensionRangeOptions_DECLARATION {
            break
        }
        var found bool
        for j, extDecl := range extRangeOpts.Declaration {
            if extDecl.GetNumber() != int32(fd.Number()) {
                continue
            }
            found = true
            if extDecl.GetReserved() {
                file := r.FileNode()
                info := file.NodeInfo(r.FieldNode(fd.proto).FieldTag())
                span, _ := findExtensionRangeOptionSpan(msg.ParentFile(), msg, i, extRange,
                    internal.ExtensionRangeOptionsDeclarationTag, int32(j), internal.ExtensionRangeOptionsDeclarationReservedTag)
                err := handler.HandleErrorf(info, "cannot use field number %d for an extension because it is reserved in declaration at %v",
                    fd.Number(), span.Start())
                if err != nil {
                    return err
                }
                break
            }
            // The declared full name must match the extension's (declaration
            // names are stored fully-qualified with a leading dot).
            if extDecl.GetFullName() != "."+string(fd.FullName()) {
                file := r.FileNode()
                info := file.NodeInfo(r.FieldNode(fd.proto).FieldName())
                span, _ := findExtensionRangeOptionSpan(msg.ParentFile(), msg, i, extRange,
                    internal.ExtensionRangeOptionsDeclarationTag, int32(j), internal.ExtensionRangeOptionsDeclarationFullNameTag)
                err := handler.HandleErrorf(info, "expected extension with number %d to be named %s, not %s, per declaration at %v",
                    fd.Number(), strings.TrimPrefix(extDecl.GetFullName(), "."), fd.FullName(), span.Start())
                if err != nil {
                    return err
                }
            }
            if extDecl.GetType() != getTypeName(fd) {
                file := r.FileNode()
                info := file.NodeInfo(r.FieldNode(fd.proto).FieldType())
                span, _ := findExtensionRangeOptionSpan(msg.ParentFile(), msg, i, extRange,
                    internal.ExtensionRangeOptionsDeclarationTag, int32(j), internal.ExtensionRangeOptionsDeclarationTypeTag)
                err := handler.HandleErrorf(info, "expected extension with number %d to have type %s, not %s, per declaration at %v",
                    fd.Number(), strings.TrimPrefix(extDecl.GetType(), "."), getTypeName(fd), span.Start())
                if err != nil {
                    return err
                }
            }
            if extDecl.GetRepeated() != (fd.Cardinality() == protoreflect.Repeated) {
                expected, actual := "repeated", "optional"
                if !extDecl.GetRepeated() {
                    expected, actual = actual, expected
                }
                file := r.FileNode()
                info := file.NodeInfo(r.FieldNode(fd.proto).FieldLabel())
                span, _ := findExtensionRangeOptionSpan(msg.ParentFile(), msg, i, extRange,
                    internal.ExtensionRangeOptionsDeclarationTag, int32(j), internal.ExtensionRangeOptionsDeclarationRepeatedTag)
                err := handler.HandleErrorf(info, "expected extension with number %d to be %s, not %s, per declaration at %v",
                    fd.Number(), expected, actual, span.Start())
                if err != nil {
                    return err
                }
            }
            break
        }
        if !found {
            file := r.FileNode()
            info := file.NodeInfo(r.FieldNode(fd.proto).FieldTag())
            // Fix: look up the span in the extendee's file (msg.ParentFile()),
            // not the extension's file. The extension range and its options
            // live in the file that declares the extendee, which may differ
            // from the file declaring this extension; all other lookups in
            // this function already use msg.ParentFile().
            span, _ := findExtensionRangeOptionSpan(msg.ParentFile(), msg, i, extRange,
                internal.ExtensionRangeOptionsVerificationTag)
            err := handler.HandleErrorf(info, "expected extension with number %d to be declared in type %s, but no declaration found at %v",
                fd.Number(), fd.ContainingMessage().FullName(), span.Start())
            if err != nil {
                return err
            }
        }
    }
    return nil
}
// validatePacked verifies the field's packed option: it is disallowed
// entirely under editions, and otherwise only valid on repeated fields of
// packable (numeric, boolean, or enum) types.
func (r *result) validatePacked(fd *fldDescriptor, handler *reporter.Handler) error {
    if isEditions(r) && fd.proto.Options != nil && fd.proto.Options.Packed != nil {
        span := r.findOptionSpan(fd, internal.FieldOptionsPackedTag)
        if err := handler.HandleErrorf(span, "packed option cannot be used with editions; use features.repeated_field_encoding=PACKED instead"); err != nil {
            return err
        }
    }
    if !fd.proto.GetOptions().GetPacked() {
        // Nothing further to check unless packed is explicitly true.
        return nil
    }
    if fd.proto.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED {
        info := r.FileNode().NodeInfo(r.FieldNode(fd.proto).FieldLabel())
        if err := handler.HandleErrorf(info, "packed option is only allowed on repeated fields"); err != nil {
            return err
        }
    }
    switch fd.proto.GetType() {
    case descriptorpb.FieldDescriptorProto_TYPE_STRING, descriptorpb.FieldDescriptorProto_TYPE_BYTES,
        descriptorpb.FieldDescriptorProto_TYPE_MESSAGE, descriptorpb.FieldDescriptorProto_TYPE_GROUP:
        // Length-prefixed and group-encoded types cannot be packed.
        info := r.FileNode().NodeInfo(r.FieldNode(fd.proto).FieldType())
        if err := handler.HandleErrorf(info, "packed option is only allowed on numeric, boolean, and enum fields"); err != nil {
            return err
        }
    }
    return nil
}
// validateFieldFeatures checks that any editions features explicitly set in
// the field's options (field_presence, repeated_field_encoding,
// utf8_validation, message_encoding) are legal for this kind of field.
func (r *result) validateFieldFeatures(fld *fldDescriptor, handler *reporter.Handler) error {
	if msg, ok := fld.Parent().(*msgDescriptor); ok && msg.proto.GetOptions().GetMapEntry() {
		// Skip validating features on fields of synthetic map entry messages.
		// We blindly propagate them from the map field's features, but some may
		// really only apply to the map field and not to a key or value entry field.
		return nil
	}
	features := fld.proto.GetOptions().GetFeatures()
	if features == nil {
		// No features to validate.
		return nil
	}
	if features.FieldPresence != nil {
		// field_presence may not be set on oneof members, repeated fields, or
		// extensions; message fields may not use implicit presence.
		switch {
		case fld.proto.OneofIndex != nil:
			span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetFieldPresenceTag)
			if err := handler.HandleErrorf(span, "oneof fields may not specify field presence"); err != nil {
				return err
			}
		case fld.Cardinality() == protoreflect.Repeated:
			span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetFieldPresenceTag)
			if err := handler.HandleErrorf(span, "repeated fields may not specify field presence"); err != nil {
				return err
			}
		case fld.IsExtension():
			span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetFieldPresenceTag)
			if err := handler.HandleErrorf(span, "extension fields may not specify field presence"); err != nil {
				return err
			}
		case fld.Message() != nil && features.GetFieldPresence() == descriptorpb.FeatureSet_IMPLICIT:
			span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetFieldPresenceTag)
			if err := handler.HandleErrorf(span, "message fields may not specify implicit presence"); err != nil {
				return err
			}
		}
	}
	if features.RepeatedFieldEncoding != nil {
		// repeated_field_encoding only applies to repeated fields, and PACKED
		// only to packable element kinds.
		if fld.Cardinality() != protoreflect.Repeated {
			span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetRepeatedFieldEncodingTag)
			if err := handler.HandleErrorf(span, "only repeated fields may specify repeated field encoding"); err != nil {
				return err
			}
		} else if !internal.CanPack(fld.Kind()) && features.GetRepeatedFieldEncoding() == descriptorpb.FeatureSet_PACKED {
			span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetRepeatedFieldEncodingTag)
			if err := handler.HandleErrorf(span, "only repeated primitive fields may specify packed encoding"); err != nil {
				return err
			}
		}
	}
	if features.Utf8Validation != nil {
		// utf8_validation applies only to string fields; a map field qualifies
		// if either its key or its value is a string.
		isMap := fld.IsMap()
		if (!isMap && fld.Kind() != protoreflect.StringKind) ||
			(isMap &&
				fld.MapKey().Kind() != protoreflect.StringKind &&
				fld.MapValue().Kind() != protoreflect.StringKind) {
			span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetUTF8ValidationTag)
			if err := handler.HandleErrorf(span, "only string fields may specify UTF8 validation"); err != nil {
				return err
			}
		}
	}
	if features.MessageEncoding != nil {
		// message_encoding applies only to non-map message fields.
		if fld.Message() == nil || fld.IsMap() {
			span := r.findOptionSpan(fld, internal.FieldOptionsFeaturesTag, internal.FeatureSetMessageEncodingTag)
			if err := handler.HandleErrorf(span, "only message fields may specify message encoding"); err != nil {
				return err
			}
		}
	}
	return nil
}
// validateMessage performs post-linking checks on a message: JSON name
// uniqueness among its fields and validity of extension declarations.
func (r *result) validateMessage(d protoreflect.MessageDescriptor, handler *reporter.Handler, symbols *Symbols) error {
	msg, ok := d.(*msgDescriptor)
	if !ok {
		// Should never happen: all message descriptors produced here are *msgDescriptor.
		return fmt.Errorf("message descriptor is wrong type: expecting %T, got %T", (*msgDescriptor)(nil), d)
	}
	if err := r.validateJSONNamesInMessage(msg, handler); err != nil {
		return err
	}
	return r.validateExtensionDeclarations(msg, handler, symbols)
}
// validateJSONNamesInMessage checks the message's fields for JSON name
// conflicts, first among default JSON names and then among effective names
// that include explicit json_name options.
func (r *result) validateJSONNamesInMessage(md *msgDescriptor, handler *reporter.Handler) error {
	for _, useCustom := range []bool{false, true} {
		if err := r.validateFieldJSONNames(md, useCustom, handler); err != nil {
			return err
		}
	}
	return nil
}
// validateEnum performs post-linking checks on an enum: an open enum's first
// value must be zero, and values' JSON names must not conflict.
func (r *result) validateEnum(d protoreflect.EnumDescriptor, handler *reporter.Handler) error {
	ed, ok := d.(*enumDescriptor)
	if !ok {
		// should not be possible
		return fmt.Errorf("enum descriptor is wrong type: expecting %T, got %T", (*enumDescriptor)(nil), d)
	}
	// NOTE(review): Get(0) assumes the enum has at least one value; a valid
	// enum must, and this is presumably enforced earlier — confirm.
	firstValue := ed.Values().Get(0)
	if !ed.IsClosed() && firstValue.Number() != 0 {
		// TODO: This check doesn't really belong here. Whether the
		// first value is zero should be orthogonal to whether the
		// allowed values are open or closed.
		// https://github.com/protocolbuffers/protobuf/issues/16249
		file := r.FileNode()
		evd, ok := firstValue.(*enValDescriptor)
		if !ok {
			// should not be possible
			return fmt.Errorf("enum value descriptor is wrong type: expecting %T, got %T", (*enValDescriptor)(nil), firstValue)
		}
		info := file.NodeInfo(r.EnumValueNode(evd.proto).GetNumber())
		if err := handler.HandleErrorf(info, "first value of open enum %s must have numeric value zero", ed.FullName()); err != nil {
			return err
		}
	}
	if err := r.validateJSONNamesInEnum(ed, handler); err != nil {
		return err
	}
	return nil
}
// validateJSONNamesInEnum reports conflicts between canonical (camel-cased,
// prefix-stripped) enum value names, which would collide in JSON. Aliases
// (values with the same number) are allowed to share a canonical name.
// Conflicts are demoted to warnings when the enum is not JSON-compliant.
func (r *result) validateJSONNamesInEnum(ed *enumDescriptor, handler *reporter.Handler) error {
	// Maps canonical name to the first value that claimed it.
	seen := map[string]*descriptorpb.EnumValueDescriptorProto{}
	for _, evd := range ed.proto.GetValue() {
		scope := "enum value " + ed.proto.GetName() + "." + evd.GetName()
		name := canonicalEnumValueName(evd.GetName(), ed.proto.GetName())
		if existing, ok := seen[name]; ok && evd.GetNumber() != existing.GetNumber() {
			fldNode := r.EnumValueNode(evd)
			existingNode := r.EnumValueNode(existing)
			conflictErr := fmt.Errorf("%s: camel-case name (with optional enum name prefix removed) %q conflicts with camel-case name of enum value %s, defined at %v",
				scope, name, existing.GetName(), r.FileNode().NodeInfo(existingNode).Start())
			// Since proto2 did not originally have a JSON format, we report conflicts as just warnings.
			// With editions, not fully supporting JSON is allowed via feature: json_format == BEST_EFFORT
			if !isJSONCompliant(ed) {
				handler.HandleWarningWithPos(r.FileNode().NodeInfo(fldNode), conflictErr)
			} else if err := handler.HandleErrorWithPos(r.FileNode().NodeInfo(fldNode), conflictErr); err != nil {
				return err
			}
		} else {
			seen[name] = evd
		}
	}
	return nil
}
// validateFieldJSONNames reports JSON name conflicts among the fields of md.
// It is called twice per message: once with useCustom=false, comparing only
// default JSON names, and once with useCustom=true, comparing effective names
// that take explicit json_name options into account.
func (r *result) validateFieldJSONNames(md *msgDescriptor, useCustom bool, handler *reporter.Handler) error {
	type jsonName struct {
		// source is the field that claimed this JSON name first.
		source *descriptorpb.FieldDescriptorProto
		// true if orig is a custom JSON name (vs. the field's default JSON name)
		custom bool
	}
	seen := map[string]jsonName{}
	for _, fd := range md.proto.GetField() {
		scope := "field " + md.proto.GetName() + "." + fd.GetName()
		defaultName := internal.JSONName(fd.GetName())
		name := defaultName
		custom := false
		if useCustom {
			n := fd.GetJsonName()
			// Treat the name as custom if it differs from the default or the
			// AST shows an explicit json_name option (even one that matches
			// the default).
			if n != defaultName || r.hasCustomJSONName(fd) {
				name = n
				custom = true
			}
		}
		if existing, ok := seen[name]; ok {
			// When useCustom is true, we'll only report an issue when a conflict is
			// due to a custom name. That way, we don't double report conflicts on
			// non-custom names.
			if !useCustom || custom || existing.custom {
				fldNode := r.FieldNode(fd)
				customStr, srcCustomStr := "custom", "custom"
				if !custom {
					customStr = "default"
				}
				if !existing.custom {
					srcCustomStr = "default"
				}
				info := r.FileNode().NodeInfo(fldNode)
				conflictErr := reporter.Errorf(info, "%s: %s JSON name %q conflicts with %s JSON name of field %s, defined at %v",
					scope, customStr, name, srcCustomStr, existing.source.GetName(), r.FileNode().NodeInfo(r.FieldNode(existing.source)).Start())
				// Since proto2 did not originally have default JSON names, we report conflicts
				// between default names (neither is a custom name) as just warnings.
				// With editions, not fully supporting JSON is allowed via feature: json_format == BEST_EFFORT
				if !isJSONCompliant(md) && !custom && !existing.custom {
					handler.HandleWarning(conflictErr)
				} else if err := handler.HandleError(conflictErr); err != nil {
					return err
				}
			}
		} else {
			seen[name] = jsonName{source: fd, custom: custom}
		}
	}
	return nil
}
// validateExtensionDeclarations checks the declaration options on each of the
// message's extension ranges: verification-mode consistency, required fields
// on each declaration, numbers within range and not duplicated, and
// well-formed full_name and type values.
func (r *result) validateExtensionDeclarations(md *msgDescriptor, handler *reporter.Handler, symbols *Symbols) error {
	for i, extRange := range md.proto.ExtensionRange {
		opts := extRange.GetOptions()
		if len(opts.GetDeclaration()) == 0 {
			// nothing to check
			continue
		}
		// If any declarations are present, verification is assumed to be
		// DECLARATION. It's an error for declarations to be present but the
		// verification field explicitly set to something other than that.
		if opts.Verification != nil && opts.GetVerification() != descriptorpb.ExtensionRangeOptions_DECLARATION {
			span, ok := findExtensionRangeOptionSpan(r, md, i, extRange, internal.ExtensionRangeOptionsVerificationTag)
			if !ok {
				// Fall back to pointing at the first declaration.
				span, _ = findExtensionRangeOptionSpan(r, md, i, extRange, internal.ExtensionRangeOptionsDeclarationTag, 0)
			}
			if err := handler.HandleErrorf(span, "extension range cannot have declarations and have verification of %s", opts.GetVerification()); err != nil {
				return err
			}
		}
		// Track the source position of each declared tag number so a duplicate
		// can reference the original declaration.
		declsByTag := map[int32]ast.SourcePos{}
		// NOTE: this loop variable previously shadowed the range index i and
		// the shadowed value was passed as the extension range index to
		// findExtensionRangeOptionSpan, yielding wrong source paths in the
		// source-code-info fallback; declIdx is the declaration index and i
		// remains the range index.
		for declIdx, extDecl := range extRange.GetOptions().GetDeclaration() {
			if extDecl.Number == nil {
				span, _ := findExtensionRangeOptionSpan(r, md, i, extRange, internal.ExtensionRangeOptionsDeclarationTag, int32(declIdx))
				if err := handler.HandleErrorf(span, "extension declaration is missing required field number"); err != nil {
					return err
				}
			} else {
				extensionNumberSpan, _ := findExtensionRangeOptionSpan(r, md, i, extRange,
					internal.ExtensionRangeOptionsDeclarationTag, int32(declIdx), internal.ExtensionRangeOptionsDeclarationNumberTag)
				if extDecl.GetNumber() < extRange.GetStart() || extDecl.GetNumber() >= extRange.GetEnd() {
					// Number is out of range.
					// See if one of the other ranges on the same extends statement includes the number,
					// so we can provide a helpful message.
					var suffix string
					if extRangeNode, ok := r.ExtensionsNode(extRange).(*ast.ExtensionRangeNode); ok {
						for _, rng := range extRangeNode.Ranges {
							start, _ := rng.StartVal.AsInt64()
							var end int64
							switch {
							case rng.Max != nil:
								// "max" keyword: range runs to the maximum.
								end = math.MaxInt64
							case rng.EndVal != nil:
								end, _ = rng.EndVal.AsInt64()
							default:
								// Single-number range.
								end = start
							}
							if int64(extDecl.GetNumber()) >= start && int64(extDecl.GetNumber()) <= end {
								// Found another range that matches
								suffix = "; when using declarations, extends statements should indicate only a single span of field numbers"
								break
							}
						}
					}
					err := handler.HandleErrorf(extensionNumberSpan, "extension declaration has number outside the range: %d not in [%d,%d]%s",
						extDecl.GetNumber(), extRange.GetStart(), extRange.GetEnd()-1, suffix)
					if err != nil {
						return err
					}
				} else {
					// Valid number; make sure it's not a duplicate
					if existing, ok := declsByTag[extDecl.GetNumber()]; ok {
						err := handler.HandleErrorf(extensionNumberSpan, "extension for tag number %d already declared at %v",
							extDecl.GetNumber(), existing)
						if err != nil {
							return err
						}
					} else {
						declsByTag[extDecl.GetNumber()] = extensionNumberSpan.Start()
					}
				}
			}
			if extDecl.FullName == nil && !extDecl.GetReserved() {
				span, _ := findExtensionRangeOptionSpan(r, md, i, extRange, internal.ExtensionRangeOptionsDeclarationTag, int32(declIdx))
				if err := handler.HandleErrorf(span, "extension declaration that is not marked reserved must have a full_name"); err != nil {
					return err
				}
			} else if extDecl.FullName != nil {
				var extensionFullName protoreflect.FullName
				extensionNameSpan, _ := findExtensionRangeOptionSpan(r, md, i, extRange,
					internal.ExtensionRangeOptionsDeclarationTag, int32(declIdx), internal.ExtensionRangeOptionsDeclarationFullNameTag)
				if !strings.HasPrefix(extDecl.GetFullName(), ".") {
					if err := handler.HandleErrorf(extensionNameSpan, "extension declaration full name %q should start with a leading dot (.)", extDecl.GetFullName()); err != nil {
						return err
					}
					extensionFullName = protoreflect.FullName(extDecl.GetFullName())
				} else {
					extensionFullName = protoreflect.FullName(extDecl.GetFullName()[1:])
				}
				if !extensionFullName.IsValid() {
					if err := handler.HandleErrorf(extensionNameSpan, "extension declaration full name %q is not a valid qualified name", extDecl.GetFullName()); err != nil {
						return err
					}
				}
				if err := symbols.AddExtensionDeclaration(extensionFullName, md.FullName(), protoreflect.FieldNumber(extDecl.GetNumber()), extensionNameSpan, handler); err != nil {
					return err
				}
			}
			if extDecl.Type == nil && !extDecl.GetReserved() {
				span, _ := findExtensionRangeOptionSpan(r, md, i, extRange, internal.ExtensionRangeOptionsDeclarationTag, int32(declIdx))
				if err := handler.HandleErrorf(span, "extension declaration that is not marked reserved must have a type"); err != nil {
					return err
				}
			} else if extDecl.Type != nil {
				if strings.HasPrefix(extDecl.GetType(), ".") {
					if !protoreflect.FullName(extDecl.GetType()[1:]).IsValid() {
						span, _ := findExtensionRangeOptionSpan(r, md, i, extRange,
							internal.ExtensionRangeOptionsDeclarationTag, int32(declIdx), internal.ExtensionRangeOptionsDeclarationTypeTag)
						if err := handler.HandleErrorf(span, "extension declaration type %q is not a valid qualified name", extDecl.GetType()); err != nil {
							return err
						}
					}
				} else if !isBuiltinTypeName(extDecl.GetType()) {
					span, _ := findExtensionRangeOptionSpan(r, md, i, extRange,
						internal.ExtensionRangeOptionsDeclarationTag, int32(declIdx), internal.ExtensionRangeOptionsDeclarationTypeTag)
					if err := handler.HandleErrorf(span, "extension declaration type %q must be a builtin type or start with a leading dot (.)", extDecl.GetType()); err != nil {
						return err
					}
				}
			}
			if extDecl.GetReserved() && (extDecl.FullName == nil) != (extDecl.Type == nil) {
				// Reserved declarations must set full_name and type together
				// or not at all; point at whichever one was set.
				var fieldTag int32
				if extDecl.FullName != nil {
					fieldTag = internal.ExtensionRangeOptionsDeclarationFullNameTag
				} else {
					fieldTag = internal.ExtensionRangeOptionsDeclarationTypeTag
				}
				span, _ := findExtensionRangeOptionSpan(r, md, i, extRange,
					internal.ExtensionRangeOptionsDeclarationTag, int32(declIdx), fieldTag)
				if err := handler.HandleErrorf(span, "extension declarations that are reserved should specify both full_name and type or neither"); err != nil {
					return err
				}
			}
		}
	}
	return nil
}
// hasCustomJSONName reports whether the field's AST contains an explicit
// json_name option, even one set to the same value as the default JSON name.
func (r *result) hasCustomJSONName(fdProto *descriptorpb.FieldDescriptorProto) bool {
	// if we have the AST, we can more precisely determine if there was a custom
	// JSON name defined, even if it is explicitly configured to be the same
	// as the default JSON name for the field.
	opts := r.FieldNode(fdProto).GetOptions()
	if opts == nil {
		return false
	}
	for _, opt := range opts.Options {
		// Looking for a single-part, non-extension option named "json_name".
		if len(opt.Name.Parts) == 1 &&
			opt.Name.Parts[0].Name.AsIdentifier() == "json_name" &&
			!opt.Name.Parts[0].IsExtension() {
			return true
		}
	}
	return false
}
func canonicalEnumValueName(enumValueName, enumName string) string {
return enumValCamelCase(removePrefix(enumValueName, enumName))
}
// removePrefix is used to remove the given prefix from the given str. It does not require
// an exact match and ignores case and underscores. If the all non-underscore characters
// would be removed from str, str is returned unchanged. If str does not have the given
// prefix (even with the very lenient matching, in regard to case and underscores), then
// str is returned unchanged.
//
// The algorithm is adapted from the protoc source:
//
// https://github.com/protocolbuffers/protobuf/blob/v21.3/src/google/protobuf/descriptor.cc#L922
func removePrefix(str, prefix string) string {
	prefixIdx := 0
	for strIdx, ch := range str {
		// Underscores in the input are ignored for matching purposes.
		if ch == '_' {
			continue
		}
		// Likewise skip over any underscores in the prefix.
		pr, prSize := utf8.DecodeRuneInString(prefix[prefixIdx:])
		for pr == '_' {
			prefixIdx += prSize
			pr, prSize = utf8.DecodeRuneInString(prefix[prefixIdx:])
		}
		if prefixIdx == len(prefix) {
			// The whole prefix matched: strip it, along with any separator
			// underscores, from the input.
			remainder := strings.TrimLeft(str[strIdx:], "_")
			if remainder == "" {
				// Refuse to produce an empty name.
				return str
			}
			return remainder
		}
		if unicode.ToLower(ch) != unicode.ToLower(pr) {
			// Mismatch: str does not start with prefix.
			return str
		}
		prefixIdx += prSize // consume matched rune of prefix
	}
	// Input exhausted while (at most) matching the prefix: leave unchanged.
	return str
}
// enumValCamelCase converts the given string to upper-camel-case.
//
// The algorithm is adapted from the protoc source:
//
// https://github.com/protocolbuffers/protobuf/blob/v21.3/src/google/protobuf/descriptor.cc#L887
func enumValCamelCase(name string) string {
	out := make([]rune, 0, len(name))
	upperNext := true // capitalize the first letter and each letter after '_'
	for _, ch := range name {
		switch {
		case ch == '_':
			// Underscores are dropped; they just mark a word boundary.
			upperNext = true
		case upperNext:
			out = append(out, unicode.ToUpper(ch))
			upperNext = false
		default:
			out = append(out, unicode.ToLower(ch))
		}
	}
	return string(out)
}
// isBuiltinTypeName reports whether typeName is one of the protobuf scalar
// type keywords (as allowed in extension declaration "type" values).
func isBuiltinTypeName(typeName string) bool {
	switch typeName {
	case "bool", "string", "bytes",
		"double", "float",
		"int32", "sint32", "uint32", "fixed32", "sfixed32",
		"int64", "sint64", "uint64", "fixed64", "sfixed64":
		return true
	}
	return false
}
// getTypeName renders the field's type as it would appear in an extension
// declaration: fully-qualified (with leading dot) for message/group/enum
// fields, and the scalar keyword otherwise.
func getTypeName(fd protoreflect.FieldDescriptor) string {
	kind := fd.Kind()
	switch kind {
	case protoreflect.MessageKind, protoreflect.GroupKind:
		return "." + string(fd.Message().FullName())
	case protoreflect.EnumKind:
		return "." + string(fd.Enum().FullName())
	}
	return kind.String()
}
// findExtensionRangeOptionSpan locates the best source span for the option
// value identified by path (relative to the extension range's options) on the
// given extension range of the extended message. The boolean result reports
// whether the span is an exact/definitive answer; when false, callers may
// retry with an alternate path.
func findExtensionRangeOptionSpan(
	file protoreflect.FileDescriptor,
	extended protoreflect.MessageDescriptor,
	extRangeIndex int,
	extRange *descriptorpb.DescriptorProto_ExtensionRange,
	path ...int32,
) (ast.SourceSpan, bool) {
	// NB: Typically, we have an AST for a file and NOT source code info, because the
	// compiler validates options before computing source code info. However, we might
	// be validating an extension (whose source/AST we have), but whose extendee (and
	// thus extension range options for declarations) could be in some other file, which
	// could be provided to the compiler as an already-compiled descriptor. So this
	// function can fallback to using source code info if an AST is not available.
	if r, ok := file.(Result); ok && r.AST() != nil {
		// Find the location using the AST, which will generally be higher fidelity
		// than what we might find in a file descriptor's source code info.
		exts := r.ExtensionsNode(extRange)
		return findOptionSpan(r.FileNode(), exts, extRange.Options.ProtoReflect().Descriptor(), path...)
	}
	srcLocs := file.SourceLocations()
	if srcLocs.Len() == 0 {
		// no source code info, can't do any better than the filename. We
		// return true as the boolean so the caller doesn't try again with
		// an alternate path, since we won't be able to do any better.
		return ast.UnknownSpan(file.Path()), true
	}
	msgPath, ok := internal.ComputePath(extended)
	if !ok {
		// Same as above: return true since no subsequent query can do better.
		return ast.UnknownSpan(file.Path()), true
	}
	//nolint:gocritic // intentionally assigning to different slice variables
	extRangePath := append(msgPath, internal.MessageExtensionRangesTag, int32(extRangeIndex))
	optsPath := append(extRangePath, internal.ExtensionRangeOptionsTag) //nolint:gocritic
	fullPath := append(optsPath, path...)                               //nolint:gocritic
	srcLoc := srcLocs.ByPath(fullPath)
	if srcLoc.Path != nil {
		// found it
		return asSpan(file.Path(), srcLoc), true
	}
	// Slow path to find closest match :/
	// We look for longest matching path that is at least len(extRangePath)
	// long. If we find a path that is longer (meaning a path that points INSIDE
	// the request element), accept the first such location.
	var bestMatch protoreflect.SourceLocation
	var bestMatchPathLen int
	for i, length := 0, srcLocs.Len(); i < length; i++ {
		srcLoc := srcLocs.Get(i)
		if len(srcLoc.Path) >= len(extRangePath) &&
			isDescendantPath(fullPath, srcLoc.Path) &&
			len(srcLoc.Path) > bestMatchPathLen {
			// srcLoc.Path is a prefix of fullPath: an enclosing element.
			// Keep the most specific (longest) one.
			bestMatch = srcLoc
			bestMatchPathLen = len(srcLoc.Path)
		} else if isDescendantPath(srcLoc.Path, fullPath) {
			// srcLoc points INSIDE the requested element. (This must compare
			// against the absolute fullPath; the original compared against
			// the relative path, which could only match coincidentally.)
			return asSpan(file.Path(), srcLoc), false
		}
	}
	if bestMatchPathLen > 0 {
		return asSpan(file.Path(), bestMatch), false
	}
	return ast.UnknownSpan(file.Path()), false
}
// findScalarOptionSpan returns the span of the first non-custom option on
// root whose single-part name equals name; if none matches, it returns the
// span of root itself.
func (r *result) findScalarOptionSpan(
	root ast.NodeWithOptions,
	name string,
) ast.SourceSpan {
	found := ast.Node(root)
	root.RangeOptions(func(opt *ast.OptionNode) bool {
		parts := opt.Name.Parts
		if len(parts) == 1 && !parts[0].IsExtension() &&
			string(parts[0].Name.AsIdentifier()) == name {
			found = opt
			return false // stop iterating
		}
		return true
	})
	return r.FileNode().NodeInfo(found)
}
// findOptionSpan returns the best source span for the option identified by
// path within the options of the given descriptor's AST node.
func (r *result) findOptionSpan(
	d protoutil.DescriptorProtoWrapper,
	path ...int32,
) ast.SourceSpan {
	node := r.Node(d.AsProto())
	withOpts, ok := node.(ast.NodeWithOptions)
	if !ok {
		// No options to search: point at the element itself.
		return r.FileNode().NodeInfo(node)
	}
	span, _ := findOptionSpan(r.FileNode(), withOpts, d.Options().ProtoReflect().Descriptor(), path...)
	return span
}
// findOptionSpan searches the options of root for the value identified by
// path (field numbers, with an index after each repeated field, relative to
// md, the options message type). It returns the most specific matching node's
// span and whether the match was exact (all path elements matched).
func findOptionSpan(
	file ast.FileDeclNode,
	root ast.NodeWithOptions,
	md protoreflect.MessageDescriptor,
	path ...int32,
) (ast.SourceSpan, bool) {
	bestMatch := ast.Node(root)
	var bestMatchLen int
	// Tracks the running index of each repeated field encountered, shared
	// across all option statements visited.
	var repeatedIndices []int
	root.RangeOptions(func(n *ast.OptionNode) bool {
		desc := md
		// Compare only as many option-name parts as there are path elements.
		limit := len(n.Name.Parts)
		if limit > len(path) {
			limit = len(path)
		}
		var nextIsIndex bool
		for i := range limit {
			if desc == nil || nextIsIndex {
				// Can't match anymore. Try next option.
				return true
			}
			wantField := desc.Fields().ByNumber(protoreflect.FieldNumber(path[i]))
			if wantField == nil {
				// Should not be possible... next option won't fare any better since
				// it's a disagreement between given path and given descriptor so bail.
				return false
			}
			if n.Name.Parts[i].Open != nil ||
				string(n.Name.Parts[i].Name.AsIdentifier()) != string(wantField.Name()) {
				// This is an extension/custom option or indicates the wrong name.
				// Try the next one.
				return true
			}
			desc = wantField.Message()
			nextIsIndex = wantField.Cardinality() == protoreflect.Repeated
		}
		// If we made it this far, we've matched everything so far.
		if len(n.Name.Parts) >= len(path) {
			// Either an exact match (if equal) or this option points *inside* the
			// item we care about (if greater). Either way, the first such result
			// is a keeper.
			bestMatch = n.Name.Parts[len(path)-1]
			bestMatchLen = len(n.Name.Parts)
			return false
		}
		// We've got more path elements to try to match with the value.
		match, matchLen := findMatchingValueNode(
			desc,
			path[len(n.Name.Parts):],
			nextIsIndex,
			0,
			&repeatedIndices,
			n,
			n.Val)
		if match != nil {
			totalMatchLen := matchLen + len(n.Name.Parts)
			if totalMatchLen > bestMatchLen {
				bestMatch, bestMatchLen = match, totalMatchLen
			}
		}
		return bestMatchLen != len(path) // no exact match, so keep looking
	})
	return file.NodeInfo(bestMatch), bestMatchLen == len(path)
}
// findMatchingValueNode recursively descends into an option value (node/val),
// consuming path elements as they match. md is the message type of the
// current value (nil if not a message); currIsRepeated means the next path
// element is an index into a repeated field. repeatedIndices tracks, per
// repeated-field depth, how many elements have been seen so far across
// sibling option statements. It returns the deepest matching node and the
// number of path elements consumed (nil, 0 on no match).
func findMatchingValueNode(
	md protoreflect.MessageDescriptor,
	path protoreflect.SourcePath,
	currIsRepeated bool,
	repeatedCount int,
	repeatedIndices *[]int,
	node ast.Node,
	val ast.ValueNode,
) (ast.Node, int) {
	var matchLen int
	var index int
	if currIsRepeated {
		// Compute the index of the current value (or, if an array literal, the
		// index of the first value in the array).
		if len(*repeatedIndices) > repeatedCount {
			(*repeatedIndices)[repeatedCount]++
			index = (*repeatedIndices)[repeatedCount]
		} else {
			*repeatedIndices = append(*repeatedIndices, 0)
			index = 0
		}
		repeatedCount++
	}
	if arrayVal, ok := val.(*ast.ArrayLiteralNode); ok {
		if !currIsRepeated {
			// This should not happen.
			return nil, 0
		}
		// index is the position of the array's first element within the
		// repeated field; offset locates the sought element inside the array.
		offset := int(path[0]) - index
		if offset >= len(arrayVal.Elements) {
			// The index we are looking for is not in this array.
			return nil, 0
		}
		elem := arrayVal.Elements[offset]
		// We've matched the index!
		matchLen++
		path = path[1:]
		// Recurse into array element.
		nextMatch, nextMatchLen := findMatchingValueNode(
			md,
			path,
			false,
			repeatedCount,
			repeatedIndices,
			elem,
			elem,
		)
		return nextMatch, nextMatchLen + matchLen
	}
	if currIsRepeated {
		if index != int(path[0]) {
			// Not a match!
			return nil, 0
		}
		// We've matched the index!
		matchLen++
		path = path[1:]
		if len(path) == 0 {
			// We're done matching!
			return node, matchLen
		}
	}
	msgValue, ok := val.(*ast.MessageLiteralNode)
	if !ok {
		// We can't go any further
		return node, matchLen
	}
	var wantField protoreflect.FieldDescriptor
	if md != nil {
		wantField = md.Fields().ByNumber(protoreflect.FieldNumber(path[0]))
	}
	if wantField == nil {
		// Should not be possible... next option won't fare any better since
		// it's a disagreement between given path and given descriptor so bail.
		return nil, 0
	}
	for _, field := range msgValue.Elements {
		if field.Name.Open != nil ||
			string(field.Name.Name.AsIdentifier()) != string(wantField.Name()) {
			// This is an extension/custom option or indicates the wrong name.
			// Try the next one.
			continue
		}
		// We've matched this field.
		matchLen++
		path = path[1:]
		if len(path) == 0 {
			// Perfect match!
			return field, matchLen
		}
		// Recurse into the matched field's value; note this returns on the
		// first name match rather than scanning remaining elements.
		nextMatch, nextMatchLen := findMatchingValueNode(
			wantField.Message(),
			path,
			wantField.Cardinality() == protoreflect.Repeated,
			repeatedCount,
			repeatedIndices,
			field,
			field.Val,
		)
		return nextMatch, nextMatchLen + matchLen
	}
	// If we didn't find the right field, just return what we have so far.
	return node, matchLen
}
// isDescendantPath reports whether ancestor is a (possibly equal) prefix of
// descendant, i.e. descendant identifies an element at or inside ancestor.
func isDescendantPath(descendant, ancestor protoreflect.SourcePath) bool {
	if len(ancestor) > len(descendant) {
		return false
	}
	for i, elem := range ancestor {
		if descendant[i] != elem {
			return false
		}
	}
	return true
}
// asSpan converts a protoreflect source location (zero-based lines/columns)
// into an ast.SourceSpan (one-based).
func asSpan(file string, srcLoc protoreflect.SourceLocation) ast.SourceSpan {
	start := ast.SourcePos{
		Filename: file,
		Line:     srcLoc.StartLine + 1,
		Col:      srcLoc.StartColumn + 1,
	}
	end := ast.SourcePos{
		Filename: file,
		Line:     srcLoc.EndLine + 1,
		Col:      srcLoc.EndColumn + 1,
	}
	return ast.NewSourceSpan(start, end)
}
// getImportLocation returns the span of the import statement naming path, or
// an unknown span if the AST is unavailable or the import cannot be found.
func (r *result) getImportLocation(path string) ast.SourceSpan {
	fileNode, ok := r.FileNode().(*ast.FileNode)
	if !ok {
		return ast.UnknownSpan(path)
	}
	for _, decl := range fileNode.Decls {
		if imp, ok := decl.(*ast.ImportNode); ok && imp.Name.AsString() == path {
			return fileNode.NodeInfo(imp.Name)
		}
	}
	// Couldn't find it? Should never happen...
	return ast.UnknownSpan(path)
}
// isEditions reports whether the result's file uses editions (2023 or later)
// rather than proto2/proto3 syntax.
func isEditions(r *result) bool {
	edition := descriptorpb.Edition(r.Edition())
	return edition >= descriptorpb.Edition_EDITION_2023
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package options contains the logic for interpreting options. The parse step
// of compilation stores the options in uninterpreted form, which contains raw
// identifiers and literal values.
//
// The process of interpreting an option is to resolve identifiers, by examining
// descriptors for the google.protobuf.*Options types and their available
// extensions (custom options). As field names are resolved, the values can be
// type-checked against the types indicated in field descriptors.
//
// On success, the various fields and extensions of the options message are
// populated and the field holding the uninterpreted form is cleared.
package options
import (
"bytes"
"errors"
"fmt"
"math"
"strings"
"google.golang.org/protobuf/encoding/prototext"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/descriptorpb"
"google.golang.org/protobuf/types/dynamicpb"
"github.com/bufbuild/protocompile/ast"
"github.com/bufbuild/protocompile/internal"
"github.com/bufbuild/protocompile/internal/messageset"
"github.com/bufbuild/protocompile/linker"
"github.com/bufbuild/protocompile/parser"
"github.com/bufbuild/protocompile/reporter"
"github.com/bufbuild/protocompile/sourceinfo"
)
// interpreter holds the state used while interpreting the options of a
// single file.
type interpreter struct {
	// file is the parse result whose options are being interpreted.
	file file
	// resolver resolves type and extension names; nil for unlinked files.
	resolver linker.Resolver
	// overrideDescriptorProto, if set, is consulted when an option type is
	// not otherwise visible to the file (see WithOverrideDescriptorProto).
	overrideDescriptorProto linker.File
	// index accumulates option location data for generating source code info.
	index sourceinfo.OptionIndex
	// pathBuffer is scratch space, presumably reused when building source
	// paths — not exercised in this chunk; confirm at use sites.
	pathBuffer []int32
	// reporter receives errors and warnings encountered while interpreting.
	reporter *reporter.Handler
	// lenient indicates best-effort interpretation: uninterpretable options
	// are left in uninterpreted form rather than reported as errors.
	lenient bool
	// lenienceEnabled is set to true when errors reported to reporter
	// should be lenient
	lenienceEnabled bool
	// lenientErrReported records that at least one error was suppressed
	// while lenience was enabled.
	lenientErrReported bool
}
// file is the view of a parse result needed by the interpreter; it augments
// parser.Result with the ability to resolve extension names that appear in
// message literals.
type file interface {
	parser.Result
	// ResolveMessageLiteralExtensionName resolves the given name, as found in
	// a message literal, to a fully-qualified extension name.
	ResolveMessageLiteralExtensionName(ast.IdentValueNode) string
}
// noResolveFile adapts a plain parser.Result to the file interface for
// unlinked files, where extension name resolution is not possible.
type noResolveFile struct {
	parser.Result
}

// ResolveMessageLiteralExtensionName always returns the empty string: without
// linking there is nothing to resolve against.
func (n noResolveFile) ResolveMessageLiteralExtensionName(ast.IdentValueNode) string {
	return ""
}
// InterpreterOption is an option that can be passed to InterpretOptions and
// its variants to configure the interpreter before it runs.
type InterpreterOption func(*interpreter)
// WithOverrideDescriptorProto returns an option that indicates that the given file
// should be consulted when looking up a definition for an option type. The given
// file should usually have the path "google/protobuf/descriptor.proto". The given
// file will only be consulted if the option type is otherwise not visible to the
// file whose options are being interpreted.
func WithOverrideDescriptorProto(f linker.File) InterpreterOption {
	// Stash the override file on the interpreter for later lookups.
	return func(i *interpreter) {
		i.overrideDescriptorProto = f
	}
}
// InterpretOptions interprets options in the given linked result, returning
// an index that can be used to generate source code info. This step mutates
// the linked result's underlying proto to move option elements out of the
// "uninterpreted_option" fields and into proper option fields and extensions.
//
// The given handler is used to report errors and warnings. If any errors are
// reported, this function returns a non-nil error.
func InterpretOptions(linked linker.Result, handler *reporter.Handler, opts ...InterpreterOption) (sourceinfo.OptionIndex, error) {
	// Strict (non-lenient) interpretation with a resolver derived from the
	// linked file itself.
	res := linker.ResolverFromFile(linked)
	return interpretOptions(false, linked, res, handler, opts)
}
// InterpretOptionsLenient interprets options in a lenient/best-effort way in
// the given linked result, returning an index that can be used to generate
// source code info. This step mutates the linked result's underlying proto to
// move option elements out of the "uninterpreted_option" fields and into proper
// option fields and extensions.
//
// In lenient mode, errors resolving option names and type errors are ignored.
// Any options that are uninterpretable (due to such errors) will remain in the
// "uninterpreted_option" fields.
func InterpretOptionsLenient(linked linker.Result, opts ...InterpreterOption) (sourceinfo.OptionIndex, error) {
	// Lenient interpretation: errors are swallowed via a handler with a nil
	// reporter, and uninterpretable options remain uninterpreted.
	res := linker.ResolverFromFile(linked)
	return interpretOptions(true, linked, res, reporter.NewHandler(nil), opts)
}
// InterpretUnlinkedOptions does a best-effort attempt to interpret options in
// the given parsed result, returning an index that can be used to generate
// source code info. This step mutates the parsed result's underlying proto to
// move option elements out of the "uninterpreted_option" fields and into proper
// option fields and extensions.
//
// This is the same as InterpretOptionsLenient except that it accepts an
// unlinked result. Because the file is unlinked, custom options cannot be
// interpreted. Other errors resolving option names or type errors will be
// effectively ignored. Any options that are uninterpretable (due to such
// errors) will remain in the "uninterpreted_option" fields.
func InterpretUnlinkedOptions(parsed parser.Result, opts ...InterpreterOption) (sourceinfo.OptionIndex, error) {
	// No resolver is available for an unlinked file (so custom options cannot
	// be interpreted), and errors are discarded via a nil-reporter handler.
	wrapped := noResolveFile{parsed}
	return interpretOptions(true, wrapped, nil, reporter.NewHandler(nil), opts)
}
// interpretOptions is the shared implementation behind InterpretOptions,
// InterpretOptionsLenient, and InterpretUnlinkedOptions: it builds an
// interpreter, applies caller-supplied options, and interprets the file's
// options in two passes.
func interpretOptions(lenient bool, file file, res linker.Resolver, handler *reporter.Handler, interpOpts []InterpreterOption) (sourceinfo.OptionIndex, error) {
	interp := &interpreter{
		file:       file,
		resolver:   res,
		lenient:    lenient,
		reporter:   handler,
		index:      sourceinfo.OptionIndex{},
		pathBuffer: make([]int32, 0, 16),
	}
	for _, opt := range interpOpts {
		opt(interp)
	}
	// We have to do this in two phases. First we interpret non-custom options.
	// This allows us to handle standard options and features that may be needed
	// to correctly reference the custom options in the second phase.
	if err := interp.interpretFileOptions(file, false); err != nil {
		return nil, err
	}
	// Now we can do custom options.
	if err := interp.interpretFileOptions(file, true); err != nil {
		return nil, err
	}
	return interp.index, nil
}
// handleErrorf reports a formatted error at the given span, unless lenience
// is currently enabled, in which case the error is suppressed and merely
// recorded.
func (interp *interpreter) handleErrorf(span ast.SourceSpan, msg string, args ...any) error {
	if !interp.lenienceEnabled {
		return interp.reporter.HandleErrorf(span, msg, args...)
	}
	interp.lenientErrReported = true
	return nil
}
// handleErrorWithPos reports err at the given span, unless lenience is
// currently enabled, in which case the error is suppressed and merely
// recorded.
func (interp *interpreter) handleErrorWithPos(span ast.SourceSpan, err error) error {
	if !interp.lenienceEnabled {
		return interp.reporter.HandleErrorWithPos(span, err)
	}
	interp.lenientErrReported = true
	return nil
}
// handleError reports err, unless lenience is currently enabled, in which
// case the error is suppressed and merely recorded.
func (interp *interpreter) handleError(err error) error {
	if !interp.lenienceEnabled {
		return interp.reporter.HandleError(err)
	}
	interp.lenientErrReported = true
	return nil
}
// interpretFileOptions interprets one phase of options (customOpts selects
// the phase) for the file itself and for every element declared in it:
// messages, top-level extensions, enums, and services with their methods.
func (interp *interpreter) interpretFileOptions(file file, customOpts bool) error {
	fd := file.FileDescriptorProto()
	prefix := fd.GetPackage()
	if prefix != "" {
		prefix += "."
	}
	if err := interpretElementOptions(interp, fd.GetName(), targetTypeFile, fd, customOpts); err != nil {
		return err
	}
	for _, md := range fd.GetMessageType() {
		if err := interp.interpretMessageOptions(prefix+md.GetName(), md, customOpts); err != nil {
			return err
		}
	}
	for _, extFld := range fd.GetExtension() {
		if err := interp.interpretFieldOptions(prefix+extFld.GetName(), extFld, customOpts); err != nil {
			return err
		}
	}
	for _, ed := range fd.GetEnumType() {
		if err := interp.interpretEnumOptions(prefix+ed.GetName(), ed, customOpts); err != nil {
			return err
		}
	}
	for _, sd := range fd.GetService() {
		svcFqn := prefix + sd.GetName()
		if err := interpretElementOptions(interp, svcFqn, targetTypeService, sd, customOpts); err != nil {
			return err
		}
		for _, mtd := range sd.GetMethod() {
			if err := interpretElementOptions(interp, svcFqn+"."+mtd.GetName(), targetTypeMethod, mtd, customOpts); err != nil {
				return err
			}
		}
	}
	return nil
}
// resolveDescriptor looks up name (with any leading dot stripped) via res and
// returns the result if it exists and has the requested concrete descriptor
// type T; in every other case it returns the zero value of T.
func resolveDescriptor[T protoreflect.Descriptor](res linker.Resolver, name string) T {
	var zero T
	if res == nil {
		return zero
	}
	fullName := protoreflect.FullName(strings.TrimPrefix(name, "."))
	desc, _ := res.FindDescriptorByName(fullName)
	if typed, ok := desc.(T); ok {
		return typed
	}
	return zero
}
// resolveExtensionType resolves the named extension (any leading dot is
// stripped) and returns its type descriptor. When no resolver is configured,
// it reports protoregistry.NotFound.
func (interp *interpreter) resolveExtensionType(name string) (protoreflect.ExtensionTypeDescriptor, error) {
	if interp.resolver == nil {
		return nil, protoregistry.NotFound
	}
	fullName := protoreflect.FullName(strings.TrimPrefix(name, "."))
	ext, err := interp.resolver.FindExtensionByName(fullName)
	if err != nil {
		return nil, err
	}
	return ext.TypeDescriptor(), nil
}
// resolveOptionsType resolves the named options message type, first through
// the regular resolver and then, as a fallback, through any override
// descriptor.proto supplied to the interpreter. It returns nil when the name
// cannot be resolved to a message descriptor.
func (interp *interpreter) resolveOptionsType(name string) protoreflect.MessageDescriptor {
	if md := resolveDescriptor[protoreflect.MessageDescriptor](interp.resolver, name); md != nil {
		return md
	}
	if interp.overrideDescriptorProto == nil {
		return nil
	}
	fullName := protoreflect.FullName(strings.TrimPrefix(name, "."))
	if md, ok := interp.overrideDescriptorProto.FindDescriptorByName(fullName).(protoreflect.MessageDescriptor); ok {
		return md
	}
	return nil
}
// nodeInfo returns position/source information for n within the current file's AST.
func (interp *interpreter) nodeInfo(n ast.Node) ast.NodeInfo {
	return interp.file.FileNode().NodeInfo(n)
}
// interpretMessageOptions interprets one phase of options (customOpts selects
// the phase) for a message and, recursively, for everything declared inside
// it: fields, oneofs, extensions, extension ranges, nested messages, and
// nested enums. It also propagates features set on map fields down to the
// synthesized key and value fields of the corresponding map entry message.
func (interp *interpreter) interpretMessageOptions(fqn string, md *descriptorpb.DescriptorProto, customOpts bool) error {
	err := interpretElementOptions(interp, fqn, targetTypeMessage, md, customOpts)
	if err != nil {
		return err
	}
	for _, fld := range md.GetField() {
		fldFqn := fqn + "." + fld.GetName()
		if err := interp.interpretFieldOptions(fldFqn, fld, customOpts); err != nil {
			return err
		}
	}
	for _, ood := range md.GetOneofDecl() {
		oodFqn := fqn + "." + ood.GetName()
		err := interpretElementOptions(interp, oodFqn, targetTypeOneof, ood, customOpts)
		if err != nil {
			return err
		}
	}
	for _, fld := range md.GetExtension() {
		fldFqn := fqn + "." + fld.GetName()
		if err := interp.interpretFieldOptions(fldFqn, fld, customOpts); err != nil {
			return err
		}
	}
	for _, er := range md.GetExtensionRange() {
		erFqn := fmt.Sprintf("%s.%d-%d", fqn, er.GetStart(), er.GetEnd())
		err := interpretElementOptions(interp, erFqn, targetTypeExtensionRange, er, customOpts)
		if err != nil {
			return err
		}
	}
	for _, nmd := range md.GetNestedType() {
		nmdFqn := fqn + "." + nmd.GetName()
		if err := interp.interpretMessageOptions(nmdFqn, nmd, customOpts); err != nil {
			return err
		}
	}
	for _, ed := range md.GetEnumType() {
		edFqn := fqn + "." + ed.GetName()
		if err := interp.interpretEnumOptions(edFqn, ed, customOpts); err != nil {
			return err
		}
	}
	// We also copy features for map fields down to their synthesized key and value fields.
	for _, fld := range md.GetField() {
		entryName := internal.InitCap(internal.JSONName(fld.GetName())) + "Entry"
		// BUG FIX: the last two conditions were previously joined with &&.
		// Since && binds tighter than ||, that evaluated as
		// !repeated || (!message && typename != entry), which let a repeated
		// message field whose type name does NOT match the entry name slip
		// through and be treated as a potential map field. A field is a map
		// field only if it is repeated AND a message AND typed as the
		// synthesized entry message, so the skip condition must be the
		// disjunction of all three negations.
		if fld.GetLabel() != descriptorpb.FieldDescriptorProto_LABEL_REPEATED ||
			fld.GetType() != descriptorpb.FieldDescriptorProto_TYPE_MESSAGE ||
			fld.GetTypeName() != "."+fqn+"."+entryName {
			// can't be a map field
			continue
		}
		if fld.Options == nil || fld.Options.Features == nil {
			// no features to propagate
			continue
		}
		for _, nmd := range md.GetNestedType() {
			if nmd.GetName() == entryName {
				// found the entry message
				if !nmd.GetOptions().GetMapEntry() {
					break // not a map
				}
				for _, mapField := range nmd.Field {
					if mapField.Options == nil {
						mapField.Options = &descriptorpb.FieldOptions{}
					}
					// The map field's features act as inherited defaults;
					// features set explicitly on the entry's own fields win.
					features := proto.Clone(fld.Options.Features).(*descriptorpb.FeatureSet) //nolint:errcheck
					if mapField.Options.Features != nil {
						proto.Merge(features, mapField.Options.Features)
					}
					mapField.Options.Features = features
				}
				break
			}
		}
	}
	return nil
}
var emptyFieldOptions = &descriptorpb.FieldOptions{}
// interpretFieldOptions interprets one phase of options on a single field or
// extension. In the non-custom phase it first handles the "default" and
// "json_name" pseudo-options (which are stored on the field descriptor
// itself, not inside FieldOptions), then interprets whatever real options
// remain. A FieldOptions message left with no content is cleared to nil,
// unless it arrived empty in the first place.
func (interp *interpreter) interpretFieldOptions(fqn string, fld *descriptorpb.FieldDescriptorProto, customOpts bool) error {
	opts := fld.GetOptions()
	// Remember whether the descriptor arrived with an options message that
	// was already empty of uninterpreted options; if so, don't clear it below.
	emptyOptionsAlreadyPresent := opts != nil && len(opts.GetUninterpretedOption()) == 0
	// For non-custom phase, first process pseudo-options
	if len(opts.GetUninterpretedOption()) > 0 && !customOpts {
		// Pseudo-options are processed leniently so a failure leaves them in
		// place instead of aborting interpretation of this field.
		interp.enableLenience(true)
		err := interp.interpretFieldPseudoOptions(fqn, fld, opts)
		interp.enableLenience(false)
		if err != nil {
			return err
		}
	}
	// Must re-check length of uninterpreted options since above step could remove some.
	if len(opts.GetUninterpretedOption()) == 0 {
		// If the message has no other interpreted options, we clear it out. But don't
		// do that if the descriptor came in with empty options or if it already has
		// interpreted option fields.
		if opts != nil && !emptyOptionsAlreadyPresent && proto.Equal(fld.Options, emptyFieldOptions) {
			fld.Options = nil
		}
		return nil
	}
	// Then process actual options.
	return interpretElementOptions(interp, fqn, targetTypeField, fld, customOpts)
}
// interpretFieldPseudoOptions handles the "json_name" and "default" options,
// which are written like options in source but are stored as fields of the
// FieldDescriptorProto itself rather than inside FieldOptions. Successfully
// interpreted pseudo-options are removed from the uninterpreted list, which
// is written back to opts at the end.
func (interp *interpreter) interpretFieldPseudoOptions(fqn string, fld *descriptorpb.FieldDescriptorProto, opts *descriptorpb.FieldOptions) error {
	scope := "field " + fqn
	uo := opts.UninterpretedOption
	// process json_name pseudo-option
	if index, err := internal.FindOption(interp.file, interp.handleErrorf, scope, uo, "json_name"); err != nil {
		return err
	} else if index >= 0 {
		opt := uo[index]
		optNode := interp.file.OptionNode(opt)
		if opt.StringValue == nil {
			return interp.handleErrorf(interp.nodeInfo(optNode.GetValue()), "%s: expecting string value for json_name option", scope)
		}
		jsonName := string(opt.StringValue)
		// Extensions don't support custom json_name values.
		// If the value is already set (via the descriptor) and doesn't match the default value, return an error.
		if fld.GetExtendee() != "" && jsonName != "" && jsonName != internal.JSONName(fld.GetName()) {
			return interp.handleErrorf(interp.nodeInfo(optNode.GetName()), "%s: option json_name is not allowed on extensions", scope)
		}
		// attribute source code info; the leading -1 in the path marks this
		// as a pseudo-option (see findOptionNode, which skips such entries)
		if on, ok := optNode.(*ast.OptionNode); ok {
			interp.index[on] = &sourceinfo.OptionSourceInfo{Path: []int32{-1, internal.FieldJSONNameTag}}
		}
		uo = internal.RemoveOption(uo, index)
		if strings.HasPrefix(jsonName, "[") && strings.HasSuffix(jsonName, "]") {
			return interp.handleErrorf(interp.nodeInfo(optNode.GetValue()), "%s: option json_name value cannot start with '[' and end with ']'; that is reserved for representing extensions", scope)
		}
		fld.JsonName = proto.String(jsonName)
	}
	// and process default pseudo-option
	if index, err := interp.processDefaultOption(scope, fqn, fld, uo); err != nil {
		return err
	} else if index >= 0 {
		// attribute source code info
		optNode := interp.file.OptionNode(uo[index])
		if on, ok := optNode.(*ast.OptionNode); ok {
			interp.index[on] = &sourceinfo.OptionSourceInfo{Path: []int32{-1, internal.FieldDefaultTag}}
		}
		uo = internal.RemoveOption(uo, index)
	}
	// Write back whatever remains uninterpreted.
	opts.UninterpretedOption = uo
	return nil
}
// processDefaultOption finds and interprets the "default" pseudo-option in
// uos, storing the resulting value (rendered as a string) in fld.DefaultValue.
// It returns the index of the option within uos (so the caller can remove it
// and attribute source info), or -1 when no default option is present or an
// error occurred.
func (interp *interpreter) processDefaultOption(scope string, fqn string, fld *descriptorpb.FieldDescriptorProto, uos []*descriptorpb.UninterpretedOption) (defaultIndex int, err error) {
	found, err := internal.FindOption(interp.file, interp.handleErrorf, scope, uos, "default")
	if err != nil || found == -1 {
		return -1, err
	}
	opt := uos[found]
	optNode := interp.file.OptionNode(opt)
	// Defaults are not allowed on repeated or message-typed fields.
	if fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REPEATED {
		return -1, interp.handleErrorf(interp.nodeInfo(optNode.GetName()), "%s: default value cannot be set because field is repeated", scope)
	}
	if fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP || fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_MESSAGE {
		return -1, interp.handleErrorf(interp.nodeInfo(optNode.GetName()), "%s: default value cannot be set because field is a message", scope)
	}
	mc := &internal.MessageContext{
		File:        interp.file,
		ElementName: fqn,
		ElementType: descriptorType(fld),
		Option:      opt,
	}
	val := optNode.GetValue()
	var v any
	if val.Value() == nil {
		// no value in the AST, so we dig the value out of the uninterpreted option proto
		v, err = interp.defaultValueFromProto(mc, fld, opt, val)
	} else {
		// compute value from AST
		v, err = interp.defaultValue(mc, fld, val)
	}
	if err != nil {
		return -1, interp.handleError(err)
	}
	// Render the computed value in the string form used by descriptor protos.
	if str, ok := v.(string); ok {
		fld.DefaultValue = proto.String(str)
	} else if b, ok := v.([]byte); ok {
		// bytes defaults are stored using text-format escaping
		fld.DefaultValue = proto.String(encodeDefaultBytes(b))
	} else {
		var flt float64
		var ok bool
		if flt, ok = v.(float64); !ok {
			var flt32 float32
			if flt32, ok = v.(float32); ok {
				flt = float64(flt32)
			}
		}
		if ok {
			// Floating-point specials use the canonical spellings "inf",
			// "-inf", and "nan" rather than Go's own formatting of them.
			switch {
			case math.IsInf(flt, 1):
				fld.DefaultValue = proto.String("inf")
			case math.IsInf(flt, -1):
				fld.DefaultValue = proto.String("-inf")
			case math.IsNaN(flt):
				fld.DefaultValue = proto.String("nan")
			default:
				fld.DefaultValue = proto.String(fmt.Sprintf("%v", v))
			}
		} else {
			fld.DefaultValue = proto.String(fmt.Sprintf("%v", v))
		}
	}
	return found, nil
}
// defaultValue computes the value of a "default" pseudo-option from its AST
// node. For enum fields it returns the enum value's name as a string; for
// other scalar types it delegates to scalarFieldValue.
//
// NOTE(review): error paths return -1 as the value; callers only use the
// value when err is nil, so the -1 is effectively a placeholder.
func (interp *interpreter) defaultValue(mc *internal.MessageContext, fld *descriptorpb.FieldDescriptorProto, val ast.ValueNode) (any, error) {
	if _, ok := val.(*ast.MessageLiteralNode); ok {
		return -1, reporter.Errorf(interp.nodeInfo(val), "%vdefault value cannot be a message", mc)
	}
	if fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_ENUM {
		ed := resolveDescriptor[protoreflect.EnumDescriptor](interp.resolver, fld.GetTypeName())
		if ed == nil {
			return -1, reporter.Errorf(interp.nodeInfo(val), "%vunable to resolve enum type %q for field %q", mc, fld.GetTypeName(), fld.GetName())
		}
		_, name, err := interp.enumFieldValue(mc, ed, val, false)
		if err != nil {
			return -1, err
		}
		return string(name), nil
	}
	return interp.scalarFieldValue(mc, fld.GetType(), val, false)
}
// defaultValueFromProto computes the value of a "default" pseudo-option
// directly from the uninterpreted option proto, used when the AST carries no
// value (e.g. options supplied via a descriptor rather than source).
//
// NOTE(review): most error paths return -1 as the value but the enum-value
// branch returns nil; callers ignore the value on error either way.
func (interp *interpreter) defaultValueFromProto(mc *internal.MessageContext, fld *descriptorpb.FieldDescriptorProto, opt *descriptorpb.UninterpretedOption, node ast.Node) (any, error) {
	if opt.AggregateValue != nil {
		return -1, reporter.Errorf(interp.nodeInfo(node), "%vdefault value cannot be a message", mc)
	}
	if fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_ENUM {
		ed := resolveDescriptor[protoreflect.EnumDescriptor](interp.resolver, fld.GetTypeName())
		if ed == nil {
			return -1, reporter.Errorf(interp.nodeInfo(node), "%vunable to resolve enum type %q for field %q", mc, fld.GetTypeName(), fld.GetName())
		}
		_, name, err := interp.enumFieldValueFromProto(mc, ed, opt, node)
		if err != nil {
			return nil, err
		}
		return string(name), nil
	}
	return interp.scalarFieldValueFromProto(mc, fld.GetType(), opt, node)
}
// encodeDefaultBytes renders b with text-format escaping so the result can be
// stored in the string-typed default_value field of a descriptor.
func encodeDefaultBytes(b []byte) string {
	var sb bytes.Buffer
	internal.WriteEscapedBytes(&sb, b)
	return sb.String()
}
// interpretEnumOptions interprets one phase of options (customOpts selects
// the phase) for an enum and then for each of its values.
func (interp *interpreter) interpretEnumOptions(fqn string, ed *descriptorpb.EnumDescriptorProto, customOpts bool) error {
	if err := interpretElementOptions(interp, fqn, targetTypeEnum, ed, customOpts); err != nil {
		return err
	}
	for _, enumVal := range ed.GetValue() {
		valFqn := fqn + "." + enumVal.GetName()
		if err := interpretElementOptions(interp, valFqn, targetTypeEnumValue, enumVal, customOpts); err != nil {
			return err
		}
	}
	return nil
}
// interpretElementOptions interprets one phase of options for a single
// descriptor element (file, message, field, enum, service, etc.). When there
// is nothing left to interpret but we are in the custom-options (second)
// pass, it still validates feature usage against the current edition.
func interpretElementOptions[Elem elementType[OptsStruct, Opts], OptsStruct any, Opts optionsType[OptsStruct]](
	interp *interpreter,
	fqn string,
	target *targetType[Elem, OptsStruct, Opts],
	elem Elem,
	customOpts bool,
) error {
	opts := elem.GetOptions()
	uninterpreted := opts.GetUninterpretedOption()
	if len(uninterpreted) > 0 {
		// Interpret; whatever could not be interpreted (lenient mode) is
		// written back as the remaining uninterpreted options.
		remain, err := interp.interpretOptions(fqn, target.t, elem, opts, uninterpreted, customOpts)
		if err != nil {
			return err
		}
		target.setUninterpretedOptions(opts, remain)
	} else if customOpts {
		// If customOpts is true, we are in second pass of interpreting.
		// For second pass, even if there are no options to interpret, we still
		// need to verify feature usage.
		features := opts.GetFeatures()
		var msg protoreflect.Message
		if len(features.ProtoReflect().GetUnknown()) > 0 {
			// We need to first convert to a message that uses the sources' definition
			// of FeatureSet.
			optsDesc := opts.ProtoReflect().Descriptor()
			optsFqn := string(optsDesc.FullName())
			if md := interp.resolveOptionsType(optsFqn); md != nil {
				dm := dynamicpb.NewMessage(md)
				if err := cloneInto(dm, opts, interp.resolver); err != nil {
					node := interp.file.Node(elem)
					return interp.handleError(reporter.Error(interp.nodeInfo(node), err))
				}
				msg = dm
			}
		}
		if msg == nil {
			msg = opts.ProtoReflect()
		}
		err := interp.validateRecursive(false, msg, "", elem, nil, false, false, false)
		if err != nil {
			return err
		}
	}
	return nil
}
// interpretOptions processes the options in uninterpreted, which are interpreted as fields
// of the given opts message. On success, the returned slice contains the options which
// could not be interpreted in this phase (pseudo-options, options belonging to the other
// custom/non-custom phase, and -- when the interpreter is lenient -- options whose
// interpretation failed); the slice is usually nil for a strict, single-phase-complete run.
func (interp *interpreter) interpretOptions(
	fqn string,
	targetType descriptorpb.FieldOptions_OptionTargetType,
	element, opts proto.Message,
	uninterpreted []*descriptorpb.UninterpretedOption,
	customOpts bool,
) ([]*descriptorpb.UninterpretedOption, error) {
	optsDesc := opts.ProtoReflect().Descriptor()
	optsFqn := string(optsDesc.FullName())
	var msg protoreflect.Message
	// see if the parse included an override copy for these options
	if md := interp.resolveOptionsType(optsFqn); md != nil {
		dm := dynamicpb.NewMessage(md)
		if err := cloneInto(dm, opts, interp.resolver); err != nil {
			node := interp.file.Node(element)
			return nil, interp.handleError(reporter.Error(interp.nodeInfo(node), err))
		}
		msg = dm
	} else {
		// Work on a clone so failures cannot leave opts partially populated.
		msg = proto.Clone(opts).ProtoReflect()
	}
	mc := &internal.MessageContext{
		File:        interp.file,
		ElementName: fqn,
		ElementType: descriptorType(element),
	}
	var remain []*descriptorpb.UninterpretedOption
	for _, uo := range uninterpreted {
		isCustom := uo.Name[0].GetIsExtension()
		if isCustom != customOpts {
			// We're not looking at these this phase.
			remain = append(remain, uo)
			continue
		}
		firstName := uo.Name[0].GetNamePart()
		if targetType == descriptorpb.FieldOptions_TARGET_TYPE_FIELD &&
			!isCustom && (firstName == "default" || firstName == "json_name") {
			// Field pseudo-option that we can skip and is handled elsewhere.
			remain = append(remain, uo)
			continue
		}
		node := interp.file.OptionNode(uo)
		if !isCustom && firstName == "uninterpreted_option" {
			if interp.lenient {
				remain = append(remain, uo)
				continue
			}
			// uninterpreted_option might be found reflectively, but is not actually valid for use
			if err := interp.handleErrorf(interp.nodeInfo(node.GetName()), "%vinvalid option 'uninterpreted_option'", mc); err != nil {
				return nil, err
			}
		}
		mc.Option = uo
		// Interpret this single option leniently; on failure it is kept in
		// remain rather than failing the whole batch.
		interp.enableLenience(true)
		srcInfo, err := interp.interpretField(targetType, mc, msg, uo, 0, interp.pathBuffer)
		interp.enableLenience(false)
		if err != nil {
			return nil, err
		}
		if interp.lenientErrReported {
			remain = append(remain, uo)
			continue
		}
		if srcInfo != nil {
			if optn, ok := node.(*ast.OptionNode); ok {
				interp.index[optn] = srcInfo
			}
		}
	}
	// customOpts is true for the second pass, which is also when we want to validate feature usage.
	doValidation := customOpts
	if doValidation {
		validateRequiredFields := !interp.lenient
		err := interp.validateRecursive(validateRequiredFields, msg, "", element, nil, false, false, false)
		if err != nil {
			return nil, err
		}
	}
	if interp.lenient {
		// If we're lenient, then we don't want to clobber the passed in message
		// and leave it partially populated. So we convert into a copy first
		optsClone := opts.ProtoReflect().New().Interface()
		if err := cloneInto(optsClone, msg.Interface(), interp.resolver); err != nil {
			// TODO: do this in a more granular way, so we can convert individual
			// fields and leave bad ones uninterpreted instead of skipping all of
			// the work we've done so far.
			return uninterpreted, nil
		}
		if doValidation {
			if err := proto.CheckInitialized(optsClone); err != nil {
				// Conversion from dynamic message failed to set some required fields.
				// TODO above applies here as well...
				return uninterpreted, nil
			}
		}
		// conversion from dynamic message above worked, so now
		// it is safe to overwrite the passed in message
		proto.Reset(opts)
		proto.Merge(opts, optsClone)
		return remain, nil
	}
	// now try to convert into the passed in message and fail if not successful
	if err := cloneInto(opts, msg.Interface(), interp.resolver); err != nil {
		node := interp.file.Node(element)
		return nil, interp.handleError(reporter.Error(interp.nodeInfo(node), err))
	}
	return remain, nil
}
// checkFieldUsage verifies that the given option field can be used
// for the given target type. It reports an error if not and returns
// a non-nil error if the handler returned a non-nil error.
func (interp *interpreter) checkFieldUsage(
	targetType descriptorpb.FieldOptions_OptionTargetType,
	fld protoreflect.FieldDescriptor,
	node ast.Node,
) error {
	// Reject fields in messages using the legacy proto1 message-set wire
	// format when support for it is not available.
	msgOpts, _ := fld.ContainingMessage().Options().(*descriptorpb.MessageOptions)
	if msgOpts.GetMessageSetWireFormat() && !messageset.CanSupportMessageSets() {
		if err := interp.handleErrorf(interp.nodeInfo(node), "field %q may not be used in an option: it uses 'message set wire format' legacy proto1 feature which is not supported", fld.FullName()); err != nil {
			return err
		}
	}
	fldOpts, ok := fld.Options().(*descriptorpb.FieldOptions)
	if !ok {
		return nil
	}
	// No declared targets means the field is usable everywhere.
	allowed := fldOpts.GetTargets()
	if len(allowed) == 0 {
		return nil
	}
	for _, t := range allowed {
		if t == targetType {
			return nil
		}
	}
	// A sole TARGET_TYPE_UNKNOWN entry means the field allows no targets.
	if len(allowed) == 1 && allowed[0] == descriptorpb.FieldOptions_TARGET_TYPE_UNKNOWN {
		return interp.handleErrorf(interp.nodeInfo(node), "field %q may not be used in an option (it declares no allowed target types)", fld.FullName())
	}
	names := make([]string, len(allowed))
	for i, t := range allowed {
		names[i] = targetTypeString(t)
	}
	return interp.handleErrorf(interp.nodeInfo(node), "field %q is allowed on [%s], not on %s", fld.FullName(), strings.Join(names, ","), targetTypeString(targetType))
}
// targetTypeString renders an option target type for error messages, e.g.
// TARGET_TYPE_ENUM_ENTRY becomes "enum entry".
func targetTypeString(t descriptorpb.FieldOptions_OptionTargetType) string {
	trimmed := strings.TrimPrefix(t.String(), "TARGET_TYPE_")
	return strings.ToLower(strings.ReplaceAll(trimmed, "_", " "))
}
// editionString renders an edition for error messages, e.g. EDITION_2023
// becomes "2023".
func editionString(t descriptorpb.Edition) string {
	trimmed := strings.TrimPrefix(t.String(), "EDITION_")
	return strings.ToLower(strings.ReplaceAll(trimmed, "_", "-"))
}
// cloneInto copies src into dest, resetting dest first. When both messages
// share one descriptor a direct merge is used; otherwise the value is
// round-tripped through the wire format, with extensions resolved via res
// (or resolved not at all when res is nil).
func cloneInto(dest proto.Message, src proto.Message, res linker.Resolver) error {
	if dest.ProtoReflect().Descriptor() == src.ProtoReflect().Descriptor() {
		proto.Reset(dest)
		proto.Merge(dest, src)
		return nil
	}
	// Descriptors differ, so field descriptors in src may not match the ones
	// in dest. There's no easy/sane way to reconcile that directly, so
	// serialize and re-parse instead. Required fields were already validated
	// before this point, hence AllowPartial on both sides.
	data, err := proto.MarshalOptions{AllowPartial: true}.Marshal(src)
	if err != nil {
		return err
	}
	unmarshaler := proto.UnmarshalOptions{AllowPartial: true}
	if res != nil {
		unmarshaler.Resolver = res
	} else {
		// Use a typed nil, which returns "not found" to all queries
		// and prevents fallback to protoregistry.GlobalTypes.
		unmarshaler.Resolver = (*protoregistry.Types)(nil)
	}
	return unmarshaler.Unmarshal(data, dest)
}
// validateRecursive walks the populated fields of msg (an options message),
// checking required-field presence (when validateRequiredFields is set) and
// feature-support rules, recursing into nested messages, lists, and maps.
// prefix accumulates a human-readable field path for error messages; path
// accumulates the numeric option path used to locate the closest AST node for
// error positions. isFeatures marks the root of the "features" subtree and
// inFeatures marks anything beneath it; inMap indicates we are inside a map
// entry, where the numeric path can no longer be meaningfully extended.
func (interp *interpreter) validateRecursive(
	validateRequiredFields bool,
	msg protoreflect.Message,
	prefix string,
	element proto.Message,
	path []int32,
	isFeatures bool,
	inFeatures bool,
	inMap bool,
) error {
	if validateRequiredFields {
		flds := msg.Descriptor().Fields()
		var missingFields []string
		for i := range flds.Len() {
			fld := flds.Get(i)
			if fld.Cardinality() == protoreflect.Required && !msg.Has(fld) {
				missingFields = append(missingFields, fmt.Sprintf("%s%s", prefix, fld.Name()))
			}
		}
		if len(missingFields) > 0 {
			node := interp.findOptionNode(path, element)
			err := interp.handleErrorf(interp.nodeInfo(node), "error in %s options: some required fields missing: %v", descriptorType(element), strings.Join(missingFields, ", "))
			if err != nil {
				return err
			}
		}
	}
	var err error
	msg.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool {
		chpath := path
		if !inMap {
			chpath = append(chpath, int32(fld.Number()))
		}
		chInFeatures := isFeatures || inFeatures
		chIsFeatures := !chInFeatures && len(path) == 0 && fld.Name() == "features"
		// A feature (or custom feature extension) may not be used in the same
		// file that defines it.
		if (isFeatures || (inFeatures && fld.IsExtension())) &&
			interp.file.FileNode().Name() == fld.ParentFile().Path() {
			var what, name string
			if fld.IsExtension() {
				what = "custom feature"
				name = "(" + string(fld.FullName()) + ")"
			} else {
				what = "feature"
				name = string(fld.Name())
			}
			node := interp.findOptionNode(path, element)
			err = interp.handleErrorf(interp.nodeInfo(node), "%s %s cannot be used from the same file in which it is defined", what, name)
			if err != nil {
				return false
			}
		}
		if chInFeatures {
			// Validate feature usage against feature settings.
			// First, check the feature support settings of the field.
			opts, _ := fld.Options().(*descriptorpb.FieldOptions)
			edition := interp.file.FileDescriptorProto().GetEdition()
			if opts != nil && opts.FeatureSupport != nil {
				err = interp.validateFeatureSupport(edition, opts.FeatureSupport, "field", string(fld.FullName()), chpath, element)
				if err != nil {
					return false
				}
			}
			// Then, if it's an enum or has an enum, check the feature support settings of the enum values.
			var enum protoreflect.EnumDescriptor
			if fld.Enum() != nil {
				enum = fld.Enum()
			} else if fld.IsMap() && fld.MapValue().Enum() != nil {
				enum = fld.MapValue().Enum()
			}
			if enum != nil {
				switch {
				case fld.IsMap():
					val.Map().Range(func(_ protoreflect.MapKey, v protoreflect.Value) bool {
						// Can't construct path to particular map entry since we don't know this entry's index.
						// So we leave chpath alone, and it will have to point to the whole map value (or
						// the first entry if the map is de-structured across multiple option statements).
						err = interp.validateEnumValueFeatureSupport(edition, enum, v.Enum(), chpath, element)
						return err == nil
					})
					if err != nil {
						return false
					}
				case fld.IsList():
					sl := val.List()
					for i := range sl.Len() {
						v := sl.Get(i)
						err = interp.validateEnumValueFeatureSupport(edition, enum, v.Enum(), append(chpath, int32(i)), element)
						if err != nil {
							return false
						}
					}
				default:
					err = interp.validateEnumValueFeatureSupport(edition, enum, val.Enum(), chpath, element)
					if err != nil {
						return false
					}
				}
			}
		}
		// If it's a message or contains a message, recursively validate fields in those messages.
		switch {
		case fld.IsMap() && fld.MapValue().Message() != nil:
			val.Map().Range(func(k protoreflect.MapKey, v protoreflect.Value) bool {
				chprefix := fmt.Sprintf("%s%s[%v].", prefix, fieldName(fld), k)
				err = interp.validateRecursive(validateRequiredFields, v.Message(), chprefix, element, chpath, chIsFeatures, chInFeatures, true)
				return err == nil
			})
			if err != nil {
				return false
			}
		case fld.IsList() && fld.Message() != nil:
			sl := val.List()
			for i := range sl.Len() {
				v := sl.Get(i)
				chprefix := fmt.Sprintf("%s%s[%d].", prefix, fieldName(fld), i)
				if !inMap {
					chpath = append(chpath, int32(i))
				}
				err = interp.validateRecursive(validateRequiredFields, v.Message(), chprefix, element, chpath, chIsFeatures, chInFeatures, inMap)
				if err != nil {
					return false
				}
			}
		case !fld.IsMap() && fld.Message() != nil:
			chprefix := fmt.Sprintf("%s%s.", prefix, fieldName(fld))
			err = interp.validateRecursive(validateRequiredFields, val.Message(), chprefix, element, chpath, chIsFeatures, chInFeatures, inMap)
			if err != nil {
				return false
			}
		}
		return true
	})
	return err
}
// validateEnumValueFeatureSupport checks the feature-support settings
// declared on the enum value with the given number, if any. Unknown numbers
// and values without feature-support settings pass trivially.
func (interp *interpreter) validateEnumValueFeatureSupport(
	edition descriptorpb.Edition,
	enum protoreflect.EnumDescriptor,
	number protoreflect.EnumNumber,
	path []int32,
	element proto.Message,
) error {
	val := enum.Values().ByNumber(number)
	if val == nil {
		return nil
	}
	opts, _ := val.Options().(*descriptorpb.EnumValueOptions)
	support := opts.GetFeatureSupport()
	if support == nil {
		return nil
	}
	return interp.validateFeatureSupport(edition, support, "enum value", string(val.Name()), path, element)
}
// validateFeatureSupport reports an error when the named feature (a field or
// enum value) is used in an edition earlier than the one that introduced it,
// or at/after the edition that removed it; it also emits a warning when the
// feature is deprecated as of the current edition.
func (interp *interpreter) validateFeatureSupport(
	edition descriptorpb.Edition,
	featureSupport *descriptorpb.FieldOptions_FeatureSupport,
	what string,
	name string,
	path []int32,
	element proto.Message,
) error {
	if featureSupport.EditionIntroduced != nil && edition < featureSupport.GetEditionIntroduced() {
		node := interp.findOptionNode(path, element)
		if err := interp.handleErrorf(interp.nodeInfo(node), "%s %q was not introduced until edition %s", what, name, editionString(featureSupport.GetEditionIntroduced())); err != nil {
			return err
		}
	}
	if featureSupport.EditionRemoved != nil && edition >= featureSupport.GetEditionRemoved() {
		node := interp.findOptionNode(path, element)
		if err := interp.handleErrorf(interp.nodeInfo(node), "%s %q was removed in edition %s", what, name, editionString(featureSupport.GetEditionRemoved())); err != nil {
			return err
		}
	}
	if featureSupport.EditionDeprecated != nil && edition >= featureSupport.GetEditionDeprecated() {
		node := interp.findOptionNode(path, element)
		// Deprecation is only a warning, never an error.
		suffix := ""
		if featureSupport.GetDeprecationWarning() != "" {
			suffix = ": " + featureSupport.GetDeprecationWarning()
		}
		interp.reporter.HandleWarningf(interp.nodeInfo(node), "%s %q is deprecated as of edition %s%s", what, name, editionString(featureSupport.GetEditionDeprecated()), suffix)
	}
	return nil
}
// findOptionNode locates the AST node that best corresponds to the option
// identified by path within element's options, falling back to the element's
// own node when nothing more precise can be found.
func (interp *interpreter) findOptionNode(
	path []int32,
	element proto.Message,
) ast.Node {
	elementNode := interp.file.Node(element)
	withOpts, ok := elementNode.(ast.NodeWithOptions)
	if !ok || withOpts == nil {
		return elementNode
	}
	match, _ := findOptionNode[*ast.OptionNode](
		path,
		optionsRanger{withOpts},
		func(n *ast.OptionNode) *sourceinfo.OptionSourceInfo {
			return interp.index[n]
		},
	)
	if match == nil {
		return elementNode
	}
	return match
}
// findOptionNode searches the given range of option/value nodes for the one
// whose recorded source-info path most closely matches path. It returns the
// best-matching node and the length of that match (nil and 0 when nothing
// matched at all).
func findOptionNode[N ast.Node](
	path []int32,
	nodes interface {
		Range(func(N, ast.ValueNode) bool)
	},
	srcInfoAccessor func(N) *sourceinfo.OptionSourceInfo,
) (ast.Node, int) {
	var bestMatch ast.Node
	var bestMatchLen int
	nodes.Range(func(node N, val ast.ValueNode) bool {
		srcInfo := srcInfoAccessor(node)
		if srcInfo == nil {
			// can happen if we are lenient when interpreting -- this node
			// could not be interpreted and thus has no source info; skip
			return true
		}
		if srcInfo.Path[0] < 0 {
			// negative first value means it's a field pseudo-option; skip
			return true
		}
		match, matchLen := findOptionValueNode(path, node, val, srcInfo)
		if matchLen > bestMatchLen {
			bestMatch = match
			bestMatchLen = matchLen
			if matchLen >= len(path) {
				// not going to find a better one
				return false
			}
		}
		return true
	})
	return bestMatch, bestMatchLen
}
// optionsRanger adapts an ast.NodeWithOptions to the generic Range interface
// expected by findOptionNode, pairing each option node with its value node.
type optionsRanger struct {
	node ast.NodeWithOptions
}

// Range invokes f for each option declared on the underlying node, stopping
// early when f returns false.
func (r optionsRanger) Range(f func(*ast.OptionNode, ast.ValueNode) bool) {
	r.node.RangeOptions(func(optNode *ast.OptionNode) bool {
		return f(optNode, optNode.Val)
	})
}
// valueRanger adapts a slice of value nodes (array-literal elements) to the
// generic Range interface; each element serves as both the node and its value.
type valueRanger []ast.ValueNode

// Range invokes f for each element in order, stopping early when f returns false.
func (r valueRanger) Range(f func(ast.ValueNode, ast.ValueNode) bool) {
	for _, elem := range r {
		if !f(elem, elem) {
			return
		}
	}
}
// fieldRanger adapts a message literal's field-to-source-info map to the
// generic Range interface.
// NOTE(review): map iteration order is random; this appears tolerable because
// findOptionNode tracks the best (longest) match rather than the first one —
// confirm no caller depends on deterministic ordering.
type fieldRanger map[*ast.MessageFieldNode]*sourceinfo.OptionSourceInfo

// Range invokes f for each field node in the map (arbitrary order), stopping
// early when f returns false.
func (r fieldRanger) Range(f func(*ast.MessageFieldNode, ast.ValueNode) bool) {
	for elem := range r {
		if !f(elem, elem.Val) {
			return
		}
	}
}
// isPathMatch reports whether a and b agree at every position up to the
// length of the shorter slice. An empty path therefore matches anything.
func isPathMatch(a, b []int32) bool {
	n := min(len(a), len(b))
	for i := 0; i < n; i++ {
		if a[i] != b[i] {
			return false
		}
	}
	return true
}
// findOptionValueNode matches path against srcInfo for the given node/value
// pair. It returns (nil, 0) when the paths diverge; otherwise it descends
// into array- or message-literal children looking for a longer match and
// returns the deepest matching node along with the length of path it matched.
func findOptionValueNode(
	path []int32,
	node ast.Node,
	value ast.ValueNode,
	srcInfo *sourceinfo.OptionSourceInfo,
) (ast.Node, int) {
	srcInfoPath := srcInfo.Path
	if _, ok := srcInfo.Children.(*sourceinfo.ArrayLiteralSourceInfo); ok {
		// Last path element for array source info is the index of the
		// first element. So exclude in the comparison, since path could
		// indicate a later index, which is present in the array.
		srcInfoPath = srcInfo.Path[:len(srcInfo.Path)-1]
	}
	if !isPathMatch(path, srcInfoPath) {
		return nil, 0
	}
	if len(srcInfoPath) >= len(path) {
		// This node's path fully covers the sought path.
		return node, len(path)
	}
	switch children := srcInfo.Children.(type) {
	case *sourceinfo.ArrayLiteralSourceInfo:
		array, ok := value.(*ast.ArrayLiteralNode)
		if !ok {
			break // should never happen
		}
		// The accessor closure advances i in lockstep with the ranger's
		// in-order iteration over array.Elements, pairing each element with
		// its corresponding child source info.
		var i int
		match, matchLen := findOptionNode[ast.ValueNode](
			path,
			valueRanger(array.Elements),
			func(_ ast.ValueNode) *sourceinfo.OptionSourceInfo {
				val := &children.Elements[i]
				i++
				return val
			},
		)
		if match != nil {
			return match, matchLen
		}
	case *sourceinfo.MessageLiteralSourceInfo:
		match, matchLen := findOptionNode[*ast.MessageFieldNode](
			path,
			fieldRanger(children.Fields),
			func(n *ast.MessageFieldNode) *sourceinfo.OptionSourceInfo {
				return children.Fields[n]
			},
		)
		if match != nil {
			return match, matchLen
		}
	}
	// No deeper match: this node is the closest enclosing value.
	return node, len(srcInfoPath)
}
// interpretField interprets the option described by opt, as a field inside the given msg. This
// interprets components of the option name starting at nameIndex. When nameIndex == 0, then
// msg must be an options message. For nameIndex > 0, msg is a nested message inside of the
// options message. The given pathPrefix is the path (sequence of field numbers and indices
// with a FileDescriptorProto as the start) up to but not including the given nameIndex.
//
// Any errors encountered will be handled, so the returned error will only be non-nil if
// the handler returned non-nil. Callers must check that the source info is non-nil before
// using it since it can be nil (in the event of a problem) even if the error is nil.
func (interp *interpreter) interpretField(
	targetType descriptorpb.FieldOptions_OptionTargetType,
	mc *internal.MessageContext,
	msg protoreflect.Message,
	opt *descriptorpb.UninterpretedOption,
	nameIndex int,
	pathPrefix []int32,
) (*sourceinfo.OptionSourceInfo, error) {
	var fld protoreflect.FieldDescriptor
	nm := opt.GetName()[nameIndex]
	node := interp.file.OptionNamePartNode(nm)
	if nm.GetIsExtension() {
		extName := nm.GetNamePart()
		if extName[0] == '.' {
			extName = extName[1:] /* skip leading dot */
		}
		var err error
		fld, err = interp.resolveExtensionType(extName)
		if errors.Is(err, protoregistry.NotFound) {
			return nil, interp.handleErrorf(interp.nodeInfo(node),
				"%vunrecognized extension %s of %s",
				mc, extName, msg.Descriptor().FullName())
		} else if err != nil {
			return nil, interp.handleErrorWithPos(interp.nodeInfo(node), err)
		}
		// The extension must extend the message currently being populated.
		if fld.ContainingMessage().FullName() != msg.Descriptor().FullName() {
			return nil, interp.handleErrorf(interp.nodeInfo(node),
				"%vextension %s should extend %s but instead extends %s",
				mc, extName, msg.Descriptor().FullName(), fld.ContainingMessage().FullName())
		}
	} else {
		fld = msg.Descriptor().Fields().ByName(protoreflect.Name(nm.GetNamePart()))
		if fld == nil {
			return nil, interp.handleErrorf(interp.nodeInfo(node),
				"%vfield %s of %s does not exist",
				mc, nm.GetNamePart(), msg.Descriptor().FullName())
		}
	}
	pathPrefix = append(pathPrefix, int32(fld.Number()))
	if err := interp.checkFieldUsage(targetType, fld, node); err != nil {
		return nil, err
	}
	if len(opt.GetName()) > nameIndex+1 {
		// More name components follow, so this field must be a singular
		// message field that we descend into.
		nextnm := opt.GetName()[nameIndex+1]
		nextnode := interp.file.OptionNamePartNode(nextnm)
		k := fld.Kind()
		if k != protoreflect.MessageKind && k != protoreflect.GroupKind {
			return nil, interp.handleErrorf(interp.nodeInfo(nextnode),
				"%vcannot set field %s because %s is not a message",
				mc, nextnm.GetNamePart(), nm.GetNamePart())
		}
		if fld.Cardinality() == protoreflect.Repeated {
			return nil, interp.handleErrorf(interp.nodeInfo(nextnode),
				"%vcannot set field %s because %s is repeated (must use an aggregate)",
				mc, nextnm.GetNamePart(), nm.GetNamePart())
		}
		var fdm protoreflect.Message
		if msg.Has(fld) {
			v := msg.Mutable(fld)
			fdm = v.Message()
		} else {
			// Setting a member of a oneof that already has a different field
			// set is an error, not an implicit overwrite.
			if ood := fld.ContainingOneof(); ood != nil {
				existingFld := msg.WhichOneof(ood)
				if existingFld != nil && existingFld.Number() != fld.Number() {
					return nil, interp.handleErrorf(interp.nodeInfo(node),
						"%voneof %q already has field %q set",
						mc, ood.Name(), fieldName(existingFld))
				}
			}
			fldVal := msg.NewField(fld)
			fdm = fldVal.Message()
			msg.Set(fld, fldVal)
		}
		// recurse to set next part of name
		return interp.interpretField(targetType, mc, fdm, opt, nameIndex+1, pathPrefix)
	}
	// Last name component: set the actual value. When the AST carries no
	// value (options came from a descriptor rather than source), fall back to
	// the value stored in the uninterpreted option proto.
	optNode := interp.file.OptionNode(opt)
	optValNode := optNode.GetValue()
	var srcInfo *sourceinfo.OptionSourceInfo
	var err error
	if optValNode.Value() == nil {
		err = interp.setOptionFieldFromProto(targetType, mc, msg, fld, node, opt, optValNode)
		srcInfoVal := newSrcInfo(pathPrefix, nil)
		srcInfo = &srcInfoVal
	} else {
		srcInfo, err = interp.setOptionField(targetType, mc, msg, fld, node, optValNode, false, pathPrefix)
	}
	if err != nil {
		return nil, err
	}
	return srcInfo, nil
}
// setOptionField sets the value for field fld in the given message msg to the value represented
// by AST node val. The given name is the AST node that corresponds to the name of fld. On success,
// it returns additional metadata about the field that was set.
//
// The insideMsgLiteral flag indicates that val occurs inside a message literal
// (which allows the more lenient text-format scalar forms), and pathPrefix is
// the source-info path of fld's container, used to build the returned source info.
func (interp *interpreter) setOptionField(
	targetType descriptorpb.FieldOptions_OptionTargetType,
	mc *internal.MessageContext,
	msg protoreflect.Message,
	fld protoreflect.FieldDescriptor,
	name ast.Node,
	val ast.ValueNode,
	insideMsgLiteral bool,
	pathPrefix []int32,
) (*sourceinfo.OptionSourceInfo, error) {
	v := val.Value()
	if sl, ok := v.([]ast.ValueNode); ok {
		// handle slices a little differently than the others
		if fld.Cardinality() != protoreflect.Repeated {
			return nil, interp.handleErrorf(interp.nodeInfo(val), "%vvalue is an array but field is not repeated", mc)
		}
		// Temporarily extend the aggregate path (e.g. "foo[0]", "foo[1]") for
		// error messages while processing elements; restore it on return.
		origPath := mc.OptAggPath
		defer func() {
			mc.OptAggPath = origPath
		}()
		childVals := make([]sourceinfo.OptionSourceInfo, len(sl))
		// firstIndex is where new elements land, which can be non-zero if
		// earlier options already appended to this field.
		var firstIndex int
		if fld.IsMap() {
			firstIndex = msg.Get(fld).Map().Len()
		} else {
			firstIndex = msg.Get(fld).List().Len()
		}
		for index, item := range sl {
			mc.OptAggPath = fmt.Sprintf("%s[%d]", origPath, index)
			value, srcInfo, err := interp.fieldValue(targetType, mc, msg, fld, item, insideMsgLiteral, append(pathPrefix, int32(firstIndex+index)))
			if err != nil || !value.IsValid() {
				// An invalid value with a nil error means the error was already
				// handled (see fieldValue's doc); either way, stop here.
				return nil, err
			}
			if fld.IsMap() {
				mv := msg.Mutable(fld).Map()
				setMapEntry(fld, msg, mv, value.Message())
			} else {
				lv := msg.Mutable(fld).List()
				lv.Append(value)
			}
			childVals[index] = srcInfo
		}
		srcInfo := newSrcInfo(append(pathPrefix, int32(firstIndex)), &sourceinfo.ArrayLiteralSourceInfo{Elements: childVals})
		return &srcInfo, nil
	}
	// Single value: extend the path with the index at which the value will be
	// stored when the field is a map or list.
	if fld.IsMap() {
		pathPrefix = append(pathPrefix, int32(msg.Get(fld).Map().Len()))
	} else if fld.IsList() {
		pathPrefix = append(pathPrefix, int32(msg.Get(fld).List().Len()))
	}
	value, srcInfo, err := interp.fieldValue(targetType, mc, msg, fld, val, insideMsgLiteral, pathPrefix)
	if err != nil || !value.IsValid() {
		return nil, err
	}
	// A member of a oneof may only be set if no other member is already set.
	if ood := fld.ContainingOneof(); ood != nil {
		existingFld := msg.WhichOneof(ood)
		if existingFld != nil && existingFld.Number() != fld.Number() {
			return nil, interp.handleErrorf(interp.nodeInfo(name), "%voneof %q already has field %q set", mc, ood.Name(), fieldName(existingFld))
		}
	}
	switch {
	case fld.IsMap():
		mv := msg.Mutable(fld).Map()
		setMapEntry(fld, msg, mv, value.Message())
	case fld.IsList():
		lv := msg.Mutable(fld).List()
		lv.Append(value)
	default:
		// Scalar/singular field: setting it twice is an error.
		if msg.Has(fld) {
			return nil, interp.handleErrorf(interp.nodeInfo(name), "%vnon-repeated option field %s already set", mc, fieldName(fld))
		}
		msg.Set(fld, value)
	}
	return &srcInfo, nil
}
// setOptionFieldFromProto sets the value for field fld in the given message msg to the value
// represented by the given uninterpreted option. The given ast.Node, if non-nil, will be used
// to report source positions in error messages. It returns a non-nil error if
// the value could not be interpreted or applied (and the handler chose to fail).
func (interp *interpreter) setOptionFieldFromProto(
	targetType descriptorpb.FieldOptions_OptionTargetType,
	mc *internal.MessageContext,
	msg protoreflect.Message,
	fld protoreflect.FieldDescriptor,
	name ast.Node,
	opt *descriptorpb.UninterpretedOption,
	node ast.Node,
) error {
	k := fld.Kind()
	var value protoreflect.Value
	switch k {
	case protoreflect.EnumKind:
		num, _, err := interp.enumFieldValueFromProto(mc, fld.Enum(), opt, node)
		if err != nil {
			return interp.handleError(err)
		}
		value = protoreflect.ValueOfEnum(num)
	case protoreflect.MessageKind, protoreflect.GroupKind:
		if opt.AggregateValue == nil {
			return interp.handleErrorf(interp.nodeInfo(node), "%vexpecting message, got %s", mc, optionValueKind(opt))
		}
		// We must parse the text format from the aggregate value string
		var elem protoreflect.Message
		switch {
		case fld.IsMap():
			// No generated type exists for map entries, so use a dynamic message.
			elem = dynamicpb.NewMessage(fld.Message())
		case fld.IsList():
			elem = msg.Get(fld).List().NewElement().Message()
		default:
			elem = msg.NewField(fld).Message()
		}
		err := prototext.UnmarshalOptions{
			Resolver:     &msgLiteralResolver{interp: interp, pkg: fld.ParentFile().Package()},
			AllowPartial: true,
		}.Unmarshal([]byte(opt.GetAggregateValue()), elem.Interface())
		if err != nil {
			// Separate the wrapped parser error from the message with ": " so
			// the combined text reads cleanly.
			return interp.handleErrorf(interp.nodeInfo(node), "%vfailed to parse message literal: %w", mc, err)
		}
		// Since there is no AST for the literal's contents, validate field
		// usage for the target type after the fact.
		if err := interp.checkFieldUsagesInMessage(targetType, elem, node); err != nil {
			return err
		}
		value = protoreflect.ValueOfMessage(elem)
	default:
		v, err := interp.scalarFieldValueFromProto(mc, descriptorpb.FieldDescriptorProto_Type(k), opt, node)
		if err != nil {
			return interp.handleError(err)
		}
		value = protoreflect.ValueOf(v)
	}
	// A member of a oneof may only be set if no other member is already set.
	if ood := fld.ContainingOneof(); ood != nil {
		existingFld := msg.WhichOneof(ood)
		if existingFld != nil && existingFld.Number() != fld.Number() {
			return interp.handleErrorf(interp.nodeInfo(name), "%voneof %q already has field %q set", mc, ood.Name(), fieldName(existingFld))
		}
	}
	switch {
	case fld.IsMap():
		mv := msg.Mutable(fld).Map()
		setMapEntry(fld, msg, mv, value.Message())
	case fld.IsList():
		msg.Mutable(fld).List().Append(value)
	default:
		// Scalar/singular field: setting it twice is an error.
		if msg.Has(fld) {
			return interp.handleErrorf(interp.nodeInfo(name), "%vnon-repeated option field %s already set", mc, fieldName(fld))
		}
		msg.Set(fld, value)
	}
	return nil
}
// checkFieldUsagesInMessage verifies that all fields present in the given
// message can be used for the given target type. When an AST is
// present, we validate each field as it is processed. But without
// an AST, we unmarshal a message from an uninterpreted option's
// aggregate value string, and then must make sure that all fields
// set in that message are valid. This reports an error for each
// invalid field it encounters and returns a non-nil error if/when
// the handler returns a non-nil error.
func (interp *interpreter) checkFieldUsagesInMessage(
	targetType descriptorpb.FieldOptions_OptionTargetType,
	msg protoreflect.Message,
	node ast.Node,
) error {
	// err is captured by the Range callbacks below; iteration stops as soon
	// as it becomes non-nil.
	var err error
	msg.Range(func(fld protoreflect.FieldDescriptor, val protoreflect.Value) bool {
		err = interp.checkFieldUsage(targetType, fld, node)
		if err != nil {
			return false
		}
		// Recurse into message-typed values so that fields set on nested
		// messages (including list elements and map values) are checked too.
		switch {
		case fld.IsList() && fld.Message() != nil:
			listVal := val.List()
			for i, length := 0, listVal.Len(); i < length; i++ {
				err = interp.checkFieldUsagesInMessage(targetType, listVal.Get(i).Message(), node)
				if err != nil {
					return false
				}
			}
		case fld.IsMap() && fld.MapValue().Message() != nil:
			mapVal := val.Map()
			mapVal.Range(func(_ protoreflect.MapKey, val protoreflect.Value) bool {
				err = interp.checkFieldUsagesInMessage(targetType, val.Message(), node)
				return err == nil
			})
		case !fld.IsMap() && fld.Message() != nil:
			err = interp.checkFieldUsagesInMessage(targetType, val.Message(), node)
		}
		return err == nil
	})
	return err
}
// enableLenience turns lenient error tolerance on or off. It does nothing
// when the interpreter was not configured as lenient in the first place.
func (interp *interpreter) enableLenience(enable bool) {
	if !interp.lenient {
		return // lenience not configured; nothing to track
	}
	interp.lenienceEnabled = enable
	if enable {
		// Entering a new lenient region, so clear the flag that tracks
		// whether an error has been reported.
		interp.lenientErrReported = false
	}
}
// setMapEntry inserts into mapVal (the map for field fld of msg) the entry
// represented by the given entry message, whose key and value fields carry
// the map entry's key and value.
func setMapEntry(
	fld protoreflect.FieldDescriptor,
	msg protoreflect.Message,
	mapVal protoreflect.Map,
	entry protoreflect.Message,
) {
	keyFld, valFld := fld.MapKey(), fld.MapValue()
	key := entry.Get(keyFld)
	val := entry.Get(valFld)
	if fld.MapValue().Kind() == protoreflect.MessageKind {
		// Replace any nil/invalid values with an empty message
		dm, valIsDynamic := val.Interface().(*dynamicpb.Message)
		if (valIsDynamic && dm == nil) || !val.Message().IsValid() {
			val = protoreflect.ValueOfMessage(dynamicpb.NewMessage(valFld.Message()))
		}
		_, containerIsDynamic := msg.Interface().(*dynamicpb.Message)
		if valIsDynamic && !containerIsDynamic {
			// This happens because we create dynamic messages to represent map entries,
			// but the container of the map may expect a non-dynamic, generated type.
			dest := mapVal.NewValue()
			_, destIsDynamic := dest.Message().Interface().(*dynamicpb.Message)
			if !destIsDynamic {
				// reflection Set methods do not support cases where destination is
				// generated but source is dynamic (or vice versa). But proto.Merge
				// *DOES* support that, as long as dest and source use the same
				// descriptor.
				proto.Merge(dest.Message().Interface(), val.Message().Interface())
				val = dest
			}
		}
	}
	// TODO: error if key is already present
	mapVal.Set(key.MapKey(), val)
}
// msgLiteralResolver resolves type and extension references that appear inside
// message literal option values, delegating to the interpreter's resolver.
// Extension names in literals may be partially qualified, so lookups search
// enclosing package scopes starting at pkg.
type msgLiteralResolver struct {
	interp *interpreter
	pkg    protoreflect.FullName
}
// FindMessageByName delegates to the interpreter's resolver, reporting
// not-found when no resolver is configured.
func (r *msgLiteralResolver) FindMessageByName(message protoreflect.FullName) (protoreflect.MessageType, error) {
	res := r.interp.resolver
	if res == nil {
		return nil, protoregistry.NotFound
	}
	return res.FindMessageByName(message)
}
// FindMessageByURL resolves an Any type URL appearing in a message literal.
// Only the two well-known host prefixes are accepted; arbitrary URL prefixes
// are rejected.
func (r *msgLiteralResolver) FindMessageByURL(url string) (protoreflect.MessageType, error) {
	// In a message literal, we don't allow arbitrary URL prefixes
	slash := strings.LastIndexByte(url, '/')
	prefix := ""
	if slash > 0 {
		prefix = url[:slash]
	}
	switch prefix {
	case "type.googleapis.com", "type.googleprod.com":
		return r.FindMessageByName(protoreflect.FullName(url[slash+1:]))
	default:
		return nil, fmt.Errorf("could not resolve type reference %s", url)
	}
}
// FindExtensionByName resolves an extension reference that appeared in a
// message literal. Such names may be partially qualified, relative to the
// file's package, so each enclosing package scope is tried in turn, from
// innermost (r.pkg) out to the root scope.
func (r *msgLiteralResolver) FindExtensionByName(field protoreflect.FullName) (protoreflect.ExtensionType, error) {
	if r.interp.resolver == nil {
		return nil, protoregistry.NotFound
	}
	// TODO: This does not *fully* implement the insane logic of protoc with regards
	// to resolving relative references.
	// https://protobuf.com/docs/language-spec#reference-resolution
	for pkg := r.pkg; ; pkg = pkg.Parent() {
		candidate := pkg.Append(protoreflect.Name(field))
		ext, err := r.interp.resolver.FindExtensionByName(candidate)
		if err == nil {
			return ext, nil
		}
		if pkg == "" {
			// Already tried the root scope; no more namespaces to check.
			return nil, err
		}
	}
}
// FindExtensionByNumber delegates to the interpreter's resolver, reporting
// not-found when no resolver is configured.
func (r *msgLiteralResolver) FindExtensionByNumber(message protoreflect.FullName, field protoreflect.FieldNumber) (protoreflect.ExtensionType, error) {
	res := r.interp.resolver
	if res == nil {
		return nil, protoregistry.NotFound
	}
	return res.FindExtensionByNumber(message, field)
}
// fieldName renders the given field for use in error messages: extensions
// show their fully-qualified name wrapped in parentheses, while regular
// fields show their simple name.
func fieldName(fld protoreflect.FieldDescriptor) string {
	if !fld.IsExtension() {
		return string(fld.Name())
	}
	return "(" + string(fld.FullName()) + ")"
}
// valueKind describes the kind of an AST value (as returned by
// ast.ValueNode.Value) for use in error messages.
func valueKind(val any) string {
	switch v := val.(type) {
	case ast.Identifier:
		return "identifier"
	case bool:
		return "bool"
	case int64:
		// Negative literals are reported distinctly from other integers.
		if v < 0 {
			return "negative integer"
		}
		return "integer"
	case uint64:
		return "integer"
	case float64:
		return "double"
	case string, []byte:
		return "string"
	case []*ast.MessageFieldNode:
		return "message"
	case []ast.ValueNode:
		return "array"
	default:
		// Unexpected type: fall back to the Go type name.
		return fmt.Sprintf("%T", v)
	}
}
// optionValueKind describes which of the value fields is populated in the
// given uninterpreted option, for use in error messages.
func optionValueKind(opt *descriptorpb.UninterpretedOption) string {
	if opt.IdentifierValue != nil {
		return "identifier"
	}
	if opt.PositiveIntValue != nil {
		return "integer"
	}
	if opt.NegativeIntValue != nil {
		return "negative integer"
	}
	if opt.DoubleValue != nil {
		return "double"
	}
	if opt.StringValue != nil {
		return "string"
	}
	if opt.AggregateValue != nil {
		return "message"
	}
	// should not be possible
	return "<nil>"
}
// fieldValue computes a compile-time value (constant or list or message literal) for the given
// AST node val. The value in val must be assignable to the field fld.
//
// If the returned value is not valid, then an error occurred during processing.
// The returned err may be nil, however, as any errors will already have been
// handled (so the resulting error could be nil if the handler returned nil).
func (interp *interpreter) fieldValue(
	targetType descriptorpb.FieldOptions_OptionTargetType,
	mc *internal.MessageContext,
	msg protoreflect.Message,
	fld protoreflect.FieldDescriptor,
	val ast.ValueNode,
	insideMsgLiteral bool,
	pathPrefix []int32,
) (protoreflect.Value, sourceinfo.OptionSourceInfo, error) {
	k := fld.Kind()
	switch k {
	case protoreflect.EnumKind:
		// Numeric enum values are only permitted inside message literals
		// (insideMsgLiteral doubles as enumFieldValue's allowNumber flag).
		num, _, err := interp.enumFieldValue(mc, fld.Enum(), val, insideMsgLiteral)
		if err != nil {
			return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, interp.handleError(err)
		}
		return protoreflect.ValueOfEnum(num), newSrcInfo(pathPrefix, nil), nil
	case protoreflect.MessageKind, protoreflect.GroupKind:
		v := val.Value()
		if aggs, ok := v.([]*ast.MessageFieldNode); ok {
			// Construct a fresh child message to populate from the literal.
			var childMsg protoreflect.Message
			switch {
			case fld.IsList():
				// List of messages
				val := msg.NewField(fld)
				childMsg = val.List().NewElement().Message()
			case fld.IsMap():
				// No generated type for map entries, so we use a dynamic type
				childMsg = dynamicpb.NewMessage(fld.Message())
			default:
				// Normal message field
				childMsg = msg.NewField(fld).Message()
			}
			return interp.messageLiteralValue(targetType, mc, aggs, childMsg, pathPrefix)
		}
		return protoreflect.Value{}, sourceinfo.OptionSourceInfo{},
			interp.handleErrorf(interp.nodeInfo(val), "%vexpecting message, got %s", mc, valueKind(v))
	default:
		// All remaining kinds are scalars.
		v, err := interp.scalarFieldValue(mc, descriptorpb.FieldDescriptorProto_Type(k), val, insideMsgLiteral)
		if err != nil {
			return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, interp.handleError(err)
		}
		return protoreflect.ValueOf(v), newSrcInfo(pathPrefix, nil), nil
	}
}
// enumFieldValue resolves the given AST node val as an enum value descriptor. If the given
// value is not a valid identifier (or number if allowed), an error is returned instead.
// On success it returns the enum number and, when the enum defines a value
// with that number, the value's name; for an open enum an unknown number is
// allowed and returned with a blank name.
func (interp *interpreter) enumFieldValue(
	mc *internal.MessageContext,
	ed protoreflect.EnumDescriptor,
	val ast.ValueNode,
	allowNumber bool,
) (protoreflect.EnumNumber, protoreflect.Name, error) {
	v := val.Value()
	var num protoreflect.EnumNumber
	switch v := v.(type) {
	case ast.Identifier:
		// An identifier must exactly match one of the enum's value names.
		name := protoreflect.Name(v)
		ev := ed.Values().ByName(name)
		if ev == nil {
			return 0, "", reporter.Errorf(interp.nodeInfo(val), "%venum %s has no value named %s", mc, ed.FullName(), v)
		}
		return ev.Number(), name, nil
	case int64:
		if !allowNumber {
			return 0, "", reporter.Errorf(interp.nodeInfo(val), "%vexpecting enum name, got %s", mc, valueKind(v))
		}
		// Enum numbers are 32-bit signed values.
		if v > math.MaxInt32 || v < math.MinInt32 {
			return 0, "", reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for an enum", mc, v)
		}
		num = protoreflect.EnumNumber(v)
	case uint64:
		if !allowNumber {
			return 0, "", reporter.Errorf(interp.nodeInfo(val), "%vexpecting enum name, got %s", mc, valueKind(v))
		}
		if v > math.MaxInt32 {
			return 0, "", reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for an enum", mc, v)
		}
		num = protoreflect.EnumNumber(v)
	default:
		return 0, "", reporter.Errorf(interp.nodeInfo(val), "%vexpecting enum, got %s", mc, valueKind(v))
	}
	// Numeric value: map it back to a defined value name if one exists.
	ev := ed.Values().ByNumber(num)
	if ev != nil {
		return num, ev.Name(), nil
	}
	if ed.IsClosed() {
		return num, "", reporter.Errorf(interp.nodeInfo(val), "%vclosed enum %s has no value with number %d", mc, ed.FullName(), num)
	}
	// unknown value, but enum is open, so we allow it and return blank name
	return num, "", nil
}
// enumFieldValueFromProto resolves the given uninterpreted option value as an
// enum value descriptor. If the given value is not a valid identifier, an
// error is returned instead.
func (interp *interpreter) enumFieldValueFromProto(
	mc *internal.MessageContext,
	ed protoreflect.EnumDescriptor,
	opt *descriptorpb.UninterpretedOption,
	node ast.Node,
) (protoreflect.EnumNumber, protoreflect.Name, error) {
	// We don't have to worry about allowing numbers because numbers are never allowed
	// in uninterpreted values; they are only allowed inside aggregate values (i.e.
	// message literals).
	if opt.IdentifierValue == nil {
		return 0, "", reporter.Errorf(interp.nodeInfo(node), "%vexpecting enum, got %s", mc, optionValueKind(opt))
	}
	name := protoreflect.Name(opt.GetIdentifierValue())
	ev := ed.Values().ByName(name)
	if ev == nil {
		return 0, "", reporter.Errorf(interp.nodeInfo(node), "%venum %s has no value named %s", mc, ed.FullName(), name)
	}
	return ev.Number(), name, nil
}
// scalarFieldValue resolves the given AST node val as a value whose type is assignable to a
// field with the given fldType.
//
// The insideMsgLiteral flag enables the more lenient identifier spellings that
// the protobuf text format accepts for bool fields ("t", "f", "True", "False").
func (interp *interpreter) scalarFieldValue(
	mc *internal.MessageContext,
	fldType descriptorpb.FieldDescriptorProto_Type,
	val ast.ValueNode,
	insideMsgLiteral bool,
) (any, error) {
	v := val.Value()
	switch fldType {
	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
		if b, ok := v.(bool); ok {
			return b, nil
		}
		if id, ok := v.(ast.Identifier); ok {
			if insideMsgLiteral {
				// inside a message literal, values use the protobuf text format,
				// which is lenient in that it accepts "t" and "f" or "True" and "False"
				switch id {
				case "t", "true", "True":
					return true, nil
				case "f", "false", "False":
					return false, nil
				}
			} else {
				// options with simple scalar values (no message literal) are stricter
				switch id {
				case "true":
					return true, nil
				case "false":
					return false, nil
				}
			}
		}
		return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting bool, got %s", mc, valueKind(v))
	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
		if str, ok := v.(string); ok {
			return []byte(str), nil
		}
		return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting bytes, got %s", mc, valueKind(v))
	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
		if str, ok := v.(string); ok {
			return str, nil
		}
		return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting string, got %s", mc, valueKind(v))
	case descriptorpb.FieldDescriptorProto_TYPE_INT32, descriptorpb.FieldDescriptorProto_TYPE_SINT32, descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
		// Integer literals arrive as int64 (possibly negative) or uint64;
		// both forms must be range-checked for the 32-bit target type.
		if i, ok := v.(int64); ok {
			if i > math.MaxInt32 || i < math.MinInt32 {
				return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for int32", mc, i)
			}
			return int32(i), nil
		}
		if ui, ok := v.(uint64); ok {
			if ui > math.MaxInt32 {
				return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for int32", mc, ui)
			}
			return int32(ui), nil
		}
		return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting int32, got %s", mc, valueKind(v))
	case descriptorpb.FieldDescriptorProto_TYPE_UINT32, descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
		if i, ok := v.(int64); ok {
			if i > math.MaxUint32 || i < 0 {
				return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for uint32", mc, i)
			}
			return uint32(i), nil
		}
		if ui, ok := v.(uint64); ok {
			if ui > math.MaxUint32 {
				return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for uint32", mc, ui)
			}
			return uint32(ui), nil
		}
		return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting uint32, got %s", mc, valueKind(v))
	case descriptorpb.FieldDescriptorProto_TYPE_INT64, descriptorpb.FieldDescriptorProto_TYPE_SINT64, descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
		if i, ok := v.(int64); ok {
			return i, nil
		}
		if ui, ok := v.(uint64); ok {
			if ui > math.MaxInt64 {
				return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for int64", mc, ui)
			}
			return int64(ui), nil
		}
		return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting int64, got %s", mc, valueKind(v))
	case descriptorpb.FieldDescriptorProto_TYPE_UINT64, descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
		if i, ok := v.(int64); ok {
			if i < 0 {
				return nil, reporter.Errorf(interp.nodeInfo(val), "%vvalue %d is out of range for uint64", mc, i)
			}
			return uint64(i), nil
		}
		if ui, ok := v.(uint64); ok {
			return ui, nil
		}
		return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting uint64, got %s", mc, valueKind(v))
	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
		// The identifiers "inf" and "nan" are accepted for floating-point fields.
		if id, ok := v.(ast.Identifier); ok {
			switch id {
			case "inf":
				return math.Inf(1), nil
			case "nan":
				return math.NaN(), nil
			}
		}
		if d, ok := v.(float64); ok {
			return d, nil
		}
		// Integer literals are also accepted and converted.
		if i, ok := v.(int64); ok {
			return float64(i), nil
		}
		if u, ok := v.(uint64); ok {
			return float64(u), nil
		}
		return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting double, got %s", mc, valueKind(v))
	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
		if id, ok := v.(ast.Identifier); ok {
			switch id {
			case "inf":
				return float32(math.Inf(1)), nil
			case "nan":
				return float32(math.NaN()), nil
			}
		}
		if d, ok := v.(float64); ok {
			return float32(d), nil
		}
		if i, ok := v.(int64); ok {
			return float32(i), nil
		}
		if u, ok := v.(uint64); ok {
			return float32(u), nil
		}
		return nil, reporter.Errorf(interp.nodeInfo(val), "%vexpecting float, got %s", mc, valueKind(v))
	default:
		return nil, reporter.Errorf(interp.nodeInfo(val), "%vunrecognized field type: %s", mc, fldType)
	}
}
// scalarFieldValueFromProto resolves the given uninterpreted option value as a value whose
// type is assignable to a field with the given fldType.
func (interp *interpreter) scalarFieldValueFromProto(
	mc *internal.MessageContext,
	fldType descriptorpb.FieldDescriptorProto_Type,
	opt *descriptorpb.UninterpretedOption,
	node ast.Node,
) (any, error) {
	switch fldType {
	case descriptorpb.FieldDescriptorProto_TYPE_BOOL:
		if opt.IdentifierValue != nil {
			switch opt.GetIdentifierValue() {
			case "true":
				return true, nil
			case "false":
				return false, nil
			}
		}
		return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting bool, got %s", mc, optionValueKind(opt))
	case descriptorpb.FieldDescriptorProto_TYPE_BYTES:
		if opt.StringValue != nil {
			return opt.GetStringValue(), nil
		}
		return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting bytes, got %s", mc, optionValueKind(opt))
	case descriptorpb.FieldDescriptorProto_TYPE_STRING:
		if opt.StringValue != nil {
			return string(opt.GetStringValue()), nil
		}
		return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting string, got %s", mc, optionValueKind(opt))
	case descriptorpb.FieldDescriptorProto_TYPE_INT32, descriptorpb.FieldDescriptorProto_TYPE_SINT32, descriptorpb.FieldDescriptorProto_TYPE_SFIXED32:
		// The option carries integers as either negative_int_value (int64) or
		// positive_int_value (uint64); both must be range-checked for int32.
		if opt.NegativeIntValue != nil {
			i := opt.GetNegativeIntValue()
			if i > math.MaxInt32 || i < math.MinInt32 {
				return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for int32", mc, i)
			}
			return int32(i), nil
		}
		if opt.PositiveIntValue != nil {
			ui := opt.GetPositiveIntValue()
			if ui > math.MaxInt32 {
				return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for int32", mc, ui)
			}
			return int32(ui), nil
		}
		return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting int32, got %s", mc, optionValueKind(opt))
	case descriptorpb.FieldDescriptorProto_TYPE_UINT32, descriptorpb.FieldDescriptorProto_TYPE_FIXED32:
		if opt.NegativeIntValue != nil {
			// A negative_int_value should always be < 0, so this branch should
			// always report out-of-range for an unsigned field.
			i := opt.GetNegativeIntValue()
			if i > math.MaxUint32 || i < 0 {
				return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for uint32", mc, i)
			}
			return uint32(i), nil
		}
		if opt.PositiveIntValue != nil {
			ui := opt.GetPositiveIntValue()
			if ui > math.MaxUint32 {
				return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for uint32", mc, ui)
			}
			return uint32(ui), nil
		}
		return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting uint32, got %s", mc, optionValueKind(opt))
	case descriptorpb.FieldDescriptorProto_TYPE_INT64, descriptorpb.FieldDescriptorProto_TYPE_SINT64, descriptorpb.FieldDescriptorProto_TYPE_SFIXED64:
		if opt.NegativeIntValue != nil {
			return opt.GetNegativeIntValue(), nil
		}
		if opt.PositiveIntValue != nil {
			ui := opt.GetPositiveIntValue()
			if ui > math.MaxInt64 {
				return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for int64", mc, ui)
			}
			return int64(ui), nil
		}
		return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting int64, got %s", mc, optionValueKind(opt))
	case descriptorpb.FieldDescriptorProto_TYPE_UINT64, descriptorpb.FieldDescriptorProto_TYPE_FIXED64:
		if opt.NegativeIntValue != nil {
			i := opt.GetNegativeIntValue()
			if i < 0 {
				return nil, reporter.Errorf(interp.nodeInfo(node), "%vvalue %d is out of range for uint64", mc, i)
			}
			// should not be possible since i should always be negative...
			return uint64(i), nil
		}
		if opt.PositiveIntValue != nil {
			return opt.GetPositiveIntValue(), nil
		}
		return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting uint64, got %s", mc, optionValueKind(opt))
	case descriptorpb.FieldDescriptorProto_TYPE_DOUBLE:
		// The identifiers "inf" and "nan" are accepted for floating-point fields,
		// as are integer values (converted to float).
		if opt.IdentifierValue != nil {
			switch opt.GetIdentifierValue() {
			case "inf":
				return math.Inf(1), nil
			case "nan":
				return math.NaN(), nil
			}
		}
		if opt.DoubleValue != nil {
			return opt.GetDoubleValue(), nil
		}
		if opt.NegativeIntValue != nil {
			return float64(opt.GetNegativeIntValue()), nil
		}
		if opt.PositiveIntValue != nil {
			return float64(opt.GetPositiveIntValue()), nil
		}
		return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting double, got %s", mc, optionValueKind(opt))
	case descriptorpb.FieldDescriptorProto_TYPE_FLOAT:
		if opt.IdentifierValue != nil {
			switch opt.GetIdentifierValue() {
			case "inf":
				return float32(math.Inf(1)), nil
			case "nan":
				return float32(math.NaN()), nil
			}
		}
		if opt.DoubleValue != nil {
			return float32(opt.GetDoubleValue()), nil
		}
		if opt.NegativeIntValue != nil {
			return float32(opt.GetNegativeIntValue()), nil
		}
		if opt.PositiveIntValue != nil {
			return float32(opt.GetPositiveIntValue()), nil
		}
		return nil, reporter.Errorf(interp.nodeInfo(node), "%vexpecting float, got %s", mc, optionValueKind(opt))
	default:
		return nil, reporter.Errorf(interp.nodeInfo(node), "%vunrecognized field type: %s", mc, fldType)
	}
}
// descriptorType returns a human-readable name for the kind of descriptor
// proto given, for use in error messages.
func descriptorType(m proto.Message) string {
	switch d := m.(type) {
	case *descriptorpb.FileDescriptorProto:
		return "file"
	case *descriptorpb.DescriptorProto:
		return "message"
	case *descriptorpb.DescriptorProto_ExtensionRange:
		return "extension range"
	case *descriptorpb.FieldDescriptorProto:
		// A field proto with an extendee is an extension declaration.
		if d.GetExtendee() != "" {
			return "extension"
		}
		return "field"
	case *descriptorpb.EnumDescriptorProto:
		return "enum"
	case *descriptorpb.EnumValueDescriptorProto:
		return "enum value"
	case *descriptorpb.ServiceDescriptorProto:
		return "service"
	case *descriptorpb.MethodDescriptorProto:
		return "method"
	default:
		// shouldn't be possible
		return fmt.Sprintf("%T", d)
	}
}
// messageLiteralValue processes a message literal value.
//
// If the returned value is not valid, then an error occurred during processing.
// The returned err may be nil, however, as any errors will already have been
// handled (so the resulting error could be nil if the handler returned nil).
func (interp *interpreter) messageLiteralValue(
targetType descriptorpb.FieldOptions_OptionTargetType,
mc *internal.MessageContext,
fieldNodes []*ast.MessageFieldNode,
msg protoreflect.Message,
pathPrefix []int32,
) (protoreflect.Value, sourceinfo.OptionSourceInfo, error) {
fmd := msg.Descriptor()
origPath := mc.OptAggPath
defer func() {
mc.OptAggPath = origPath
}()
flds := make(map[*ast.MessageFieldNode]*sourceinfo.OptionSourceInfo, len(fieldNodes))
var hadError bool
for _, fieldNode := range fieldNodes {
if origPath == "" {
mc.OptAggPath = fieldNode.Name.Value()
} else {
mc.OptAggPath = origPath + "." + fieldNode.Name.Value()
}
if fieldNode.Name.IsAnyTypeReference() {
if len(fieldNodes) > 1 {
err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name.URLPrefix), "%vany type references cannot be repeated or mixed with other fields", mc)
if err != nil {
return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err
}
hadError = true
}
if fmd.FullName() != "google.protobuf.Any" {
err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name.URLPrefix), "%vtype references are only allowed for google.protobuf.Any, but this type is %s", mc, fmd.FullName())
if err != nil {
return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err
}
hadError = true
continue
}
typeURLDescriptor := fmd.Fields().ByNumber(internal.AnyTypeURLTag)
var err error
switch {
case typeURLDescriptor == nil:
err = fmt.Errorf("message schema is missing type_url field (number %d)", internal.AnyTypeURLTag)
case typeURLDescriptor.IsList():
err = fmt.Errorf("message schema has type_url field (number %d) that is a list but should be singular", internal.AnyTypeURLTag)
case typeURLDescriptor.Kind() != protoreflect.StringKind:
err = fmt.Errorf("message schema has type_url field (number %d) that is %s but should be string", internal.AnyTypeURLTag, typeURLDescriptor.Kind())
}
if err != nil {
err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name), "%v%w", mc, err)
if err != nil {
return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err
}
hadError = true
continue
}
valueDescriptor := fmd.Fields().ByNumber(internal.AnyValueTag)
switch {
case valueDescriptor == nil:
err = fmt.Errorf("message schema is missing value field (number %d)", internal.AnyValueTag)
case valueDescriptor.IsList():
err = fmt.Errorf("message schema has value field (number %d) that is a list but should be singular", internal.AnyValueTag)
case valueDescriptor.Kind() != protoreflect.BytesKind:
err = fmt.Errorf("message schema has value field (number %d) that is %s but should be bytes", internal.AnyValueTag, valueDescriptor.Kind())
}
if err != nil {
err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name), "%v%w", mc, err)
if err != nil {
return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err
}
hadError = true
continue
}
urlPrefix := fieldNode.Name.URLPrefix.AsIdentifier()
msgName := fieldNode.Name.Name.AsIdentifier()
fullURL := fmt.Sprintf("%s/%s", urlPrefix, msgName)
// TODO: Support other URLs dynamically -- the caller of protocompile
// should be able to provide a custom resolver that can resolve type
// URLs into message descriptors. The default resolver would be
// implemented as below, only accepting "type.googleapis.com" and
// "type.googleprod.com" as hosts/prefixes and using the compiled
// file's transitive closure to find the named message, since that
// is what protoc does.
if urlPrefix != "type.googleapis.com" && urlPrefix != "type.googleprod.com" {
err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name.URLPrefix), "%vcould not resolve type reference %s", mc, fullURL)
if err != nil {
return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err
}
hadError = true
continue
}
anyFields, ok := fieldNode.Val.Value().([]*ast.MessageFieldNode)
if !ok {
err := interp.handleErrorf(interp.nodeInfo(fieldNode.Val), "%vtype references for google.protobuf.Any must have message literal value", mc)
if err != nil {
return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err
}
hadError = true
continue
}
anyMd := resolveDescriptor[protoreflect.MessageDescriptor](interp.resolver, string(msgName))
if anyMd == nil {
err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name.URLPrefix), "%vcould not resolve type reference %s", mc, fullURL)
if err != nil {
return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err
}
hadError = true
continue
}
// parse the message value
msgVal, valueSrcInfo, err := interp.messageLiteralValue(targetType, mc, anyFields, dynamicpb.NewMessage(anyMd), append(pathPrefix, internal.AnyValueTag))
if err != nil {
return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err
} else if !msgVal.IsValid() {
hadError = true
continue
}
b, err := (proto.MarshalOptions{Deterministic: true}).Marshal(msgVal.Message().Interface())
if err != nil {
err := interp.handleErrorf(interp.nodeInfo(fieldNode.Val), "%vfailed to serialize message value: %w", mc, err)
if err != nil {
return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err
}
hadError = true
continue
}
// Success!
if !hadError {
msg.Set(typeURLDescriptor, protoreflect.ValueOfString(fullURL))
msg.Set(valueDescriptor, protoreflect.ValueOfBytes(b))
flds[fieldNode] = &valueSrcInfo
}
continue
}
// Not expanded Any syntax; handle normal field.
var ffld protoreflect.FieldDescriptor
var err error
if fieldNode.Name.IsExtension() {
n := interp.file.ResolveMessageLiteralExtensionName(fieldNode.Name.Name)
if n == "" {
// this should not be possible!
n = string(fieldNode.Name.Name.AsIdentifier())
}
ffld, err = interp.resolveExtensionType(n)
if errors.Is(err, protoregistry.NotFound) {
// may need to qualify with package name
// (this should not be necessary!)
pkg := mc.File.FileDescriptorProto().GetPackage()
if pkg != "" {
ffld, err = interp.resolveExtensionType(pkg + "." + n)
}
}
} else {
ffld = fmd.Fields().ByName(protoreflect.Name(fieldNode.Name.Value()))
if ffld == nil {
err = protoregistry.NotFound
// It could be a proto2 group, where the text format refers to the group type
// name, and the field name is the lower-cased form of that.
ffld = fmd.Fields().ByName(protoreflect.Name(strings.ToLower(fieldNode.Name.Value())))
if ffld != nil {
// In editions, we support using the group type name only for fields that
// "look like" proto2 groups.
if protoreflect.Name(fieldNode.Name.Value()) == ffld.Message().Name() && // text format uses type name
ffld.Message().FullName().Parent() == ffld.FullName().Parent() && // message and field declared in same scope
ffld.Kind() == protoreflect.GroupKind /* uses delimited encoding */ {
// This one looks like a proto2 group, so it's a keeper.
err = nil
} else {
// It doesn't look like a proto2 group, so this is not a match.
ffld = nil
}
}
}
}
if errors.Is(err, protoregistry.NotFound) {
err := interp.handleErrorf(interp.nodeInfo(fieldNode.Name), "%vfield %s not found", mc, string(fieldNode.Name.Name.AsIdentifier()))
if err != nil {
return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err
}
hadError = true
continue
} else if err != nil {
err := interp.handleErrorWithPos(interp.nodeInfo(fieldNode.Name), err)
if err != nil {
return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err
}
hadError = true
continue
}
if err := interp.checkFieldUsage(targetType, ffld, fieldNode.Name); err != nil {
return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err
}
if fieldNode.Sep == nil && ffld.Message() == nil {
// If there is no separator, the field type should be a message.
// Otherwise, it is an error in the text format.
err := interp.handleErrorf(interp.nodeInfo(fieldNode.Val), "syntax error: unexpected value, expecting ':'")
if err != nil {
return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err
}
hadError = true
continue
}
srcInfo, err := interp.setOptionField(targetType, mc, msg, ffld, fieldNode.Name, fieldNode.Val, true, append(pathPrefix, int32(ffld.Number())))
if err != nil {
return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, err
}
if srcInfo != nil {
flds[fieldNode] = srcInfo
}
}
if hadError {
return protoreflect.Value{}, sourceinfo.OptionSourceInfo{}, nil
}
return protoreflect.ValueOfMessage(msg),
newSrcInfo(pathPrefix, &sourceinfo.MessageLiteralSourceInfo{Fields: flds}),
nil
}
// newSrcInfo assembles an OptionSourceInfo for the given path and children.
// The path slice is cloned so callers can keep appending to their own copy
// without corrupting the stored value.
func newSrcInfo(path []int32, children sourceinfo.OptionChildrenSourceInfo) sourceinfo.OptionSourceInfo {
	srcInfo := sourceinfo.OptionSourceInfo{Children: children}
	srcInfo.Path = internal.ClonePath(path)
	return srcInfo
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package options
import (
"fmt"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/descriptorpb"
"github.com/bufbuild/protocompile/internal"
)
// StripSourceRetentionOptionsFromFile returns a file descriptor proto that omits any
// options in file that are defined to be retained only in source. If file has no
// such options, then it is returned as is. If it does have such options, a copy is
// made; the given file will not be mutated.
//
// Even when a copy is returned, it is not a deep copy: it may share data with the
// original file. So callers should not mutate the returned file unless mutating the
// input file is also safe.
func StripSourceRetentionOptionsFromFile(file *descriptorpb.FileDescriptorProto) (*descriptorpb.FileDescriptorProto, error) {
	var path sourcePath
	var removedPaths *sourcePathTrie
	if file.SourceCodeInfo != nil && len(file.SourceCodeInfo.Location) > 0 {
		// Only bother tracking removed paths when there is source info to
		// filter; a nil path/trie disables tracking in all helpers below.
		path = make(sourcePath, 0, 16)
		removedPaths = &sourcePathTrie{}
	}
	// dirty records whether any options or child element actually changed.
	var dirty bool
	optionsPath := path.push(internal.FileOptionsTag)
	newOpts, err := stripSourceRetentionOptions(file.GetOptions(), optionsPath, removedPaths)
	if err != nil {
		return nil, err
	}
	if newOpts != file.GetOptions() {
		dirty = true
	}
	msgsPath := path.push(internal.FileMessagesTag)
	newMsgs, changed, err := stripOptionsFromAll(file.GetMessageType(), stripSourceRetentionOptionsFromMessage, msgsPath, removedPaths)
	if err != nil {
		return nil, err
	}
	if changed {
		dirty = true
	}
	enumsPath := path.push(internal.FileEnumsTag)
	newEnums, changed, err := stripOptionsFromAll(file.GetEnumType(), stripSourceRetentionOptionsFromEnum, enumsPath, removedPaths)
	if err != nil {
		return nil, err
	}
	if changed {
		dirty = true
	}
	extsPath := path.push(internal.FileExtensionsTag)
	newExts, changed, err := stripOptionsFromAll(file.GetExtension(), stripSourceRetentionOptionsFromField, extsPath, removedPaths)
	if err != nil {
		return nil, err
	}
	if changed {
		dirty = true
	}
	svcsPath := path.push(internal.FileServicesTag)
	newSvcs, changed, err := stripOptionsFromAll(file.GetService(), stripSourceRetentionOptionsFromService, svcsPath, removedPaths)
	if err != nil {
		return nil, err
	}
	if changed {
		dirty = true
	}
	if !dirty {
		// Nothing was stripped anywhere; return the input unchanged.
		return file, nil
	}
	// Build a shallow copy with the updated (stripped) children, then filter
	// out the source locations for everything that was removed.
	newFile, err := shallowCopy(file)
	if err != nil {
		return nil, err
	}
	newFile.Options = newOpts
	newFile.MessageType = newMsgs
	newFile.EnumType = newEnums
	newFile.Extension = newExts
	newFile.Service = newSvcs
	newFile.SourceCodeInfo = stripSourcePathsForSourceRetentionOptions(newFile.SourceCodeInfo, removedPaths)
	return newFile, nil
}
// sourcePath is a protobuf source path: a sequence of field numbers and
// indexes identifying a location within a file descriptor.
type sourcePath protoreflect.SourcePath

// push returns the path extended with element. A nil receiver stays nil,
// which serves as a sentinel meaning "no source info present, don't track
// paths at all".
func (p sourcePath) push(element int32) sourcePath {
	if p == nil {
		return nil
	}
	return append(p, element)
}
// sourcePathTrie records the set of source paths that have been removed, so
// that the corresponding source code locations can be filtered out later. A
// node with removed == true marks its entire subtree as removed.
type sourcePathTrie struct {
	removed  bool
	children map[int32]*sourcePathTrie
}
// addPath marks the given path (and everything beneath it) as removed.
// Calling this on a nil receiver is a no-op, which is how path tracking is
// disabled when a file has no source code info.
func (t *sourcePathTrie) addPath(p sourcePath) {
	if t == nil {
		return
	}
	// Walk down the trie, creating nodes as needed, then mark the leaf.
	node := t
	for _, element := range p {
		next := node.children[element]
		if next == nil {
			if node.children == nil {
				node.children = map[int32]*sourcePathTrie{}
			}
			next = &sourcePathTrie{}
			node.children[element] = next
		}
		node = next
	}
	node.removed = true
}
// isRemoved reports whether p, or any prefix of p, has been marked as removed
// via addPath. A nil receiver always reports false.
func (t *sourcePathTrie) isRemoved(p []int32) bool {
	node := t
	for node != nil {
		if node.removed {
			// An ancestor (or this exact path) was removed.
			return true
		}
		if len(p) == 0 {
			return false
		}
		node = node.children[p[0]]
		p = p[1:]
	}
	return false
}
// stripSourceRetentionOptions returns a copy of options with every field whose
// FieldOptions declare RETENTION_SOURCE removed. If no fields need stripping,
// options is returned unchanged. If stripping would remove every populated
// field, the zero value of M (a nil pointer) is returned and the whole options
// path is recorded in removedPaths so its source locations are dropped too.
func stripSourceRetentionOptions[M proto.Message](
	options M,
	path sourcePath,
	removedPaths *sourcePathTrie,
) (M, error) {
	optionsRef := options.ProtoReflect()
	// See if there are any options to strip.
	var hasFieldToStrip bool
	var numFieldsToKeep int
	var err error
	optionsRef.Range(func(field protoreflect.FieldDescriptor, _ protoreflect.Value) bool {
		fieldOpts, ok := field.Options().(*descriptorpb.FieldOptions)
		if !ok {
			err = fmt.Errorf("field options is unexpected type: got %T, want %T", field.Options(), fieldOpts)
			return false
		}
		if fieldOpts.GetRetention() == descriptorpb.FieldOptions_RETENTION_SOURCE {
			hasFieldToStrip = true
		} else {
			numFieldsToKeep++
		}
		return true
	})
	var zero M
	if err != nil {
		return zero, err
	}
	if !hasFieldToStrip {
		// Nothing to remove; return the input as is.
		return options, nil
	}
	if numFieldsToKeep == 0 {
		// Stripping the message would remove *all* options. In that case,
		// we'll clear out the options by returning the zero value (i.e. nil).
		removedPaths.addPath(path) // clear out all source locations, too
		return zero, nil
	}
	// There is at least one option to remove. So we need to make a copy that does not have those options.
	newOptions := optionsRef.New()
	ret, ok := newOptions.Interface().(M)
	if !ok {
		return zero, fmt.Errorf("creating new message of same type resulted in unexpected type; got %T, want %T", newOptions.Interface(), zero)
	}
	// Second pass: copy only the retained fields into the new message, and
	// record the path of each removed field so its source locations go away.
	optionsRef.Range(func(field protoreflect.FieldDescriptor, val protoreflect.Value) bool {
		fieldOpts, ok := field.Options().(*descriptorpb.FieldOptions)
		if !ok {
			err = fmt.Errorf("field options is unexpected type: got %T, want %T", field.Options(), fieldOpts)
			return false
		}
		if fieldOpts.GetRetention() != descriptorpb.FieldOptions_RETENTION_SOURCE {
			newOptions.Set(field, val)
		} else {
			removedPaths.addPath(path.push(int32(field.Number())))
		}
		return true
	})
	if err != nil {
		return zero, err
	}
	return ret, nil
}
// stripSourceRetentionOptionsFromMessage returns a message descriptor proto
// with source-retention options removed from the message itself and from all
// of its fields, oneofs, extension ranges, nested messages, enums, and
// extensions. If nothing needs stripping, msg is returned unchanged;
// otherwise a shallow copy with updated children is returned.
func stripSourceRetentionOptionsFromMessage(
	msg *descriptorpb.DescriptorProto,
	path sourcePath,
	removedPaths *sourcePathTrie,
) (*descriptorpb.DescriptorProto, error) {
	// dirty records whether any options or child element actually changed.
	var dirty bool
	optionsPath := path.push(internal.MessageOptionsTag)
	newOpts, err := stripSourceRetentionOptions(msg.Options, optionsPath, removedPaths)
	if err != nil {
		return nil, err
	}
	if newOpts != msg.Options {
		dirty = true
	}
	fieldsPath := path.push(internal.MessageFieldsTag)
	newFields, changed, err := stripOptionsFromAll(msg.Field, stripSourceRetentionOptionsFromField, fieldsPath, removedPaths)
	if err != nil {
		return nil, err
	}
	if changed {
		dirty = true
	}
	oneofsPath := path.push(internal.MessageOneofsTag)
	newOneofs, changed, err := stripOptionsFromAll(msg.OneofDecl, stripSourceRetentionOptionsFromOneof, oneofsPath, removedPaths)
	if err != nil {
		return nil, err
	}
	if changed {
		dirty = true
	}
	extRangesPath := path.push(internal.MessageExtensionRangesTag)
	newExtRanges, changed, err := stripOptionsFromAll(msg.ExtensionRange, stripSourceRetentionOptionsFromExtensionRange, extRangesPath, removedPaths)
	if err != nil {
		return nil, err
	}
	if changed {
		dirty = true
	}
	msgsPath := path.push(internal.MessageNestedMessagesTag)
	newMsgs, changed, err := stripOptionsFromAll(msg.NestedType, stripSourceRetentionOptionsFromMessage, msgsPath, removedPaths)
	if err != nil {
		return nil, err
	}
	if changed {
		dirty = true
	}
	enumsPath := path.push(internal.MessageEnumsTag)
	newEnums, changed, err := stripOptionsFromAll(msg.EnumType, stripSourceRetentionOptionsFromEnum, enumsPath, removedPaths)
	if err != nil {
		return nil, err
	}
	if changed {
		dirty = true
	}
	extsPath := path.push(internal.MessageExtensionsTag)
	newExts, changed, err := stripOptionsFromAll(msg.Extension, stripSourceRetentionOptionsFromField, extsPath, removedPaths)
	if err != nil {
		return nil, err
	}
	if changed {
		dirty = true
	}
	if !dirty {
		// Nothing changed anywhere in this message; reuse the input.
		return msg, nil
	}
	newMsg, err := shallowCopy(msg)
	if err != nil {
		return nil, err
	}
	newMsg.Options = newOpts
	newMsg.Field = newFields
	newMsg.OneofDecl = newOneofs
	newMsg.ExtensionRange = newExtRanges
	newMsg.NestedType = newMsgs
	newMsg.EnumType = newEnums
	newMsg.Extension = newExts
	return newMsg, nil
}
// stripSourceRetentionOptionsFromField returns a field descriptor proto with
// source-retention options removed. If nothing needs stripping, field is
// returned unchanged; otherwise a shallow copy with new options is returned.
func stripSourceRetentionOptionsFromField(
	field *descriptorpb.FieldDescriptorProto,
	path sourcePath,
	removedPaths *sourcePathTrie,
) (*descriptorpb.FieldDescriptorProto, error) {
	strippedOpts, err := stripSourceRetentionOptions(field.Options, path.push(internal.FieldOptionsTag), removedPaths)
	if err != nil {
		return nil, err
	}
	if strippedOpts == field.Options {
		// Nothing was removed; no copy needed.
		return field, nil
	}
	fieldCopy, err := shallowCopy(field)
	if err != nil {
		return nil, err
	}
	fieldCopy.Options = strippedOpts
	return fieldCopy, nil
}
// stripSourceRetentionOptionsFromOneof returns a oneof descriptor proto with
// source-retention options removed. If nothing needs stripping, oneof is
// returned unchanged; otherwise a shallow copy with new options is returned.
func stripSourceRetentionOptionsFromOneof(
	oneof *descriptorpb.OneofDescriptorProto,
	path sourcePath,
	removedPaths *sourcePathTrie,
) (*descriptorpb.OneofDescriptorProto, error) {
	strippedOpts, err := stripSourceRetentionOptions(oneof.Options, path.push(internal.OneofOptionsTag), removedPaths)
	if err != nil {
		return nil, err
	}
	if strippedOpts == oneof.Options {
		// Nothing was removed; no copy needed.
		return oneof, nil
	}
	oneofCopy, err := shallowCopy(oneof)
	if err != nil {
		return nil, err
	}
	oneofCopy.Options = strippedOpts
	return oneofCopy, nil
}
// stripSourceRetentionOptionsFromExtensionRange returns an extension range
// proto with source-retention options removed. If nothing needs stripping,
// extRange is returned unchanged; otherwise a shallow copy is returned.
func stripSourceRetentionOptionsFromExtensionRange(
	extRange *descriptorpb.DescriptorProto_ExtensionRange,
	path sourcePath,
	removedPaths *sourcePathTrie,
) (*descriptorpb.DescriptorProto_ExtensionRange, error) {
	strippedOpts, err := stripSourceRetentionOptions(extRange.Options, path.push(internal.ExtensionRangeOptionsTag), removedPaths)
	if err != nil {
		return nil, err
	}
	if strippedOpts == extRange.Options {
		// Nothing was removed; no copy needed.
		return extRange, nil
	}
	rangeCopy, err := shallowCopy(extRange)
	if err != nil {
		return nil, err
	}
	rangeCopy.Options = strippedOpts
	return rangeCopy, nil
}
// stripSourceRetentionOptionsFromEnum returns an enum descriptor proto with
// source-retention options removed from the enum itself and from all of its
// values. If nothing needs stripping, enum is returned unchanged; otherwise
// a shallow copy with updated options/values is returned.
func stripSourceRetentionOptionsFromEnum(
	enum *descriptorpb.EnumDescriptorProto,
	path sourcePath,
	removedPaths *sourcePathTrie,
) (*descriptorpb.EnumDescriptorProto, error) {
	strippedOpts, err := stripSourceRetentionOptions(enum.Options, path.push(internal.EnumOptionsTag), removedPaths)
	if err != nil {
		return nil, err
	}
	strippedVals, valsChanged, err := stripOptionsFromAll(enum.Value, stripSourceRetentionOptionsFromEnumValue, path.push(internal.EnumValuesTag), removedPaths)
	if err != nil {
		return nil, err
	}
	if strippedOpts == enum.Options && !valsChanged {
		// Neither the enum's options nor any value changed; reuse the input.
		return enum, nil
	}
	enumCopy, err := shallowCopy(enum)
	if err != nil {
		return nil, err
	}
	enumCopy.Options = strippedOpts
	enumCopy.Value = strippedVals
	return enumCopy, nil
}
// stripSourceRetentionOptionsFromEnumValue returns an enum value descriptor
// proto with source-retention options removed. If nothing needs stripping,
// enumVal is returned unchanged; otherwise a shallow copy is returned.
func stripSourceRetentionOptionsFromEnumValue(
	enumVal *descriptorpb.EnumValueDescriptorProto,
	path sourcePath,
	removedPaths *sourcePathTrie,
) (*descriptorpb.EnumValueDescriptorProto, error) {
	strippedOpts, err := stripSourceRetentionOptions(enumVal.Options, path.push(internal.EnumValOptionsTag), removedPaths)
	if err != nil {
		return nil, err
	}
	if strippedOpts == enumVal.Options {
		// Nothing was removed; no copy needed.
		return enumVal, nil
	}
	enumValCopy, err := shallowCopy(enumVal)
	if err != nil {
		return nil, err
	}
	enumValCopy.Options = strippedOpts
	return enumValCopy, nil
}
// stripSourceRetentionOptionsFromService returns a service descriptor proto
// with source-retention options removed from the service itself and from all
// of its methods. If nothing needs stripping, svc is returned unchanged;
// otherwise a shallow copy with updated options/methods is returned.
func stripSourceRetentionOptionsFromService(
	svc *descriptorpb.ServiceDescriptorProto,
	path sourcePath,
	removedPaths *sourcePathTrie,
) (*descriptorpb.ServiceDescriptorProto, error) {
	strippedOpts, err := stripSourceRetentionOptions(svc.Options, path.push(internal.ServiceOptionsTag), removedPaths)
	if err != nil {
		return nil, err
	}
	strippedMethods, methodsChanged, err := stripOptionsFromAll(svc.Method, stripSourceRetentionOptionsFromMethod, path.push(internal.ServiceMethodsTag), removedPaths)
	if err != nil {
		return nil, err
	}
	if strippedOpts == svc.Options && !methodsChanged {
		// Neither the service's options nor any method changed; reuse input.
		return svc, nil
	}
	svcCopy, err := shallowCopy(svc)
	if err != nil {
		return nil, err
	}
	svcCopy.Options = strippedOpts
	svcCopy.Method = strippedMethods
	return svcCopy, nil
}
// stripSourceRetentionOptionsFromMethod returns a method descriptor proto
// with source-retention options removed. If nothing needs stripping, method
// is returned unchanged; otherwise a shallow copy is returned.
func stripSourceRetentionOptionsFromMethod(
	method *descriptorpb.MethodDescriptorProto,
	path sourcePath,
	removedPaths *sourcePathTrie,
) (*descriptorpb.MethodDescriptorProto, error) {
	strippedOpts, err := stripSourceRetentionOptions(method.Options, path.push(internal.MethodOptionsTag), removedPaths)
	if err != nil {
		return nil, err
	}
	if strippedOpts == method.Options {
		// Nothing was removed; no copy needed.
		return method, nil
	}
	methodCopy, err := shallowCopy(method)
	if err != nil {
		return nil, err
	}
	methodCopy.Options = strippedOpts
	return methodCopy, nil
}
// stripSourcePathsForSourceRetentionOptions returns source code info with the
// locations of all removed paths filtered out. If there is no source info or
// no removed paths were tracked, sourceInfo is returned unchanged. The input
// is never mutated; a new SourceCodeInfo is built for the filtered result.
func stripSourcePathsForSourceRetentionOptions(
	sourceInfo *descriptorpb.SourceCodeInfo,
	removedPaths *sourcePathTrie,
) *descriptorpb.SourceCodeInfo {
	if sourceInfo == nil || len(sourceInfo.Location) == 0 || removedPaths == nil {
		// nothing to do
		return sourceInfo
	}
	keptLocations := make([]*descriptorpb.SourceCodeInfo_Location, 0, len(sourceInfo.Location))
	for _, loc := range sourceInfo.Location {
		if !removedPaths.isRemoved(loc.Path) {
			keptLocations = append(keptLocations, loc)
		}
	}
	return &descriptorpb.SourceCodeInfo{Location: keptLocations}
}
// shallowCopy returns a new message of the same type as msg with every
// populated field copied over. Field values are shared rather than deep
// copied, so the result aliases the original's data.
func shallowCopy[M proto.Message](msg M) (M, error) {
	src := msg.ProtoReflect()
	dst := src.New()
	copied, ok := dst.Interface().(M)
	if !ok {
		// Should not happen for generated message types.
		return copied, fmt.Errorf("creating new message of same type resulted in unexpected type; got %T, want %T", dst.Interface(), copied)
	}
	src.Range(func(field protoreflect.FieldDescriptor, val protoreflect.Value) bool {
		dst.Set(field, val)
		return true
	})
	return copied, nil
}
// stripOptionsFromAll applies updateFunc to every element of slice in order
// to remove source-retention options. It returns the resulting slice along
// with a bool reporting whether anything actually changed; when that bool is
// false, the returned slice is the input slice itself. T is usually a pointer
// type, in which case updateFunc must NOT mutate its input: it returns the
// input value unchanged when no update is needed, and a new value otherwise.
func stripOptionsFromAll[T comparable](
	slice []T,
	updateFunc func(T, sourcePath, *sourcePathTrie) (T, error),
	path sourcePath,
	removedPaths *sourcePathTrie,
) ([]T, bool, error) {
	var result []T // allocated lazily, the first time an element changes
	for i, elem := range slice {
		updated, err := updateFunc(elem, path.push(int32(i)), removedPaths)
		if err != nil {
			return nil, false, err
		}
		switch {
		case result != nil:
			result[i] = updated
		case updated != elem:
			// First change encountered: copy the unchanged prefix into a
			// fresh slice, then record the updated element.
			result = make([]T, len(slice))
			copy(result, slice[:i])
			result[i] = updated
		}
	}
	if result == nil {
		return slice, false, nil
	}
	return result, true, nil
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package options
import (
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/descriptorpb"
)
// optionsType is a constraint for a pointer to an options message (e.g.
// *descriptorpb.FileOptions) that exposes its feature set and its
// uninterpreted options. The *T element allows nil-pointer checks.
type optionsType[T any] interface {
	*T
	proto.Message
	GetFeatures() *descriptorpb.FeatureSet
	GetUninterpretedOption() []*descriptorpb.UninterpretedOption
}

// elementType is a constraint for a descriptor proto that carries options of
// type Opts (e.g. *descriptorpb.FileDescriptorProto carrying *FileOptions).
type elementType[OptsStruct any, Opts optionsType[OptsStruct]] interface {
	proto.Message
	GetOptions() Opts
}

// targetType bundles an option target-type enum value with the strongly-typed
// setters needed to write options (or uninterpreted options) back to elements
// of that type.
type targetType[Elem elementType[OptsStruct, Opts], OptsStruct any, Opts optionsType[OptsStruct]] struct {
	t                       descriptorpb.FieldOptions_OptionTargetType
	setUninterpretedOptions func(opts Opts, uninterpreted []*descriptorpb.UninterpretedOption)
	setOptions              func(elem Elem, opts Opts)
}
// One targetType value per kind of element that can carry options, pairing
// the FieldOptions_OptionTargetType enum value with the matching setters.
var (
	targetTypeFile = newTargetType[*descriptorpb.FileDescriptorProto](
		descriptorpb.FieldOptions_TARGET_TYPE_FILE, setUninterpretedFileOptions, setFileOptions,
	)
	targetTypeMessage = newTargetType[*descriptorpb.DescriptorProto](
		descriptorpb.FieldOptions_TARGET_TYPE_MESSAGE, setUninterpretedMessageOptions, setMessageOptions,
	)
	targetTypeField = newTargetType[*descriptorpb.FieldDescriptorProto](
		descriptorpb.FieldOptions_TARGET_TYPE_FIELD, setUninterpretedFieldOptions, setFieldOptions,
	)
	targetTypeOneof = newTargetType[*descriptorpb.OneofDescriptorProto](
		descriptorpb.FieldOptions_TARGET_TYPE_ONEOF, setUninterpretedOneofOptions, setOneofOptions,
	)
	targetTypeExtensionRange = newTargetType[*descriptorpb.DescriptorProto_ExtensionRange](
		descriptorpb.FieldOptions_TARGET_TYPE_EXTENSION_RANGE, setUninterpretedExtensionRangeOptions, setExtensionRangeOptions,
	)
	targetTypeEnum = newTargetType[*descriptorpb.EnumDescriptorProto](
		descriptorpb.FieldOptions_TARGET_TYPE_ENUM, setUninterpretedEnumOptions, setEnumOptions,
	)
	targetTypeEnumValue = newTargetType[*descriptorpb.EnumValueDescriptorProto](
		descriptorpb.FieldOptions_TARGET_TYPE_ENUM_ENTRY, setUninterpretedEnumValueOptions, setEnumValueOptions,
	)
	targetTypeService = newTargetType[*descriptorpb.ServiceDescriptorProto](
		descriptorpb.FieldOptions_TARGET_TYPE_SERVICE, setUninterpretedServiceOptions, setServiceOptions,
	)
	targetTypeMethod = newTargetType[*descriptorpb.MethodDescriptorProto](
		descriptorpb.FieldOptions_TARGET_TYPE_METHOD, setUninterpretedMethodOptions, setMethodOptions,
	)
)
// newTargetType constructs a targetType for the given element kind, pairing
// the target-type enum value with the element-specific setter functions.
func newTargetType[Elem elementType[OptsStruct, Opts], OptsStruct any, Opts optionsType[OptsStruct]](
	t descriptorpb.FieldOptions_OptionTargetType,
	setUninterpretedOptions func(opts Opts, uninterpreted []*descriptorpb.UninterpretedOption),
	setOptions func(elem Elem, opts Opts),
) *targetType[Elem, OptsStruct, Opts] {
	return &targetType[Elem, OptsStruct, Opts]{
		t:                       t,
		setUninterpretedOptions: setUninterpretedOptions,
		setOptions:              setOptions,
	}
}
// The setters below exist because the generated options types share no
// interface for assigning the UninterpretedOption field, so each concrete
// type needs its own trivial function (referenced by the targetType values).

func setUninterpretedFileOptions(opts *descriptorpb.FileOptions, uninterpreted []*descriptorpb.UninterpretedOption) {
	opts.UninterpretedOption = uninterpreted
}

func setUninterpretedMessageOptions(opts *descriptorpb.MessageOptions, uninterpreted []*descriptorpb.UninterpretedOption) {
	opts.UninterpretedOption = uninterpreted
}

func setUninterpretedFieldOptions(opts *descriptorpb.FieldOptions, uninterpreted []*descriptorpb.UninterpretedOption) {
	opts.UninterpretedOption = uninterpreted
}

func setUninterpretedOneofOptions(opts *descriptorpb.OneofOptions, uninterpreted []*descriptorpb.UninterpretedOption) {
	opts.UninterpretedOption = uninterpreted
}

func setUninterpretedExtensionRangeOptions(opts *descriptorpb.ExtensionRangeOptions, uninterpreted []*descriptorpb.UninterpretedOption) {
	opts.UninterpretedOption = uninterpreted
}

func setUninterpretedEnumOptions(opts *descriptorpb.EnumOptions, uninterpreted []*descriptorpb.UninterpretedOption) {
	opts.UninterpretedOption = uninterpreted
}

func setUninterpretedEnumValueOptions(opts *descriptorpb.EnumValueOptions, uninterpreted []*descriptorpb.UninterpretedOption) {
	opts.UninterpretedOption = uninterpreted
}

func setUninterpretedServiceOptions(opts *descriptorpb.ServiceOptions, uninterpreted []*descriptorpb.UninterpretedOption) {
	opts.UninterpretedOption = uninterpreted
}

func setUninterpretedMethodOptions(opts *descriptorpb.MethodOptions, uninterpreted []*descriptorpb.UninterpretedOption) {
	opts.UninterpretedOption = uninterpreted
}
// The setters below exist because the generated descriptor types share no
// interface for assigning the Options field, so each concrete type needs its
// own trivial function (referenced by the targetType values).

func setFileOptions(desc *descriptorpb.FileDescriptorProto, opts *descriptorpb.FileOptions) {
	desc.Options = opts
}

func setMessageOptions(desc *descriptorpb.DescriptorProto, opts *descriptorpb.MessageOptions) {
	desc.Options = opts
}

func setFieldOptions(desc *descriptorpb.FieldDescriptorProto, opts *descriptorpb.FieldOptions) {
	desc.Options = opts
}

func setOneofOptions(desc *descriptorpb.OneofDescriptorProto, opts *descriptorpb.OneofOptions) {
	desc.Options = opts
}

func setExtensionRangeOptions(desc *descriptorpb.DescriptorProto_ExtensionRange, opts *descriptorpb.ExtensionRangeOptions) {
	desc.Options = opts
}

func setEnumOptions(desc *descriptorpb.EnumDescriptorProto, opts *descriptorpb.EnumOptions) {
	desc.Options = opts
}

func setEnumValueOptions(desc *descriptorpb.EnumValueDescriptorProto, opts *descriptorpb.EnumValueOptions) {
	desc.Options = opts
}

func setServiceOptions(desc *descriptorpb.ServiceDescriptorProto, opts *descriptorpb.ServiceOptions) {
	desc.Options = opts
}

func setMethodOptions(desc *descriptorpb.MethodDescriptorProto, opts *descriptorpb.MethodOptions) {
	desc.Options = opts
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"github.com/bufbuild/protocompile/ast"
)
// the types below are accumulator types, just used in intermediate productions
// to accumulate slices that will get stored in AST nodes

// compactOptionSlices accumulates the option nodes and the commas separating
// them while a compact options block is being parsed.
type compactOptionSlices struct {
	options []*ast.OptionNode
	commas  []*ast.RuneNode
}
// toStringValueNode converts a run of string literals into a single string
// value node: a lone literal is used directly, while multiple adjacent
// literals become a compound literal string node.
func toStringValueNode(strs []*ast.StringLiteralNode) ast.StringValueNode {
	if len(strs) != 1 {
		return ast.NewCompoundLiteralStringNode(strs...)
	}
	return strs[0]
}
// nameSlices accumulates reserved names (either string literals or, in
// editions, identifiers) along with the commas separating them.
type nameSlices struct {
	// only names or idents will be set, never both
	names  []ast.StringValueNode
	idents []*ast.IdentNode
	commas []*ast.RuneNode
}

// rangeSlices accumulates reserved/extension ranges and their separating commas.
type rangeSlices struct {
	ranges []*ast.RangeNode
	commas []*ast.RuneNode
}

// valueSlices accumulates array-literal values and their separating commas.
type valueSlices struct {
	vals   []ast.ValueNode
	commas []*ast.RuneNode
}

// fieldRefSlices accumulates the components of an option name and the dots
// separating them.
type fieldRefSlices struct {
	refs []*ast.FieldReferenceNode
	dots []*ast.RuneNode
}

// identSlices accumulates the components of a (possibly qualified) identifier
// and the dots separating them.
type identSlices struct {
	idents []*ast.IdentNode
	dots   []*ast.RuneNode
}
// toIdentValueNode converts the accumulated identifiers into an AST value
// node. A lone identifier with no leading dot is returned directly; anything
// else becomes a compound (qualified) identifier node.
func (s *identSlices) toIdentValueNode(leadingDot *ast.RuneNode) ast.IdentValueNode {
	if leadingDot != nil || len(s.idents) != 1 {
		return ast.NewCompoundIdentNode(leadingDot, s.idents, s.dots)
	}
	// single simple name
	return s.idents[0]
}
// messageFieldList is a singly-linked list of message-literal fields, built
// up during parsing and later flattened into slices via toNodes. delimiter is
// the optional separator (comma or semicolon) that followed the field.
type messageFieldList struct {
	field     *ast.MessageFieldNode
	delimiter *ast.RuneNode
	next      *messageFieldList
}
// toNodes flattens the linked list into parallel slices of fields and their
// trailing delimiters. Entries in the delimiter slice are nil for fields that
// had no trailing delimiter. A nil list yields nil slices.
func (list *messageFieldList) toNodes() ([]*ast.MessageFieldNode, []*ast.RuneNode) {
	if list == nil {
		return nil, nil
	}
	// Count the entries so both slices can be allocated exactly once.
	count := 0
	for node := list; node != nil; node = node.next {
		count++
	}
	fields := make([]*ast.MessageFieldNode, count)
	delimiters := make([]*ast.RuneNode, count)
	i := 0
	for node := list; node != nil; node = node.next {
		fields[i] = node.field
		delimiters[i] = node.delimiter
		i++
	}
	return fields, delimiters
}
// prependRunes converts each rune node to a T via convert and prepends the
// results to elements, returning a freshly allocated slice. Neither input
// slice is mutated.
func prependRunes[T ast.Node](convert func(*ast.RuneNode) T, runes []*ast.RuneNode, elements []T) []T {
	elems := make([]T, 0, len(runes)+len(elements))
	// Loop variable is named "r" rather than "rune" so as not to shadow the
	// predeclared rune type.
	for _, r := range runes {
		elems = append(elems, convert(r))
	}
	elems = append(elems, elements...)
	return elems
}
// The helpers below wrap a lone semicolon as an empty declaration node typed
// to the various element interfaces used by the grammar's productions.

func toServiceElement(semi *ast.RuneNode) ast.ServiceElement {
	return ast.NewEmptyDeclNode(semi)
}

func toMethodElement(semi *ast.RuneNode) ast.RPCElement {
	return ast.NewEmptyDeclNode(semi)
}

func toFileElement(semi *ast.RuneNode) ast.FileElement {
	return ast.NewEmptyDeclNode(semi)
}

func toEnumElement(semi *ast.RuneNode) ast.EnumElement {
	return ast.NewEmptyDeclNode(semi)
}

func toMessageElement(semi *ast.RuneNode) ast.MessageElement {
	return ast.NewEmptyDeclNode(semi)
}
// nodeWithRunes pairs an AST node with trailing rune tokens (e.g. semicolons)
// that follow it, accumulated during parsing.
type nodeWithRunes[T ast.Node] struct {
	Node  T
	Runes []*ast.RuneNode
}

// newNodeWithRunes constructs a nodeWithRunes from a node and any trailing
// rune tokens.
func newNodeWithRunes[T ast.Node](node T, trailingRunes ...*ast.RuneNode) nodeWithRunes[T] {
	return nodeWithRunes[T]{
		Node:  node,
		Runes: trailingRunes,
	}
}
// toElements returns a slice containing node followed by one element per rune
// token in runes, each converted to T via convert.
func toElements[T ast.Node](convert func(*ast.RuneNode) T, node T, runes []*ast.RuneNode) []T {
	elements := make([]T, 1+len(runes))
	elements[0] = node
	// Loop variable is named "r" rather than "rune" so as not to shadow the
	// predeclared rune type.
	for i, r := range runes {
		elements[i+1] = convert(r)
	}
	return elements
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/descriptorpb"
"github.com/bufbuild/protocompile/ast"
"github.com/bufbuild/protocompile/reporter"
)
// Clone returns a copy of the given result. Since descriptor protos may be
// mutated during linking, this can return a defensive copy so that mutations
// don't impact concurrent operations in an unsafe way. This is called if the
// parse result could be re-used across concurrent operations and has unresolved
// references and options which will require mutation by the linker.
//
// If the given value has a method with the following signature, it will be
// called to perform the operation:
//
// Clone() Result
//
// If the given value does not provide a Clone method and is not the implementation
// provided by this package, it is possible for an error to occur in creating the
// copy, which may result in a panic. This can happen if the AST of the given result
// is not actually valid and a file descriptor proto cannot be successfully derived
// from it.
func Clone(r Result) Result {
	// Prefer a Clone method supplied by the implementation itself.
	if cl, ok := r.(interface{ Clone() Result }); ok {
		return cl.Clone()
	}
	if res, ok := r.(*result); ok {
		// This package's own implementation: deep-copy the proto and rebuild
		// the proto-message-to-AST-node index against the copied protos.
		newProto := proto.Clone(res.proto).(*descriptorpb.FileDescriptorProto) //nolint:errcheck
		newNodes := make(map[proto.Message]ast.Node, len(res.nodes))
		newResult := &result{
			file:  res.file,
			proto: newProto,
			nodes: newNodes,
		}
		recreateNodeIndexForFile(res, newResult, res.proto, newProto)
		return newResult
	}
	// Can't do the deep-copy we know how to do. So we have to take a
	// different tactic.
	if r.AST() == nil {
		// no AST? all we have to do is copy the proto
		fileProto := proto.Clone(r.FileDescriptorProto()).(*descriptorpb.FileDescriptorProto) //nolint:errcheck
		return ResultWithoutAST(fileProto)
	}
	// Otherwise, we have an AST, but no way to clone the result's
	// internals. So just re-create them from scratch.
	res, err := ResultFromAST(r.AST(), false, reporter.NewHandler(nil))
	if err != nil {
		// Per the doc comment: only possible if the AST is invalid.
		panic(err)
	}
	return res
}
// recreateNodeIndexForFile populates clone.nodes so that every descriptor
// proto reachable from cloneProto maps to the same AST node that its
// counterpart in origProto mapped to in orig. cloneProto must be a deep copy
// of origProto, so the parallel slices have matching lengths.
func recreateNodeIndexForFile(orig, clone *result, origProto, cloneProto *descriptorpb.FileDescriptorProto) {
	updateNodeIndexWithOptions[*descriptorpb.FileOptions](orig, clone, origProto, cloneProto)
	for i, origMd := range origProto.MessageType {
		cloneMd := cloneProto.MessageType[i]
		recreateNodeIndexForMessage(orig, clone, origMd, cloneMd)
	}
	for i, origEd := range origProto.EnumType {
		cloneEd := cloneProto.EnumType[i]
		recreateNodeIndexForEnum(orig, clone, origEd, cloneEd)
	}
	for i, origExtd := range origProto.Extension {
		cloneExtd := cloneProto.Extension[i]
		updateNodeIndexWithOptions[*descriptorpb.FieldOptions](orig, clone, origExtd, cloneExtd)
	}
	for i, origSd := range origProto.Service {
		cloneSd := cloneProto.Service[i]
		updateNodeIndexWithOptions[*descriptorpb.ServiceOptions](orig, clone, origSd, cloneSd)
		for j, origMtd := range origSd.Method {
			cloneMtd := cloneSd.Method[j]
			updateNodeIndexWithOptions[*descriptorpb.MethodOptions](orig, clone, origMtd, cloneMtd)
		}
	}
}
// recreateNodeIndexForMessage re-indexes a message descriptor proto and all
// of its contents (fields, oneofs, extension ranges, reserved ranges, nested
// messages, enums, and extensions) under the AST nodes recorded for the
// corresponding elements of origProto. cloneProto must be a deep copy of
// origProto, so the parallel slices have matching lengths.
func recreateNodeIndexForMessage(orig, clone *result, origProto, cloneProto *descriptorpb.DescriptorProto) {
	updateNodeIndexWithOptions[*descriptorpb.MessageOptions](orig, clone, origProto, cloneProto)
	for i, origFld := range origProto.Field {
		cloneFld := cloneProto.Field[i]
		updateNodeIndexWithOptions[*descriptorpb.FieldOptions](orig, clone, origFld, cloneFld)
	}
	for i, origOod := range origProto.OneofDecl {
		cloneOod := cloneProto.OneofDecl[i]
		updateNodeIndexWithOptions[*descriptorpb.OneofOptions](orig, clone, origOod, cloneOod)
	}
	for i, origExtr := range origProto.ExtensionRange {
		cloneExtr := cloneProto.ExtensionRange[i]
		// Extension ranges are indexed under a wrapper node; re-index the
		// wrapped form as well as the range itself.
		updateNodeIndex(orig, clone, asExtsNode(origExtr), asExtsNode(cloneExtr))
		updateNodeIndexWithOptions[*descriptorpb.ExtensionRangeOptions](orig, clone, origExtr, cloneExtr)
	}
	for i, origRr := range origProto.ReservedRange {
		cloneRr := cloneProto.ReservedRange[i]
		updateNodeIndex(orig, clone, origRr, cloneRr)
	}
	for i, origNmd := range origProto.NestedType {
		cloneNmd := cloneProto.NestedType[i]
		recreateNodeIndexForMessage(orig, clone, origNmd, cloneNmd)
	}
	for i, origEd := range origProto.EnumType {
		cloneEd := cloneProto.EnumType[i]
		recreateNodeIndexForEnum(orig, clone, origEd, cloneEd)
	}
	for i, origExtd := range origProto.Extension {
		cloneExtd := cloneProto.Extension[i]
		updateNodeIndexWithOptions[*descriptorpb.FieldOptions](orig, clone, origExtd, cloneExtd)
	}
}
// recreateNodeIndexForEnum copies the AST node-index entries for an enum
// descriptor (and its values and reserved ranges) from orig to clone.
func recreateNodeIndexForEnum(orig, clone *result, origProto, cloneProto *descriptorpb.EnumDescriptorProto) {
	updateNodeIndexWithOptions[*descriptorpb.EnumOptions](orig, clone, origProto, cloneProto)
	for i := range origProto.Value {
		updateNodeIndexWithOptions[*descriptorpb.EnumValueOptions](orig, clone, origProto.Value[i], cloneProto.Value[i])
	}
	for i := range origProto.ReservedRange {
		updateNodeIndex(orig, clone, origProto.ReservedRange[i], cloneProto.ReservedRange[i])
	}
}
// recreateNodeIndexForOptions copies the AST node-index entries for a set of
// uninterpreted options, including each option's name parts, from orig to
// clone.
func recreateNodeIndexForOptions(orig, clone *result, origProtos, cloneProtos []*descriptorpb.UninterpretedOption) {
	for i := range origProtos {
		origOpt, cloneOpt := origProtos[i], cloneProtos[i]
		updateNodeIndex(orig, clone, origOpt, cloneOpt)
		for j := range origOpt.Name {
			updateNodeIndex(orig, clone, origOpt.Name[j], cloneOpt.Name[j])
		}
	}
}
// updateNodeIndex makes cloneProto map, in clone's node index, to the same
// AST node that origProto maps to in orig's index. It is a no-op when
// origProto has no associated node.
func updateNodeIndex[M proto.Message](orig, clone *result, origProto, cloneProto M) {
	if node := orig.nodes[origProto]; node != nil {
		clone.nodes[cloneProto] = node
	}
}
// pointerMessage is a constraint for a proto.Message whose underlying type
// is a pointer to T. Making the pointer shape explicit lets generic code
// compare values of this type against nil.
type pointerMessage[T any] interface {
	*T
	proto.Message
}
// options is a constraint for an options message (such as
// *descriptorpb.FieldOptions) from which uninterpreted options can be read.
type options[T any] interface {
	// need this type instead of just proto.Message so we can check for nil pointer
	pointerMessage[T]
	GetUninterpretedOption() []*descriptorpb.UninterpretedOption
}
// withOptions is a constraint for a descriptor message that carries an
// options message of type O (for example, *descriptorpb.FieldDescriptorProto
// carries *descriptorpb.FieldOptions).
type withOptions[O options[T], T any] interface {
	proto.Message
	GetOptions() O
}
// updateNodeIndexWithOptions copies the node-index entry for the given
// descriptor and, when the descriptor has options, the entries for its
// uninterpreted options as well.
func updateNodeIndexWithOptions[O options[T], M withOptions[O, T], T any](orig, clone *result, origProto, cloneProto M) {
	updateNodeIndex(orig, clone, origProto, cloneProto)
	if origOpts := origProto.GetOptions(); origOpts != nil {
		recreateNodeIndexForOptions(orig, clone, origOpts.GetUninterpretedOption(), cloneProto.GetOptions().GetUninterpretedOption())
	}
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"bufio"
"bytes"
"errors"
"fmt"
"io"
"math"
"strconv"
"strings"
"unicode/utf8"
"github.com/bufbuild/protocompile/ast"
"github.com/bufbuild/protocompile/reporter"
)
// runeReader provides rune-at-a-time reading over a byte slice, with support
// for unreading a rune and for marking the start of the current token.
type runeReader struct {
	data []byte
	pos  int   // offset into data of the next rune to read
	err  error // sticky error; once set, readRune keeps returning it
	mark int   // offset where the current token started (see setMark/getMark)
	// Enable this check to make input required to be valid UTF-8.
	// For now, since protoc allows invalid UTF-8, default to false.
	utf8Strict bool
}
// readRune decodes and consumes the next rune, returning it along with its
// encoded size in bytes. Errors are sticky: after io.EOF (or a UTF-8 error
// in strict mode) every subsequent call returns the same error.
func (rr *runeReader) readRune() (r rune, size int, err error) {
	switch {
	case rr.err != nil:
		return 0, 0, rr.err
	case rr.pos == len(rr.data):
		rr.err = io.EOF
		return 0, 0, rr.err
	}
	decoded, sz := utf8.DecodeRune(rr.data[rr.pos:])
	if rr.utf8Strict && decoded == utf8.RuneError {
		rr.err = fmt.Errorf("invalid UTF8 at offset %d: %x", rr.pos, rr.data[rr.pos])
		return 0, 0, rr.err
	}
	rr.pos += sz
	return decoded, sz, nil
}
// offset reports the byte offset of the next rune to be read.
func (rr *runeReader) offset() int {
	return rr.pos
}
// unreadRune pushes back the last rune read, given its encoded size in
// bytes. Attempting to unread past the current mark is a programming error
// and panics.
func (rr *runeReader) unreadRune(sz int) {
	if rr.pos-sz < rr.mark {
		panic("unread past mark")
	}
	rr.pos -= sz
}
// setMark records the current position as the start of the next token.
func (rr *runeReader) setMark() {
	rr.mark = rr.pos
}
// getMark returns the text between the mark and the current position — i.e.
// the full text of the token just lexed.
func (rr *runeReader) getMark() string {
	return string(rr.data[rr.mark:rr.pos])
}
// comment is a comment token accumulated by the lexer, along with whether it
// was a block comment (/* ... */) as opposed to a line comment (// ...).
type comment struct {
	tok     ast.Token
	isBlock bool
}
// protoLex is the lexer for protobuf source. It implements the Lex and Error
// methods expected by the goyacc-generated parser and tracks comments so
// they can be attributed to tokens (see setPrevAndAddComments).
type protoLex struct {
	input   *runeReader
	info    *ast.FileInfo
	handler *reporter.Handler
	res     *ast.FileNode // root of the parsed AST; set by the parser

	prevSym    ast.TerminalNode // most recently emitted symbol
	prevOffset int              // offset where the current token started
	eof        ast.Token        // token synthesized at end of input

	prevLine, curLine int // line of previous symbol / current position

	// maybeDonateComment signals whether the first pending comment may be
	// attributed to the previous token as a trailing comment.
	maybeDonateComment int
	// comments accumulated since the last symbol was emitted.
	comments []comment
}
// utf8Bom is the UTF-8 byte order mark, stripped from the start of input if present.
var utf8Bom = []byte{0xEF, 0xBB, 0xBF}
// newLexer creates a lexer over the contents of in, discarding a leading
// UTF-8 byte order mark if present. The filename is used only for position
// information; errors during lexing are reported to handler.
func newLexer(in io.Reader, filename string, handler *reporter.Handler) (*protoLex, error) {
	br := bufio.NewReader(in)
	// if file has UTF8 byte order marker preface, consume it
	if marker, err := br.Peek(3); err == nil && bytes.Equal(marker, utf8Bom) {
		_, _ = br.Discard(3)
	}
	contents, err := io.ReadAll(br)
	if err != nil {
		return nil, err
	}
	lx := &protoLex{
		input:   &runeReader{data: contents},
		info:    ast.NewFileInfo(filename, contents),
		handler: handler,
	}
	return lx, nil
}
// keywords maps protobuf keyword text to the token constant that the
// generated parser expects for it.
var keywords = map[string]int{
	"syntax":     _SYNTAX,
	"edition":    _EDITION,
	"import":     _IMPORT,
	"weak":       _WEAK,
	"public":     _PUBLIC,
	"package":    _PACKAGE,
	"option":     _OPTION,
	"true":       _TRUE,
	"false":      _FALSE,
	"inf":        _INF,
	"nan":        _NAN,
	"repeated":   _REPEATED,
	"optional":   _OPTIONAL,
	"required":   _REQUIRED,
	"double":     _DOUBLE,
	"float":      _FLOAT,
	"int32":      _INT32,
	"int64":      _INT64,
	"uint32":     _UINT32,
	"uint64":     _UINT64,
	"sint32":     _SINT32,
	"sint64":     _SINT64,
	"fixed32":    _FIXED32,
	"fixed64":    _FIXED64,
	"sfixed32":   _SFIXED32,
	"sfixed64":   _SFIXED64,
	"bool":       _BOOL,
	"string":     _STRING,
	"bytes":      _BYTES,
	"group":      _GROUP,
	"oneof":      _ONEOF,
	"map":        _MAP,
	"extensions": _EXTENSIONS,
	"to":         _TO,
	"max":        _MAX,
	"reserved":   _RESERVED,
	"enum":       _ENUM,
	"message":    _MESSAGE,
	"extend":     _EXTEND,
	"service":    _SERVICE,
	"rpc":        _RPC,
	"stream":     _STREAM,
	"returns":    _RETURNS,
}
// maybeNewLine records a line break in the file info when r is a newline. It
// also bumps the comment-donation signal when the pending comments start
// with a block comment, since a newline after a trailing block comment means
// the comment may belong to the previous token.
func (l *protoLex) maybeNewLine(r rune) {
	if r != '\n' {
		return
	}
	l.info.AddLine(l.input.offset())
	l.curLine++
	if l.maybeDonateComment > 0 && len(l.comments) > 0 && l.comments[0].isBlock {
		l.maybeDonateComment++
	}
}
// prev returns the source position where the current token started.
func (l *protoLex) prev() ast.SourcePos {
	return l.info.SourcePos(l.prevOffset)
}
// Lex returns the next token from the input, implementing the interface
// required by the goyacc-generated parser. The token's semantic value is
// stored into lval. It returns 0 at end of input and _ERROR after reporting
// a lexical error.
func (l *protoLex) Lex(lval *protoSymType) int {
	if l.handler.ReporterError() != nil {
		// if error reporter already returned non-nil error,
		// we can skip the rest of the input
		return 0
	}

	l.comments = nil

	for {
		// Mark the token start so newToken/getMark can recover its span.
		l.input.setMark()

		l.prevOffset = l.input.offset()
		c, _, err := l.input.readRune()
		if err == io.EOF {
			// we're not actually returning a rune, but this will associate
			// accumulated comments as a trailing comment on last symbol
			// (if appropriate)
			l.setRune(lval, 0)
			l.eof = lval.b.Token()
			return 0
		}
		if err != nil {
			l.setError(lval, err)
			return _ERROR
		}

		if strings.ContainsRune("\n\r\t\f\v ", c) {
			// skip whitespace
			l.maybeNewLine(c)
			continue
		}

		if c == '.' {
			// decimal literals could start with a dot
			cn, szn, err := l.input.readRune()
			if err != nil {
				l.setRune(lval, c)
				return int(c)
			}
			if cn >= '0' && cn <= '9' {
				l.readNumber()
				token := l.input.getMark()
				f, err := parseFloat(token)
				if err != nil {
					l.setError(lval, numError(err, "float", token))
					return _ERROR
				}
				l.setFloat(lval, f)
				return _FLOAT_LIT
			}
			// not a number; emit the dot itself as a token
			l.input.unreadRune(szn)
			l.setRune(lval, c)
			return int(c)
		}

		if c == '_' || (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') {
			// identifier
			l.readIdentifier()
			str := l.input.getMark()
			if t, ok := keywords[str]; ok {
				l.setIdent(lval, str)
				return t
			}
			l.setIdent(lval, str)
			return _NAME
		}

		if c >= '0' && c <= '9' {
			// integer or float literal
			l.readNumber()
			token := l.input.getMark()
			if strings.HasPrefix(token, "0x") || strings.HasPrefix(token, "0X") {
				// hexadecimal
				ui, err := strconv.ParseUint(token[2:], 16, 64)
				if err != nil {
					l.setError(lval, numError(err, "hexadecimal integer", token[2:]))
					return _ERROR
				}
				l.setInt(lval, ui)
				return _INT_LIT
			}
			if strings.ContainsAny(token, ".eE") {
				// floating point!
				f, err := parseFloat(token)
				if err != nil {
					l.setError(lval, numError(err, "float", token))
					return _ERROR
				}
				l.setFloat(lval, f)
				return _FLOAT_LIT
			}
			// integer! (decimal or octal)
			base := 10
			if token[0] == '0' {
				// a leading zero means octal
				base = 8
			}
			ui, err := strconv.ParseUint(token, base, 64)
			if err != nil {
				kind := "integer"
				if base == 8 {
					kind = "octal integer"
				} else if numErr, ok := err.(*strconv.NumError); ok && numErr.Err == strconv.ErrRange {
					// if it's too big to be an int, parse it as a float
					var f float64
					kind = "float"
					f, err = parseFloat(token)
					if err == nil {
						l.setFloat(lval, f)
						return _FLOAT_LIT
					}
				}
				l.setError(lval, numError(err, kind, token))
				return _ERROR
			}
			l.setInt(lval, ui)
			return _INT_LIT
		}

		if c == '\'' || c == '"' {
			// string literal
			str, err := l.readStringLiteral(c)
			if err != nil {
				l.setError(lval, err)
				return _ERROR
			}
			l.setString(lval, str)
			return _STRING_LIT
		}

		if c == '/' {
			// comment
			cn, szn, err := l.input.readRune()
			if err != nil {
				l.setRune(lval, '/')
				return int(c)
			}
			if cn == '/' {
				startLine := l.curLine
				if hasErr := l.skipToEndOfLineComment(lval); hasErr {
					return _ERROR
				}
				l.addComment(false, startLine)
				continue
			}
			if cn == '*' {
				startLine := l.curLine
				ok, hasErr := l.skipToEndOfBlockComment(lval)
				if hasErr {
					return _ERROR
				}
				if !ok {
					l.setError(lval, errors.New("block comment never terminates, unexpected EOF"))
					return _ERROR
				}
				l.addComment(true, startLine)
				continue
			}
			// not a comment; fall through to emit the slash as a token
			l.input.unreadRune(szn)
		}

		if c < 32 || c == 127 {
			l.setError(lval, errors.New("invalid control character"))
			return _ERROR
		}
		if !strings.ContainsRune(";,.:=-+(){}[]<>/", c) {
			l.setError(lval, errors.New("invalid character"))
			return _ERROR
		}
		l.setRune(lval, c)
		return int(c)
	}
}
// parseFloat converts a numeric token into a float64 using protobuf rules:
// underscores as digit separators are rejected (strconv would allow them),
// and overflow to +Inf is not an error, mirroring protoc's behavior of
// quietly using "infinity".
func parseFloat(token string) (float64, error) {
	if strings.ContainsRune(token, '_') {
		// strconv.ParseFloat allows _ to separate digits, but protobuf does not
		return 0, &strconv.NumError{
			Func: "parseFloat",
			Num:  token,
			Err:  strconv.ErrSyntax,
		}
	}
	f, err := strconv.ParseFloat(token, 64)
	switch {
	case err == nil:
		return f, nil
	case math.IsInf(f, 1):
		if numErr, ok := err.(*strconv.NumError); ok && numErr.Err == strconv.ErrRange {
			// protoc doesn't complain about float overflow and instead just uses "infinity"
			// so we mirror that behavior by just returning infinity and ignoring the error
			return f, nil
		}
	}
	return f, err
}
// newToken records a token in the file info spanning from the reader's mark
// to its current position and returns it.
func (l *protoLex) newToken() ast.Token {
	offset := l.input.mark
	length := l.input.pos - l.input.mark
	return l.info.AddToken(offset, length)
}
// addComment appends a just-lexed comment to the pending list. When the
// comment begins on the same line as the previous token and no other
// comments precede it, it becomes a candidate for donation to that token as
// a trailing comment.
func (l *protoLex) addComment(isBlock bool, startLine int) {
	if startLine == l.prevLine && len(l.comments) == 0 {
		l.maybeDonateComment++
	}
	l.comments = append(l.comments, comment{tok: l.newToken(), isBlock: isBlock})
}
// setPrevAndAddComments associates the comments accumulated while lexing n:
// possibly donating the first one to the previous token as a trailing
// comment, and attaching the rest to n as leading comments. It then records
// n as the previous symbol and resets the comment-tracking state.
func (l *protoLex) setPrevAndAddComments(n ast.TerminalNode) {
	comments, maybeDonateComment := l.comments, l.maybeDonateComment
	l.comments, l.maybeDonateComment = nil, 0
	var prevTrailingComments []comment
	if l.prevSym != nil && len(comments) > 0 {
		cur := l.curLine
		if cur == l.prevLine {
			if rn, ok := n.(*ast.RuneNode); ok && rn.Rune == 0 {
				// if current token is EOF, pretend it's on separate line
				// so that the logic below can attribute a final trailing
				// comment to the previous token
				cur++
			}
		}
		if cur > l.prevLine && maybeDonateComment > 0 {
			// Comment starts right after the previous token. If it's a
			// line comment, we record that as a trailing comment.
			//
			// But if it's a block comment, it is only a trailing comment
			// if there are multiple comments or if the block comment ends
			// on a line before n. This lattermost condition is signaled
			// via l.maybeDonateComment > 1.
			canDonate := !comments[0].isBlock ||
				len(comments) > 1 || maybeDonateComment > 1
			if canDonate {
				prevTrailingComments = comments[:1]
				comments = comments[1:]
			}
		}
	}

	// now we can associate comments
	for _, c := range prevTrailingComments {
		l.info.AddComment(c.tok, l.prevSym.Token())
	}
	for _, c := range comments {
		l.info.AddComment(c.tok, n.Token())
	}

	l.prevSym = n
	l.prevLine = l.curLine
}
// setString stores a string literal node for the token just lexed.
func (l *protoLex) setString(lval *protoSymType, val string) {
	node := ast.NewStringLiteralNode(val, l.newToken())
	lval.s = node
	l.setPrevAndAddComments(node)
}
// setIdent stores an identifier node for the token just lexed.
func (l *protoLex) setIdent(lval *protoSymType, val string) {
	node := ast.NewIdentNode(val, l.newToken())
	lval.id = node
	l.setPrevAndAddComments(node)
}
// setInt stores an unsigned integer literal node for the token just lexed.
func (l *protoLex) setInt(lval *protoSymType, val uint64) {
	node := ast.NewUintLiteralNode(val, l.newToken())
	lval.i = node
	l.setPrevAndAddComments(node)
}
// setFloat stores a float literal node for the token just lexed.
func (l *protoLex) setFloat(lval *protoSymType, val float64) {
	node := ast.NewFloatLiteralNode(val, l.newToken())
	lval.f = node
	l.setPrevAndAddComments(node)
}
// setRune stores a single-rune node for the token just lexed.
func (l *protoLex) setRune(lval *protoSymType, val rune) {
	node := ast.NewRuneNode(val, l.newToken())
	lval.b = node
	l.setPrevAndAddComments(node)
}
// setError reports err through the handler and stores the resulting
// positioned error in the symbol value.
func (l *protoLex) setError(lval *protoSymType, err error) {
	lval.err, _ = l.addSourceError(err)
}
// readNumber consumes the rest of a numeric literal. It is deliberately
// permissive — letters, digits, dots, underscores, and exponent signs are
// all gobbled into one token — so a malformed number is consumed whole and
// rejected later by strconv-based parsing, yielding a better error message.
func (l *protoLex) readNumber() {
	allowExpSign := false
	for {
		c, sz, err := l.input.readRune()
		if err != nil {
			break
		}
		if (c == '-' || c == '+') && !allowExpSign {
			// a sign is only part of the number directly after 'e'/'E'
			l.input.unreadRune(sz)
			break
		}
		allowExpSign = false
		if c != '.' && c != '_' && (c < '0' || c > '9') &&
			(c < 'a' || c > 'z') && (c < 'A' || c > 'Z') &&
			c != '-' && c != '+' {
			// no more chars in the number token
			l.input.unreadRune(sz)
			break
		}
		if c == 'e' || c == 'E' {
			// scientific notation char can be followed by
			// an exponent sign
			allowExpSign = true
		}
	}
}
// numError converts a strconv error into a friendlier message that names the
// kind of number being parsed and its text. Errors that are not
// *strconv.NumError are returned unchanged.
func numError(err error, kind, s string) error {
	switch ne, ok := err.(*strconv.NumError); {
	case !ok:
		return err
	case ne.Err == strconv.ErrRange:
		return fmt.Errorf("value out of range for %s: %s", kind, s)
	default:
		// syntax error
		return fmt.Errorf("invalid syntax in %s value: %s", kind, s)
	}
}
// readIdentifier consumes the rest of an identifier: letters, digits, and
// underscores. The first non-identifier rune is unread for the caller.
func (l *protoLex) readIdentifier() {
	for {
		c, sz, err := l.input.readRune()
		if err != nil {
			return
		}
		isIdentRune := c == '_' ||
			(c >= 'a' && c <= 'z') ||
			(c >= 'A' && c <= 'Z') ||
			(c >= '0' && c <= '9')
		if !isIdentRune {
			l.input.unreadRune(sz)
			return
		}
	}
}
// readStringLiteral consumes a string literal opened with the given quote
// rune, processing escape sequences, and returns the decoded contents
// (without the quotes). Invalid escapes are reported via the handler but
// lexing continues, so every bad escape in one literal can be reported; the
// last one recorded is returned as the final error.
func (l *protoLex) readStringLiteral(quote rune) (string, error) {
	var buf bytes.Buffer
	var escapeError reporter.ErrorWithPos
	var noMoreErrors bool
	// reportErr records a bad escape: any previously recorded escape error
	// is flushed to the handler and this one is remembered in its place.
	reportErr := func(msg, badEscape string) {
		if noMoreErrors {
			return
		}
		if escapeError != nil {
			// report previous one
			_, ok := l.addSourceError(escapeError)
			if !ok {
				noMoreErrors = true
			}
		}
		var err error
		if strings.HasSuffix(msg, "%s") {
			err = fmt.Errorf(msg, badEscape)
		} else {
			err = errors.New(msg)
		}
		// we've now consumed the bad escape and lexer position is after it, so we need
		// to back up to the beginning of the escape to report the correct position
		escapeError = l.errWithCurrentPos(err, -len(badEscape))
	}
	for {
		c, _, err := l.input.readRune()
		if err != nil {
			if err == io.EOF {
				err = io.ErrUnexpectedEOF
			}
			return "", err
		}
		if c == '\n' {
			return "", errors.New("encountered end-of-line before end of string literal")
		}
		if c == quote {
			break
		}
		if c == 0 {
			reportErr("null character ('\\0') not allowed in string literal", string(rune(0)))
			continue
		}
		if c == '\\' {
			// escape sequence
			c, _, err = l.input.readRune()
			if err != nil {
				return "", err
			}
			switch {
			case c == 'x' || c == 'X':
				// hex escape: \x followed by one or two hex digits
				c1, sz1, err := l.input.readRune()
				if err != nil {
					return "", err
				}
				if c1 == quote || c1 == '\\' {
					l.input.unreadRune(sz1)
					reportErr("invalid hex escape: %s", "\\"+string(c))
					continue
				}
				c2, sz2, err := l.input.readRune()
				if err != nil {
					return "", err
				}
				var hex string
				if (c2 < '0' || c2 > '9') && (c2 < 'a' || c2 > 'f') && (c2 < 'A' || c2 > 'F') {
					// second rune is not a hex digit; escape is one digit long
					l.input.unreadRune(sz2)
					hex = string(c1)
				} else {
					hex = string([]rune{c1, c2})
				}
				i, err := strconv.ParseInt(hex, 16, 32)
				if err != nil {
					reportErr("invalid hex escape: %s", "\\"+string(c)+hex)
					continue
				}
				buf.WriteByte(byte(i))
			case c >= '0' && c <= '7':
				// octal escape: one to three octal digits
				c2, sz2, err := l.input.readRune()
				if err != nil {
					return "", err
				}
				var octal string
				if c2 < '0' || c2 > '7' {
					l.input.unreadRune(sz2)
					octal = string(c)
				} else {
					c3, sz3, err := l.input.readRune()
					if err != nil {
						return "", err
					}
					if c3 < '0' || c3 > '7' {
						l.input.unreadRune(sz3)
						octal = string([]rune{c, c2})
					} else {
						octal = string([]rune{c, c2, c3})
					}
				}
				i, err := strconv.ParseInt(octal, 8, 32)
				if err != nil {
					reportErr("invalid octal escape: %s", "\\"+octal)
					continue
				}
				if i > 0xff {
					reportErr("octal escape is out range, must be between 0 and 377: %s", "\\"+octal)
					continue
				}
				buf.WriteByte(byte(i))
			case c == 'u':
				// short unicode escape: exactly four hex digits
				u := make([]rune, 4)
				for i := range u {
					c2, sz2, err := l.input.readRune()
					if err != nil {
						return "", err
					}
					if c2 == quote || c2 == '\\' {
						// escape cut short; unread so the quote/backslash is re-processed
						l.input.unreadRune(sz2)
						u = u[:i]
						break
					}
					u[i] = c2
				}
				codepointStr := string(u)
				if len(u) < 4 {
					reportErr("invalid unicode escape: %s", "\\u"+codepointStr)
					continue
				}
				i, err := strconv.ParseInt(codepointStr, 16, 32)
				if err != nil {
					reportErr("invalid unicode escape: %s", "\\u"+codepointStr)
					continue
				}
				buf.WriteRune(rune(i))
			case c == 'U':
				// long unicode escape: exactly eight hex digits
				u := make([]rune, 8)
				for i := range u {
					c2, sz2, err := l.input.readRune()
					if err != nil {
						return "", err
					}
					if c2 == quote || c2 == '\\' {
						l.input.unreadRune(sz2)
						u = u[:i]
						break
					}
					u[i] = c2
				}
				codepointStr := string(u)
				if len(u) < 8 {
					reportErr("invalid unicode escape: %s", "\\U"+codepointStr)
					continue
				}
				i, err := strconv.ParseInt(string(u), 16, 32)
				if err != nil {
					reportErr("invalid unicode escape: %s", "\\U"+codepointStr)
					continue
				}
				if i > 0x10ffff || i < 0 {
					reportErr("unicode escape is out of range, must be between 0 and 0x10ffff: %s", "\\U"+codepointStr)
					continue
				}
				buf.WriteRune(rune(i))
			case c == 'a':
				buf.WriteByte('\a')
			case c == 'b':
				buf.WriteByte('\b')
			case c == 'f':
				buf.WriteByte('\f')
			case c == 'n':
				buf.WriteByte('\n')
			case c == 'r':
				buf.WriteByte('\r')
			case c == 't':
				buf.WriteByte('\t')
			case c == 'v':
				buf.WriteByte('\v')
			case c == '\\':
				buf.WriteByte('\\')
			case c == '\'':
				buf.WriteByte('\'')
			case c == '"':
				buf.WriteByte('"')
			case c == '?':
				buf.WriteByte('?')
			default:
				reportErr("invalid escape sequence: %s", "\\"+string(c))
				continue
			}
		} else {
			buf.WriteRune(c)
		}
	}
	if escapeError != nil {
		return "", escapeError
	}
	return buf.String(), nil
}
// skipToEndOfLineComment consumes the remainder of a line (//) comment. It
// returns true only when a NUL byte was found, in which case an error has
// already been recorded in lval. The terminating newline, if any, is unread
// so the main lexer loop can count it.
func (l *protoLex) skipToEndOfLineComment(lval *protoSymType) (hasErr bool) {
	for {
		c, sz, err := l.input.readRune()
		if err != nil {
			// EOF also ends a line comment
			return false
		}
		if c == '\n' {
			// don't include newline in the comment
			l.input.unreadRune(sz)
			return false
		}
		if c == 0 {
			l.setError(lval, errors.New("invalid control character"))
			return true
		}
	}
}
// skipToEndOfBlockComment consumes input through the terminating "*/" of a
// block comment. ok is false if EOF was hit first; hasErr reports that a NUL
// byte was found and an error was already recorded in lval.
func (l *protoLex) skipToEndOfBlockComment(lval *protoSymType) (ok, hasErr bool) {
	for {
		c, _, err := l.input.readRune()
		if err != nil {
			return false, false
		}
		if c == 0 {
			l.setError(lval, errors.New("invalid control character"))
			return false, true
		}
		l.maybeNewLine(c)
		if c != '*' {
			continue
		}
		next, sz, err := l.input.readRune()
		if err != nil {
			return false, false
		}
		if next == '/' {
			return true, false
		}
		// '*' not followed by '/'; re-examine the rune on the next pass
		l.input.unreadRune(sz)
	}
}
// addSourceError reports err through the handler, first wrapping it with the
// current token's position if it does not already carry one. The boolean
// result is false when the handler aborted (returned a non-nil error).
func (l *protoLex) addSourceError(err error) (reporter.ErrorWithPos, bool) {
	ewp, ok := err.(reporter.ErrorWithPos)
	if !ok {
		// TODO: Store the previous span instead of just the position.
		ewp = reporter.Error(ast.NewSourceSpan(l.prev(), l.prev()), err)
	}
	handlerErr := l.handler.HandleError(ewp)
	return ewp, handlerErr == nil
}
// Error implements the error-reporting method expected by the generated
// parser, recording s as a source error at the current position.
func (l *protoLex) Error(s string) {
	_, _ = l.addSourceError(errors.New(s))
}
// TODO: Accept both a start and end offset, and use that to create a span.

// errWithCurrentPos wraps err with a position computed as the current input
// offset plus the given (possibly negative) adjustment. If err already
// carries a position, it is returned unchanged.
func (l *protoLex) errWithCurrentPos(err error, offset int) reporter.ErrorWithPos {
	if ewp, ok := err.(reporter.ErrorWithPos); ok {
		return ewp
	}
	pos := l.info.SourcePos(l.input.offset() + offset)
	return reporter.Error(ast.NewSourceSpan(pos, pos), err)
}
// requireSemicolon pops the first semicolon from the given list, returning
// it along with the remainder. If the list is empty, a syntax error is
// reported and both results are nil.
func (l *protoLex) requireSemicolon(semicolons []*ast.RuneNode) (*ast.RuneNode, []*ast.RuneNode) {
	if len(semicolons) > 0 {
		return semicolons[0], semicolons[1:]
	}
	l.Error("syntax error: expecting ';'")
	return nil, nil
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"fmt"
"io"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/descriptorpb"
"github.com/bufbuild/protocompile/ast"
"github.com/bufbuild/protocompile/reporter"
)
// The path ../.tmp/bin/goyacc is built when using `make generate` from repo root.
//go:generate ../.tmp/bin/goyacc -o proto.y.go -l -p proto proto.y
// init enables verbose parser errors and rewrites the goyacc-generated
// token-name table so syntax errors mention friendly names (e.g.
// "identifier", a quoted keyword) instead of internal token identifiers.
func init() {
	protoErrorVerbose = true

	// fix up the generated "token name" array so that error messages are nicer
	setTokenName(_STRING_LIT, "string literal")
	setTokenName(_INT_LIT, "int literal")
	setTokenName(_FLOAT_LIT, "float literal")
	setTokenName(_NAME, "identifier")
	setTokenName(_ERROR, "error")
	// for keywords, just show the keyword itself wrapped in quotes
	for str, i := range keywords {
		setTokenName(i, fmt.Sprintf(`"%s"`, str))
	}
}
// setTokenName replaces the display text for the given external token value
// in the generated parser's token-name table, so error messages can show a
// friendlier description. It panics if the token value cannot be mapped to
// an internal token number.
func setTokenName(token int, text string) {
	// NB: this is based on logic in generated parse code that translates the
	// int returned from the lexer into an internal token number.
	var intern int8
	if token < len(protoTok1) {
		intern = protoTok1[token]
	} else {
		if token >= protoPrivate {
			if token < protoPrivate+len(protoTok2) {
				intern = protoTok2[token-protoPrivate]
			}
		}
		if intern == 0 {
			// fall back to scanning the sparse (token, intern) pair table
			for i := 0; i+1 < len(protoTok3); i += 2 {
				if int(protoTok3[i]) == token {
					intern = protoTok3[i+1]
					break
				}
			}
		}
	}
	if intern >= 1 && int(intern-1) < len(protoToknames) {
		protoToknames[intern-1] = text
		return
	}
	panic(fmt.Sprintf("Unknown token value: %d", token))
}
// Parse parses the given source code info and returns an AST. The given filename
// is used to construct error messages and position information. The given reader
// supplies the source code. The given handler is used to report errors and
// warnings encountered while parsing. If any errors are reported, this function
// returns a non-nil error.
//
// If the error returned is due to a syntax error in the source, then a non-nil
// AST is also returned. If the handler chooses to not abort the parse (e.g. the
// underlying error reporter returns nil instead of an error), the parser will
// attempt to recover and keep going. This allows multiple syntax errors to be
// reported in a single pass. And it also means that more of the AST can be
// populated (erroneous productions around the syntax error will of course be
// absent).
//
// The degree to which the parser can recover from errors and populate the AST
// depends on the nature of the syntax error and if there are any tokens after the
// syntax error that can help the parser recover. This error recovery and partial
// AST production is best effort.
func Parse(filename string, r io.Reader, handler *reporter.Handler) (*ast.FileNode, error) {
	lx, err := newLexer(r, filename, handler)
	if err != nil {
		return nil, err
	}
	protoParse(lx)
	res := lx.res
	if res == nil {
		// nil AST means there was an error that prevented any parsing
		// or the file was empty; synthesize empty non-nil AST
		res = ast.NewEmptyFileNode(filename)
		lx.res = res
	}
	return res, handler.Error()
}
// Result is the result of constructing a descriptor proto from a parsed AST.
// From this result, the AST and the file descriptor proto can be had. This
// also contains numerous lookup functions, for looking up AST nodes that
// correspond to various elements of the descriptor hierarchy.
//
// Results can be created without AST information, using the ResultWithoutAST()
// function. All functions other than AST() will still return non-nil values,
// allowing compile operations to work with files that have only intermediate
// descriptor protos and no source code. For such results, the function that
// return AST nodes will return placeholder nodes. The position information for
// placeholder nodes contains only the filename.
type Result interface {
	// AST returns the parsed abstract syntax tree. This returns nil if the
	// Result was created without an AST.
	AST() *ast.FileNode
	// FileDescriptorProto returns the file descriptor proto.
	FileDescriptorProto() *descriptorpb.FileDescriptorProto
	// FileNode returns the root of the AST. If this result has no AST then a
	// placeholder node is returned.
	FileNode() ast.FileDeclNode
	// Node returns the AST node from which the given message was created. This
	// can return nil, such as if the given message is not part of the
	// FileDescriptorProto hierarchy. If this result has no AST, this returns a
	// placeholder node.
	Node(proto.Message) ast.Node
	// OptionNode returns the AST node corresponding to the given uninterpreted
	// option. This can return nil, such as if the given option is not part of
	// the FileDescriptorProto hierarchy. If this result has no AST, this
	// returns a placeholder node.
	OptionNode(*descriptorpb.UninterpretedOption) ast.OptionDeclNode
	// OptionNamePartNode returns the AST node corresponding to the given name
	// part for an uninterpreted option. This can return nil, such as if the
	// given name part is not part of the FileDescriptorProto hierarchy. If this
	// result has no AST, this returns a placeholder node.
	OptionNamePartNode(*descriptorpb.UninterpretedOption_NamePart) ast.Node
	// MessageNode returns the AST node corresponding to the given message. This
	// can return nil, such as if the given message is not part of the
	// FileDescriptorProto hierarchy. If this result has no AST, this returns a
	// placeholder node.
	MessageNode(*descriptorpb.DescriptorProto) ast.MessageDeclNode
	// FieldNode returns the AST node corresponding to the given field. This can
	// return nil, such as if the given field is not part of the
	// FileDescriptorProto hierarchy. If this result has no AST, this returns a
	// placeholder node.
	FieldNode(*descriptorpb.FieldDescriptorProto) ast.FieldDeclNode
	// OneofNode returns the AST node corresponding to the given oneof. This can
	// return nil, such as if the given oneof is not part of the
	// FileDescriptorProto hierarchy. If this result has no AST, this returns a
	// placeholder node.
	OneofNode(*descriptorpb.OneofDescriptorProto) ast.OneofDeclNode
	// ExtensionRangeNode returns the AST node corresponding to the given
	// extension range. This can return nil, such as if the given range is not
	// part of the FileDescriptorProto hierarchy. If this result has no AST,
	// this returns a placeholder node.
	ExtensionRangeNode(*descriptorpb.DescriptorProto_ExtensionRange) ast.RangeDeclNode
	// ExtensionsNode returns the AST node corresponding to the "extensions"
	// statement in a message that corresponds to the given range. This will be
	// the parent of the node returned by ExtensionRangeNode, which contains the
	// options that apply to all child ranges.
	ExtensionsNode(*descriptorpb.DescriptorProto_ExtensionRange) ast.NodeWithOptions
	// MessageReservedRangeNode returns the AST node corresponding to the given
	// reserved range. This can return nil, such as if the given range is not
	// part of the FileDescriptorProto hierarchy. If this result has no AST,
	// this returns a placeholder node.
	MessageReservedRangeNode(*descriptorpb.DescriptorProto_ReservedRange) ast.RangeDeclNode
	// EnumNode returns the AST node corresponding to the given enum. This can
	// return nil, such as if the given enum is not part of the
	// FileDescriptorProto hierarchy. If this result has no AST, this returns a
	// placeholder node.
	EnumNode(*descriptorpb.EnumDescriptorProto) ast.NodeWithOptions
	// EnumValueNode returns the AST node corresponding to the given enum value.
	// This can return nil, such as if the given enum value is not part of the
	// FileDescriptorProto hierarchy. If this result has no AST, this returns a
	// placeholder node.
	EnumValueNode(*descriptorpb.EnumValueDescriptorProto) ast.EnumValueDeclNode
	// EnumReservedRangeNode returns the AST node corresponding to the given
	// reserved range. This can return nil, such as if the given range is not
	// part of the FileDescriptorProto hierarchy. If this result has no AST,
	// this returns a placeholder node.
	EnumReservedRangeNode(*descriptorpb.EnumDescriptorProto_EnumReservedRange) ast.RangeDeclNode
	// ServiceNode returns the AST node corresponding to the given service. This
	// can return nil, such as if the given service is not part of the
	// FileDescriptorProto hierarchy. If this result has no AST, this returns a
	// placeholder node.
	ServiceNode(*descriptorpb.ServiceDescriptorProto) ast.NodeWithOptions
	// MethodNode returns the AST node corresponding to the given method. This
	// can return nil, such as if the given method is not part of the
	// FileDescriptorProto hierarchy. If this result has no AST, this returns a
	// placeholder node.
	MethodNode(*descriptorpb.MethodDescriptorProto) ast.RPCDeclNode
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Code generated by goyacc -o proto.y.go -l -p proto proto.y. DO NOT EDIT.
package parser
import __yyfmt__ "fmt"
//lint:file-ignore SA4006 generated parser has unused values
import (
"math"
"strings"
"github.com/bufbuild/protocompile/ast"
)
type protoSymType struct {
yys int
file *ast.FileNode
syn *ast.SyntaxNode
ed *ast.EditionNode
fileElements []ast.FileElement
pkg nodeWithRunes[*ast.PackageNode]
imprt nodeWithRunes[*ast.ImportNode]
msg nodeWithRunes[*ast.MessageNode]
msgElements []ast.MessageElement
fld *ast.FieldNode
msgFld nodeWithRunes[*ast.FieldNode]
mapFld nodeWithRunes[*ast.MapFieldNode]
mapType *ast.MapTypeNode
grp *ast.GroupNode
msgGrp nodeWithRunes[*ast.GroupNode]
oo nodeWithRunes[*ast.OneofNode]
ooElement ast.OneofElement
ooElements []ast.OneofElement
ext nodeWithRunes[*ast.ExtensionRangeNode]
resvd nodeWithRunes[*ast.ReservedNode]
en nodeWithRunes[*ast.EnumNode]
enElements []ast.EnumElement
env nodeWithRunes[*ast.EnumValueNode]
extend nodeWithRunes[*ast.ExtendNode]
extElement ast.ExtendElement
extElements []ast.ExtendElement
svc nodeWithRunes[*ast.ServiceNode]
svcElements []ast.ServiceElement
mtd nodeWithRunes[*ast.RPCNode]
mtdMsgType *ast.RPCTypeNode
mtdElements []ast.RPCElement
optRaw *ast.OptionNode
opt nodeWithRunes[*ast.OptionNode]
opts *compactOptionSlices
refRaw *ast.FieldReferenceNode
ref nodeWithRunes[*ast.FieldReferenceNode]
optNms *fieldRefSlices
cmpctOpts *ast.CompactOptionsNode
rng *ast.RangeNode
rngs *rangeSlices
names *nameSlices
cidPart nodeWithRunes[*ast.IdentNode]
cid *identSlices
tid ast.IdentValueNode
sl *valueSlices
msgLitFlds *messageFieldList
msgLitFld *ast.MessageFieldNode
v ast.ValueNode
il ast.IntValueNode
str []*ast.StringLiteralNode
s *ast.StringLiteralNode
i *ast.UintLiteralNode
f *ast.FloatLiteralNode
id *ast.IdentNode
b *ast.RuneNode
bs []*ast.RuneNode
err error
}
const _STRING_LIT = 57346
const _INT_LIT = 57347
const _FLOAT_LIT = 57348
const _NAME = 57349
const _SYNTAX = 57350
const _EDITION = 57351
const _IMPORT = 57352
const _WEAK = 57353
const _PUBLIC = 57354
const _PACKAGE = 57355
const _OPTION = 57356
const _TRUE = 57357
const _FALSE = 57358
const _INF = 57359
const _NAN = 57360
const _REPEATED = 57361
const _OPTIONAL = 57362
const _REQUIRED = 57363
const _DOUBLE = 57364
const _FLOAT = 57365
const _INT32 = 57366
const _INT64 = 57367
const _UINT32 = 57368
const _UINT64 = 57369
const _SINT32 = 57370
const _SINT64 = 57371
const _FIXED32 = 57372
const _FIXED64 = 57373
const _SFIXED32 = 57374
const _SFIXED64 = 57375
const _BOOL = 57376
const _STRING = 57377
const _BYTES = 57378
const _GROUP = 57379
const _ONEOF = 57380
const _MAP = 57381
const _EXTENSIONS = 57382
const _TO = 57383
const _MAX = 57384
const _RESERVED = 57385
const _ENUM = 57386
const _MESSAGE = 57387
const _EXTEND = 57388
const _SERVICE = 57389
const _RPC = 57390
const _STREAM = 57391
const _RETURNS = 57392
const _ERROR = 57393
var protoToknames = [...]string{
"$end",
"error",
"$unk",
"_STRING_LIT",
"_INT_LIT",
"_FLOAT_LIT",
"_NAME",
"_SYNTAX",
"_EDITION",
"_IMPORT",
"_WEAK",
"_PUBLIC",
"_PACKAGE",
"_OPTION",
"_TRUE",
"_FALSE",
"_INF",
"_NAN",
"_REPEATED",
"_OPTIONAL",
"_REQUIRED",
"_DOUBLE",
"_FLOAT",
"_INT32",
"_INT64",
"_UINT32",
"_UINT64",
"_SINT32",
"_SINT64",
"_FIXED32",
"_FIXED64",
"_SFIXED32",
"_SFIXED64",
"_BOOL",
"_STRING",
"_BYTES",
"_GROUP",
"_ONEOF",
"_MAP",
"_EXTENSIONS",
"_TO",
"_MAX",
"_RESERVED",
"_ENUM",
"_MESSAGE",
"_EXTEND",
"_SERVICE",
"_RPC",
"_STREAM",
"_RETURNS",
"_ERROR",
"'='",
"';'",
"':'",
"'{'",
"'}'",
"'\\\\'",
"'/'",
"'?'",
"'.'",
"','",
"'>'",
"'<'",
"'+'",
"'-'",
"'('",
"')'",
"'['",
"']'",
"'*'",
"'&'",
"'^'",
"'%'",
"'$'",
"'#'",
"'@'",
"'!'",
"'~'",
"'`'",
}
// protoStatenames optionally maps parser states to human-readable names for
// debug output; goyacc leaves it empty here, so protoStatname falls back to
// the generic "state-<n>" form.
var protoStatenames = [...]string{}
// protoEofCode is the internal token number reported at end of input.
const protoEofCode = 1

// protoErrCode is the internal token number of the special "error" token
// used during error recovery.
const protoErrCode = 2

// protoInitialStackSize is the starting depth of the parse stack; the stack
// is grown by doubling when it fills (see the Parse method).
const protoInitialStackSize = 16
var protoExca = [...]int16{
-1, 0,
1, 6,
-2, 21,
-1, 1,
1, -1,
-2, 0,
-1, 2,
1, 1,
-2, 21,
-1, 3,
1, 2,
-2, 21,
-1, 14,
1, 7,
-2, 0,
-1, 89,
52, 60,
61, 60,
69, 60,
-2, 61,
-1, 101,
55, 37,
58, 37,
62, 37,
67, 37,
69, 37,
-2, 34,
-1, 112,
52, 60,
61, 60,
69, 60,
-2, 62,
-1, 118,
56, 249,
-2, 0,
-1, 121,
55, 37,
58, 37,
62, 37,
67, 37,
69, 37,
-2, 35,
-1, 140,
56, 225,
-2, 0,
-1, 142,
56, 214,
-2, 0,
-1, 144,
56, 250,
-2, 0,
-1, 198,
56, 262,
-2, 0,
-1, 203,
56, 83,
62, 83,
-2, 0,
-1, 214,
56, 226,
-2, 0,
-1, 271,
56, 215,
-2, 0,
-1, 377,
56, 263,
-2, 0,
-1, 464,
56, 155,
-2, 0,
-1, 523,
69, 144,
-2, 141,
-1, 531,
56, 156,
-2, 0,
-1, 607,
67, 52,
-2, 49,
-1, 665,
69, 144,
-2, 142,
-1, 690,
67, 52,
-2, 50,
-1, 732,
56, 273,
-2, 0,
-1, 745,
56, 274,
-2, 0,
}
// protoPrivate is the first code of the private range used for named
// terminals; codes below it are literal characters translated via protoTok1.
const protoPrivate = 57344

// protoLast is the number of entries in the protoAct/protoChk action tables;
// table probes are bounds-checked against it.
const protoLast = 2053
var protoAct = [...]int16{
140, 7, 746, 7, 7, 100, 139, 18, 440, 394,
604, 436, 607, 439, 502, 39, 524, 596, 95, 532,
496, 127, 437, 422, 520, 200, 32, 34, 523, 233,
421, 40, 90, 93, 94, 405, 102, 106, 36, 96,
109, 435, 272, 85, 378, 458, 326, 404, 21, 20,
19, 107, 108, 149, 215, 202, 145, 98, 101, 86,
663, 89, 449, 390, 134, 706, 703, 598, 707, 513,
9, 652, 395, 510, 465, 9, 511, 396, 717, 651,
507, 459, 459, 460, 452, 459, 456, 9, 506, 459,
459, 462, 739, 90, 693, 451, 655, 598, 459, 9,
680, 653, 459, 687, 508, 459, 423, 459, 124, 125,
453, 115, 459, 459, 459, 134, 126, 133, 142, 138,
131, 129, 497, 395, 198, 130, 423, 134, 199, 448,
416, 388, 389, 711, 489, 395, 505, 119, 9, 387,
207, 666, 488, 593, 9, 468, 472, 113, 222, 112,
273, 386, 470, 462, 587, 9, 373, 120, 121, 385,
110, 40, 110, 691, 674, 428, 424, 414, 374, 122,
114, 375, 279, 760, 758, 754, 750, 104, 744, 743,
741, 733, 729, 721, 695, 9, 424, 716, 753, 219,
217, 218, 668, 383, 227, 376, 322, 270, 213, 728,
719, 323, 713, 658, 464, 123, 379, 118, 117, 207,
116, 5, 6, 104, 399, 9, 598, 104, 670, 324,
31, 702, 222, 667, 493, 490, 9, 492, 430, 392,
419, 111, 13, 12, 403, 599, 407, 408, 413, 528,
463, 40, 381, 748, 726, 8, 412, 724, 397, 659,
33, 415, 15, 656, 26, 26, 9, 37, 38, 384,
210, 209, 105, 219, 217, 218, 103, 35, 227, 400,
595, 417, 211, 212, 402, 23, 529, 594, 104, 273,
409, 582, 406, 24, 413, 516, 25, 26, 382, 495,
491, 4, 412, 33, 10, 11, 731, 745, 380, 197,
377, 279, 475, 476, 477, 478, 479, 480, 481, 482,
483, 484, 485, 486, 418, 22, 143, 28, 27, 29,
30, 144, 274, 425, 141, 271, 220, 420, 275, 225,
411, 426, 427, 410, 40, 530, 531, 214, 231, 224,
221, 535, 147, 223, 429, 146, 534, 216, 204, 203,
447, 499, 601, 538, 150, 228, 605, 99, 602, 327,
540, 154, 234, 277, 606, 329, 542, 156, 237, 474,
391, 393, 438, 132, 128, 87, 88, 432, 206, 91,
431, 521, 518, 533, 522, 379, 17, 16, 434, 14,
3, 2, 1, 0, 0, 442, 442, 0, 0, 0,
0, 207, 0, 0, 457, 0, 0, 454, 455, 466,
0, 469, 471, 0, 0, 0, 0, 0, 0, 450,
473, 445, 433, 0, 0, 0, 0, 0, 0, 0,
0, 0, 444, 0, 494, 0, 0, 0, 0, 0,
0, 0, 0, 487, 0, 0, 0, 498, 0, 442,
461, 0, 0, 0, 467, 503, 514, 0, 0, 517,
0, 525, 526, 0, 0, 90, 504, 0, 583, 584,
0, 0, 0, 0, 0, 0, 0, 0, 586, 0,
0, 0, 0, 0, 585, 0, 0, 0, 588, 0,
591, 0, 509, 0, 0, 0, 0, 0, 527, 0,
512, 515, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 592, 0, 660, 661, 657, 590,
0, 0, 0, 0, 0, 0, 0, 90, 0, 0,
654, 0, 0, 589, 0, 0, 0, 0, 0, 0,
0, 597, 0, 90, 672, 673, 664, 40, 0, 0,
665, 669, 0, 0, 671, 0, 0, 675, 0, 0,
0, 0, 662, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 676, 0, 0, 0, 0, 0, 0,
679, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 678, 0, 0, 0,
0, 0, 0, 682, 0, 684, 689, 0, 690, 686,
685, 0, 0, 0, 0, 0, 0, 0, 677, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
681, 683, 0, 688, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 0, 0, 0, 442, 0,
697, 0, 0, 699, 503, 696, 0, 692, 0, 701,
0, 0, 0, 133, 0, 504, 131, 129, 710, 0,
709, 130, 0, 0, 0, 0, 715, 712, 0, 700,
704, 0, 0, 0, 0, 0, 720, 0, 0, 722,
718, 714, 694, 0, 0, 698, 0, 0, 133, 0,
0, 131, 129, 0, 727, 0, 130, 732, 705, 708,
730, 0, 735, 725, 723, 0, 734, 0, 0, 0,
0, 0, 0, 0, 749, 742, 0, 0, 0, 0,
747, 736, 737, 0, 0, 755, 752, 0, 756, 0,
0, 757, 0, 747, 0, 0, 751, 0, 0, 0,
759, 0, 0, 0, 0, 0, 0, 0, 0, 0,
738, 501, 740, 33, 137, 135, 41, 42, 43, 44,
45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
0, 0, 0, 0, 134, 0, 0, 0, 0, 0,
0, 0, 395, 0, 441, 0, 0, 0, 500, 33,
137, 135, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 51, 52, 53, 54, 55, 56, 57, 58,
59, 60, 61, 62, 63, 64, 65, 66, 67, 68,
69, 70, 71, 72, 73, 74, 75, 76, 77, 78,
79, 80, 81, 82, 83, 84, 0, 0, 0, 0,
134, 0, 0, 0, 0, 0, 0, 0, 395, 0,
441, 0, 0, 443, 33, 137, 135, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
84, 0, 0, 0, 0, 134, 0, 0, 0, 0,
0, 0, 0, 395, 0, 441, 41, 42, 43, 44,
45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 205, 92, 0, 0, 519, 41,
42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
52, 53, 54, 55, 56, 57, 58, 59, 60, 61,
62, 63, 64, 65, 66, 67, 68, 69, 70, 71,
72, 73, 74, 75, 76, 77, 78, 79, 80, 81,
82, 83, 84, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 446, 0, 205, 0, 0, 0,
208, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 0, 0, 0, 0, 0,
201, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 208, 33, 137, 135, 41, 42, 43, 44,
45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
0, 0, 0, 0, 134, 0, 0, 0, 0, 0,
205, 0, 0, 0, 136, 41, 42, 43, 44, 45,
46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
76, 77, 78, 79, 80, 81, 82, 83, 84, 0,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 33, 423, 208, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, 51, 52, 53,
54, 55, 56, 57, 58, 59, 60, 61, 62, 63,
64, 65, 66, 67, 68, 69, 70, 71, 72, 73,
74, 75, 76, 77, 78, 79, 80, 81, 82, 83,
84, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 424, 41, 42, 43, 44,
45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
0, 0, 0, 0, 0, 92, 41, 42, 43, 44,
45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
0, 0, 0, 0, 0, 0, 0, 0, 0, 97,
608, 609, 610, 611, 612, 613, 614, 615, 616, 617,
618, 619, 620, 621, 622, 623, 624, 625, 626, 627,
628, 629, 630, 631, 632, 633, 634, 635, 636, 637,
638, 639, 640, 641, 642, 643, 644, 645, 646, 647,
648, 649, 600, 650, 0, 0, 0, 0, 0, 0,
0, 0, 0, 603, 330, 331, 332, 333, 334, 335,
336, 337, 338, 339, 340, 341, 342, 343, 344, 345,
346, 347, 348, 349, 350, 351, 352, 353, 354, 355,
356, 357, 358, 359, 401, 360, 361, 362, 363, 364,
365, 366, 367, 368, 369, 370, 371, 372, 0, 0,
0, 0, 0, 226, 0, 0, 0, 328, 238, 239,
240, 241, 242, 243, 244, 26, 245, 246, 247, 248,
153, 152, 151, 249, 250, 251, 252, 253, 254, 255,
256, 257, 258, 259, 260, 261, 262, 263, 0, 230,
236, 229, 264, 265, 232, 28, 27, 29, 266, 267,
268, 269, 0, 0, 0, 0, 0, 0, 0, 0,
0, 235, 330, 331, 332, 333, 334, 335, 336, 337,
338, 339, 340, 341, 342, 343, 344, 345, 346, 347,
348, 349, 350, 351, 352, 353, 354, 355, 356, 357,
358, 359, 325, 360, 361, 362, 363, 364, 365, 366,
367, 368, 369, 370, 371, 372, 0, 0, 0, 0,
0, 148, 0, 0, 0, 328, 157, 158, 159, 160,
161, 162, 163, 164, 165, 166, 167, 168, 153, 152,
151, 169, 170, 171, 172, 173, 174, 175, 176, 177,
178, 179, 180, 181, 182, 183, 0, 184, 185, 186,
187, 188, 189, 190, 191, 192, 193, 194, 195, 196,
0, 0, 0, 0, 0, 536, 0, 0, 0, 155,
543, 544, 545, 546, 547, 548, 549, 537, 550, 551,
552, 553, 0, 0, 0, 554, 555, 556, 557, 558,
559, 560, 561, 562, 563, 564, 565, 566, 567, 568,
539, 569, 570, 571, 572, 573, 574, 575, 576, 577,
578, 579, 580, 581, 0, 0, 0, 0, 0, 0,
0, 0, 0, 541, 210, 209, 41, 42, 43, 44,
45, 46, 47, 48, 49, 50, 51, 52, 53, 54,
55, 56, 57, 58, 59, 60, 61, 62, 63, 64,
65, 66, 67, 68, 69, 70, 71, 72, 73, 74,
75, 76, 77, 78, 79, 80, 81, 82, 83, 84,
33, 406, 0, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, 51, 52, 53, 54, 55, 56, 57,
58, 59, 60, 61, 62, 63, 64, 65, 66, 67,
68, 69, 70, 71, 72, 73, 74, 75, 76, 77,
78, 79, 80, 81, 82, 83, 84, 276, 0, 0,
0, 0, 280, 281, 282, 283, 284, 285, 286, 26,
287, 288, 289, 290, 291, 292, 293, 294, 295, 296,
297, 298, 299, 300, 301, 302, 303, 304, 305, 306,
307, 308, 309, 310, 311, 312, 313, 314, 278, 315,
316, 317, 318, 319, 320, 321, 398, 0, 0, 0,
0, 41, 42, 43, 44, 45, 46, 47, 48, 49,
50, 51, 52, 53, 54, 55, 56, 57, 58, 59,
60, 61, 62, 63, 64, 65, 66, 67, 68, 69,
70, 71, 72, 73, 74, 75, 76, 77, 78, 79,
80, 81, 82, 83, 84, 41, 42, 43, 44, 45,
46, 47, 48, 49, 50, 51, 52, 53, 54, 55,
56, 57, 58, 59, 60, 61, 62, 63, 64, 65,
66, 67, 68, 69, 70, 71, 72, 73, 74, 75,
76, 77, 78, 79, 80, 81, 82, 83, 84, 608,
609, 610, 611, 612, 613, 614, 615, 616, 617, 618,
619, 620, 621, 622, 623, 624, 625, 626, 627, 628,
629, 630, 631, 632, 633, 634, 635, 636, 637, 638,
639, 640, 641, 642, 643, 644, 645, 646, 647, 648,
649, 0, 650,
}
var protoPact = [...]int16{
203, -1000, 162, 162, -1000, 181, 180, 273, 167, -1000,
-1000, -1000, 289, 289, 273, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, 246, 1958, 1329, 1958, 1958, 1389,
1958, -1000, 213, -1000, 209, -1000, 173, 289, 289, 102,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, 179, -1000, 1329, 110, -1000,
-1000, -1000, 1389, 155, 153, 152, -1000, 1958, -1000, 1958,
109, -1000, 150, -1000, -1000, -1000, -1000, 173, 173, -1000,
1958, 1149, -1000, -1000, -1000, 52, 162, 162, 1659, -1000,
-1000, -1000, -1000, 162, -1000, -1000, -1000, 162, -1000, -1000,
274, -1000, -1000, -1000, 1084, -1000, 255, -1000, -1000, 142,
1551, 141, 1865, 140, 1659, -1000, -1000, -1000, 166, 1605,
1958, -1000, -1000, -1000, 108, 1958, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, 139, 240, -1000,
137, -1000, -1000, 1208, 98, 78, 9, -1000, 1914, -1000,
-1000, -1000, -1000, 162, 1551, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, 1497, 1958, 277,
1958, 1958, 1816, -1000, 107, 1958, 67, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
162, 1865, -1000, -1000, -1000, -1000, -1000, 178, 1270, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, 162, -1000, -1000, 1958, 1958, 105, 1958, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, 176, 1958, 100, 162, 240, -1000, -1000,
-1000, -1000, 1958, -1000, -1000, -1000, -1000, -1000, -1000, 835,
835, -1000, -1000, -1000, -1000, 1022, 60, 26, 41, -1000,
-1000, 1958, 1958, 34, 30, -1000, 199, 149, 22, 92,
91, 85, 274, -1000, 1958, 100, 278, -1000, -1000, 121,
81, -1000, 184, -1000, 285, -1000, 175, 172, 1958, 100,
284, -1000, -1000, -1000, 56, -1000, -1000, -1000, -1000, 274,
-1000, 1769, -1000, 769, -1000, 74, -1000, 19, -1000, 35,
-1000, -1000, 1958, -1000, 21, 17, 280, -1000, 162, 959,
162, 162, 277, 234, 1713, 276, -1000, 162, 162, -1000,
289, -1000, 1958, -1000, 93, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, 46, 121, 162,
101, -1000, 272, 265, -1000, 44, 185, 1443, -1000, 10,
-1000, 32, -1000, -1000, -1000, -1000, -1000, 72, -1000, 27,
248, 162, 148, 244, -1000, 162, 46, -1000, -9, -1000,
-1000, 1329, 80, -1000, 171, -1000, -1000, -1000, -1000, -1000,
136, 1713, -1000, -1000, -1000, -1000, 165, 1329, 1958, 1958,
104, 1958, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, 46, -1000, -1000, 274, -1000, 1389, -1000, 162,
-1000, -1000, -1000, -1000, 45, 44, -1000, 163, -1000, 56,
1389, 36, -1000, 1958, -1000, 2002, 103, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000, -1000,
-1000, -1000, 900, -1000, -1000, -1000, 39, 128, 162, 46,
-1000, -1000, 162, -1000, -1000, -1000, -1000, 1149, 162, -1000,
-1000, 169, 14, 13, 1958, 100, -1000, 162, 71, -1000,
162, 147, -1000, 163, -1000, 132, 11, -1000, -1000, -1000,
-1000, -1000, -1000, 162, 145, 162, 127, -1000, 162, -1000,
-1000, -1000, 1149, 242, -1000, 163, 239, 162, 144, -1000,
-1000, -1000, 126, 162, -1000, -1000, 162, -1000, 125, 162,
-1000, 162, -1000, 163, 44, -1000, 37, 124, 162, -1000,
123, 122, 241, 162, 120, -1000, -1000, -1000, 163, 162,
133, -1000, 119, -1000, 162, 241, -1000, -1000, -1000, -1000,
162, -1000, 118, 162, -1000, -1000, -1000, -1000, -1000, 117,
-1000,
}
var protoPgo = [...]int16{
0, 392, 391, 390, 291, 252, 389, 387, 386, 384,
383, 7, 28, 24, 382, 381, 379, 378, 376, 61,
59, 16, 375, 45, 41, 21, 374, 11, 9, 22,
8, 373, 372, 14, 371, 370, 23, 5, 369, 368,
367, 366, 365, 364, 363, 53, 58, 57, 12, 10,
15, 362, 361, 360, 359, 358, 39, 357, 356, 18,
355, 354, 353, 46, 352, 351, 350, 349, 55, 25,
348, 347, 346, 345, 343, 342, 341, 340, 339, 338,
50, 54, 337, 6, 19, 336, 335, 333, 330, 329,
328, 29, 35, 30, 47, 327, 326, 49, 42, 325,
324, 322, 48, 56, 321, 316, 13, 315, 44, 300,
299, 298, 2, 297, 296, 20, 17, 0, 245,
}
var protoR1 = [...]int8{
0, 1, 1, 1, 1, 1, 1, 4, 6, 6,
5, 5, 5, 5, 5, 5, 5, 5, 118, 118,
117, 117, 116, 116, 2, 3, 7, 7, 7, 8,
50, 50, 56, 56, 57, 57, 47, 47, 46, 51,
51, 52, 52, 53, 53, 54, 54, 55, 55, 58,
58, 49, 49, 48, 10, 11, 18, 18, 19, 20,
20, 22, 22, 21, 21, 16, 25, 25, 26, 26,
26, 26, 30, 30, 30, 30, 31, 31, 106, 106,
28, 28, 69, 68, 68, 67, 67, 67, 67, 67,
67, 70, 70, 70, 17, 17, 17, 17, 24, 24,
24, 27, 27, 27, 27, 35, 35, 29, 29, 29,
32, 32, 32, 65, 65, 33, 33, 34, 34, 34,
66, 66, 59, 59, 60, 60, 61, 61, 62, 62,
63, 63, 64, 64, 45, 45, 45, 23, 23, 14,
14, 15, 15, 13, 13, 12, 9, 9, 75, 75,
77, 77, 77, 77, 74, 86, 86, 85, 85, 84,
84, 84, 84, 84, 72, 72, 72, 72, 76, 76,
76, 76, 78, 78, 78, 78, 79, 38, 38, 38,
38, 38, 38, 38, 38, 38, 38, 38, 38, 96,
96, 94, 94, 92, 92, 92, 95, 95, 93, 93,
93, 36, 36, 89, 89, 90, 90, 91, 91, 87,
87, 88, 88, 97, 100, 100, 99, 99, 98, 98,
98, 98, 101, 101, 80, 83, 83, 82, 82, 81,
81, 81, 81, 81, 81, 81, 81, 81, 81, 81,
71, 71, 71, 71, 71, 71, 71, 71, 102, 105,
105, 104, 104, 103, 103, 103, 103, 73, 73, 73,
73, 107, 110, 110, 109, 109, 108, 108, 108, 111,
111, 115, 115, 114, 114, 113, 113, 112, 112, 39,
39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
39, 39, 39, 39, 39, 39, 39, 39, 39, 39,
39, 39, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 40, 40, 40, 40, 40, 40, 40, 40,
40, 40, 44, 44, 44, 44, 44, 44, 44, 44,
44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
44, 44, 44, 44, 44, 44, 44, 44, 44, 44,
44, 44, 44, 44, 41, 41, 41, 41, 41, 41,
41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
41, 41, 41, 41, 41, 41, 41, 41, 41, 41,
41, 41, 41, 42, 42, 42, 42, 42, 42, 42,
42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
42, 42, 42, 42, 42, 42, 42, 42, 42, 42,
42, 42, 42, 42, 42, 42, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43, 43, 43,
43, 43, 43, 43, 43, 43, 43, 43, 43, 37,
37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
37, 37, 37, 37, 37, 37, 37, 37, 37, 37,
37, 37, 37,
}
var protoR2 = [...]int8{
0, 1, 1, 1, 2, 2, 0, 2, 2, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 2,
1, 0, 1, 0, 4, 4, 3, 4, 4, 3,
1, 3, 1, 2, 1, 2, 1, 1, 2, 1,
3, 1, 3, 1, 3, 1, 3, 1, 2, 1,
2, 1, 1, 2, 5, 5, 1, 1, 2, 1,
1, 1, 2, 1, 2, 3, 1, 1, 1, 1,
1, 1, 1, 2, 1, 2, 2, 2, 1, 2,
3, 2, 1, 1, 2, 1, 2, 2, 2, 2,
1, 3, 2, 3, 1, 3, 5, 3, 1, 1,
1, 1, 1, 2, 1, 1, 1, 1, 3, 2,
3, 2, 3, 1, 3, 1, 1, 3, 2, 3,
1, 3, 1, 2, 1, 2, 1, 2, 1, 2,
1, 2, 1, 2, 1, 1, 1, 3, 2, 1,
2, 1, 2, 1, 1, 2, 3, 1, 8, 9,
9, 10, 7, 8, 6, 0, 1, 2, 1, 1,
1, 1, 2, 1, 5, 6, 3, 4, 7, 8,
5, 6, 5, 6, 3, 4, 6, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 4,
4, 1, 3, 1, 3, 3, 1, 3, 1, 3,
3, 1, 2, 4, 1, 4, 1, 3, 3, 1,
3, 1, 3, 6, 1, 2, 2, 1, 1, 1,
1, 1, 4, 5, 6, 1, 2, 2, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
6, 7, 5, 6, 4, 5, 3, 4, 6, 0,
1, 2, 1, 1, 1, 2, 1, 6, 7, 5,
6, 6, 1, 2, 2, 1, 1, 1, 1, 6,
9, 4, 3, 1, 2, 2, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1, 1, 1, 1, 1, 1, 1, 1,
1, 1, 1,
}
var protoChk = [...]int16{
-1000, -1, -2, -3, -4, 8, 9, -117, -118, 53,
-4, -4, 52, 52, -6, -5, -7, -8, -11, -80,
-97, -102, -107, 2, 10, 13, 14, 45, 44, 46,
47, 53, -106, 4, -106, -5, -106, 11, 12, -50,
-37, 7, 8, 9, 10, 11, 12, 13, 14, 15,
16, 17, 18, 19, 20, 21, 22, 23, 24, 25,
26, 27, 28, 29, 30, 31, 32, 33, 34, 35,
36, 37, 38, 39, 40, 41, 42, 43, 44, 45,
46, 47, 48, 49, 50, -21, -20, -22, -18, -19,
-37, -16, 66, -37, -37, -59, -56, 60, -47, -57,
-37, -46, -37, 53, 4, 53, -117, -106, -106, -117,
60, 52, -19, -20, 60, -59, 55, 55, 55, -56,
-47, -46, 60, 55, -117, -117, -37, -25, -26, -28,
-106, -30, -31, -37, 55, 6, 65, 5, 67, -83,
-117, -100, -117, -105, -104, -103, -73, -75, 2, -45,
-61, 21, 20, 19, -52, 60, -40, 7, 8, 9,
10, 11, 12, 13, 14, 15, 16, 17, 18, 22,
23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 34, 35, 36, 38, 39, 40, 41, 42, 43,
44, 45, 46, 47, 48, 49, 50, -110, -117, -117,
-69, 56, -68, -67, -70, 2, -17, -37, 68, 6,
5, 17, 18, 56, -82, -81, -71, -97, -80, -102,
-96, -77, -11, -74, -78, -89, 2, -45, -60, 40,
38, -79, 43, -91, -51, 60, 39, -39, 7, 8,
9, 10, 11, 12, 13, 15, 16, 17, 18, 22,
23, 24, 25, 26, 27, 28, 29, 30, 31, 32,
33, 34, 35, 36, 41, 42, 47, 48, 49, 50,
56, -99, -98, -11, -101, -90, 2, -44, 43, -91,
7, 8, 9, 10, 11, 12, 13, 15, 16, 17,
18, 19, 20, 21, 22, 23, 24, 25, 26, 27,
28, 29, 30, 31, 32, 33, 34, 35, 36, 37,
38, 39, 40, 41, 42, 44, 45, 46, 47, 48,
49, 50, 56, -103, 53, 37, -63, -54, 60, -42,
7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
17, 18, 19, 20, 21, 22, 23, 24, 25, 26,
27, 28, 29, 30, 31, 32, 33, 34, 35, 36,
38, 39, 40, 41, 42, 43, 44, 45, 46, 47,
48, 49, 50, -37, 60, -50, 56, -109, -108, -11,
-111, 2, 48, 56, -68, 61, 53, 61, 53, 54,
54, -35, -29, -34, -28, 63, 68, -56, 2, -117,
-81, 37, -63, -37, -94, -92, 5, -37, -37, -94,
-87, -88, -106, -37, 60, -50, 63, -117, -98, 52,
-95, -93, -36, 5, 65, -117, -37, -37, 60, -50,
52, -37, -117, -108, -37, -24, -27, -29, -32, -106,
-30, 65, -37, 68, -24, -69, 62, -66, 69, 2,
-29, 69, 58, 69, -37, -37, 52, -117, -23, 68,
53, -23, 61, 41, 55, 52, -117, -23, 53, -117,
61, -117, 61, -37, -38, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, -36, 61, 53,
41, 5, 52, 52, -37, 5, -115, 66, -37, -65,
69, 2, -33, -27, -29, 62, 69, 61, 69, -56,
52, 55, -23, 52, -117, -23, 5, -117, -14, 69,
-13, -15, -9, -12, -21, -117, -117, -92, 5, 42,
-86, -85, -84, -10, -72, -76, 2, 14, -62, 37,
-53, 60, -41, 7, 8, 9, 10, 11, 12, 13,
15, 16, 17, 18, 22, 23, 24, 25, 26, 27,
28, 29, 30, 31, 32, 33, 34, 35, 36, 38,
39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
49, 50, 5, -117, -117, -106, -37, 61, -117, -23,
-93, -117, -36, 42, 5, 5, -116, -23, 53, 50,
49, -64, -55, 60, -49, -58, -43, -48, 7, 8,
9, 10, 11, 12, 13, 14, 15, 16, 17, 18,
19, 20, 21, 22, 23, 24, 25, 26, 27, 28,
29, 30, 31, 32, 33, 34, 35, 36, 37, 38,
39, 40, 41, 42, 43, 44, 45, 46, 47, 48,
50, 69, 61, 69, -29, 69, 5, -83, 55, 5,
-117, -117, -23, 69, -13, -12, 61, 52, 56, -84,
53, -21, -37, -37, 60, -50, -117, -23, -59, -117,
55, -23, -116, -23, -116, -115, -59, 67, -56, -49,
-48, 60, -33, 55, -23, 56, -83, -117, -23, -117,
-25, -117, 52, 52, -116, -23, 52, 55, -23, -37,
-117, 62, -83, 55, -116, -117, 55, 67, -83, 55,
-117, 56, -117, -25, 5, -116, 5, -83, 55, 56,
-83, -114, -117, 56, -83, -117, -116, -116, -23, 55,
-23, 56, -83, 56, 56, -113, -112, -11, 2, -117,
56, -116, -83, 55, 56, -117, -112, -117, 56, -83,
56,
}
var protoDef = [...]int16{
-2, -2, -2, -2, 3, 0, 0, 0, 20, 18,
4, 5, 0, 0, -2, 9, 10, 11, 12, 13,
14, 15, 16, 17, 0, 0, 0, 0, 0, 0,
0, 19, 0, 78, 0, 8, 21, 0, 0, 21,
30, 519, 520, 521, 522, 523, 524, 525, 526, 527,
528, 529, 530, 531, 532, 533, 534, 535, 536, 537,
538, 539, 540, 541, 542, 543, 544, 545, 546, 547,
548, 549, 550, 551, 552, 553, 554, 555, 556, 557,
558, 559, 560, 561, 562, 0, 63, 0, 59, -2,
56, 57, 0, 0, 0, 0, 122, 0, 32, 0,
36, -2, 0, 24, 79, 25, 26, 21, 21, 29,
0, 0, -2, 64, 58, 0, 21, 21, -2, 123,
33, -2, 38, 21, 27, 28, 31, 21, 66, 67,
68, 69, 70, 71, 0, 72, 0, 74, 65, 0,
-2, 0, -2, 0, -2, 252, 253, 254, 256, 0,
0, 134, 135, 136, 126, 0, 41, 312, 313, 314,
315, 316, 317, 318, 319, 320, 321, 322, 323, 324,
325, 326, 327, 328, 329, 330, 331, 332, 333, 334,
335, 336, 337, 338, 339, 340, 341, 342, 343, 344,
345, 346, 347, 348, 349, 350, 351, 0, -2, 55,
0, 81, 82, -2, 85, 90, 0, 94, 0, 73,
75, 76, 77, 21, -2, 228, 229, 230, 231, 232,
233, 234, 235, 236, 237, 238, 239, 0, 0, 0,
0, 0, 0, 204, 124, 0, 305, 39, 279, 280,
281, 282, 283, 284, 285, 286, 287, 288, 289, 290,
291, 292, 293, 294, 295, 296, 297, 298, 299, 300,
301, 302, 303, 304, 306, 307, 308, 309, 310, 311,
21, -2, 217, 218, 219, 220, 221, 0, 0, 206,
352, 353, 354, 355, 356, 357, 358, 359, 360, 361,
362, 363, 364, 365, 366, 367, 368, 369, 370, 371,
372, 373, 374, 375, 376, 377, 378, 379, 380, 381,
382, 383, 384, 385, 386, 387, 388, 389, 390, 391,
392, 393, 21, 251, 255, 0, 0, 130, 0, 45,
433, 434, 435, 436, 437, 438, 439, 440, 441, 442,
443, 444, 445, 446, 447, 448, 449, 450, 451, 452,
453, 454, 455, 456, 457, 458, 459, 460, 461, 462,
463, 464, 465, 466, 467, 468, 469, 470, 471, 472,
473, 474, 475, 0, 0, 127, 21, -2, 265, 266,
267, 268, 0, 80, 84, 86, 87, 88, 89, 0,
0, 92, 105, 106, 107, 0, 0, 0, 0, 224,
227, 0, 0, 21, 0, 191, 193, 0, 21, 0,
21, 21, 209, 211, 0, 125, 0, 213, 216, 0,
0, 196, 198, 201, 0, 248, 0, 0, 0, 131,
0, 42, 261, 264, 0, 93, 98, 99, 100, 101,
102, 0, 104, 0, 91, 0, 109, 0, 118, 0,
120, 95, 0, 97, 0, 21, 0, 246, 21, 0,
21, 21, 0, 0, -2, 0, 174, 21, 21, 207,
0, 208, 0, 40, 0, 177, 178, 179, 180, 181,
182, 183, 184, 185, 186, 187, 188, 21, 0, 21,
0, 202, 0, 0, 46, 23, 0, 0, 103, 0,
111, 0, 113, 115, 116, 108, 117, 0, 119, 0,
0, 21, 0, 0, 244, 21, 21, 247, 0, 138,
139, 0, 143, -2, 147, 189, 190, 192, 194, 195,
0, -2, 158, 159, 160, 161, 163, 0, 0, 0,
128, 0, 43, 394, 395, 396, 397, 398, 399, 400,
401, 402, 403, 404, 405, 406, 407, 408, 409, 410,
411, 412, 413, 414, 415, 416, 417, 418, 419, 420,
421, 422, 423, 424, 425, 426, 427, 428, 429, 430,
431, 432, 21, 175, 203, 210, 212, 0, 222, 21,
197, 205, 199, 200, 0, 23, 259, 23, 22, 0,
0, 0, 132, 0, 47, 0, 51, -2, 476, 477,
478, 479, 480, 481, 482, 483, 484, 485, 486, 487,
488, 489, 490, 491, 492, 493, 494, 495, 496, 497,
498, 499, 500, 501, 502, 503, 504, 505, 506, 507,
508, 509, 510, 511, 512, 513, 514, 515, 516, 517,
518, 110, 0, 112, 121, 96, 0, 0, 21, 21,
245, 242, 21, 137, 140, -2, 145, 0, 21, 157,
162, 0, 23, 0, 0, 129, 172, 21, 0, 223,
21, 0, 257, 23, 260, 21, 0, 272, 133, 48,
-2, 53, 114, 21, 0, 21, 0, 240, 21, 243,
146, 154, 0, 0, 166, 23, 0, 21, 0, 44,
173, 176, 0, 21, 258, 269, 21, 271, 0, 21,
152, 21, 241, 23, 23, 167, 0, 0, 21, 148,
0, 0, -2, 21, 0, 153, 54, 164, 23, 21,
0, 170, 0, 149, 21, -2, 276, 277, 278, 150,
21, 165, 0, 21, 171, 270, 275, 151, 168, 0,
169,
}
// protoTok1 translates single-byte character codes to internal token
// numbers: index 0 (EOF) maps to 1 (protoEofCode), unmapped bytes map to 3
// ($unk), and punctuation bytes such as '=', ';', '{' and '}' map to the
// token numbers of their grammar literals.
var protoTok1 = [...]int8{
	1, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 77, 3, 75, 74, 73, 71, 3,
	66, 67, 70, 64, 61, 65, 60, 58, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 54, 53,
	63, 52, 62, 59, 76, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 68, 57, 69, 72, 3, 79, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 3, 3, 3, 3, 3, 3, 3,
	3, 3, 3, 55, 3, 56, 78,
}
// protoTok2 translates codes in the private range to internal token numbers:
// index i corresponds to character code protoPrivate+i. Entry 1 doubles as
// the "unknown char" token in protolex1.
var protoTok2 = [...]int8{
	2, 3, 4, 5, 6, 7, 8, 9, 10, 11,
	12, 13, 14, 15, 16, 17, 18, 19, 20, 21,
	22, 23, 24, 25, 26, 27, 28, 29, 30, 31,
	32, 33, 34, 35, 36, 37, 38, 39, 40, 41,
	42, 43, 44, 45, 46, 47, 48, 49, 50, 51,
}
// protoTok3 lists (character code, token number) pairs for tokens outside
// the ranges covered by protoTok1 and protoTok2; the single 0 means this
// grammar has none.
var protoTok3 = [...]int8{
	0,
}
// protoErrorMessages maps specific (state, lookahead-token) pairs to custom
// syntax-error text. It is empty here, so protoErrorMessage always falls
// back to its generated "unexpected ... expecting ..." form.
var protoErrorMessages = [...]struct {
	state int
	token int
	msg   string
}{}
/* parser for yacc output */

// protoDebug controls parser tracing verbosity (0 = silent; >=1 reports
// error states, >=2 traces shifts/reductions/recovery, >=3 logs each lexed
// token, >=4 dumps per-step state). protoErrorVerbose, when true, makes
// protoErrorMessage list the tokens that were expected instead of returning
// a bare "syntax error".
var (
	protoDebug        = 0
	protoErrorVerbose = false
)
// protoLexer is the token source the generated parser consumes: Lex stores
// the next token's semantic value in lval and returns its character code
// (a non-positive code is treated as end of input — see protolex1); Error
// receives syntax-error messages produced during parsing.
type protoLexer interface {
	Lex(lval *protoSymType) int
	Error(s string)
}
// protoParser is the generated parser's public interface: Parse runs the
// parse loop against a lexer and returns 0 on success or 1 on unrecoverable
// error; Lookahead reports the current lookahead character code, or -1 when
// none is held.
type protoParser interface {
	Parse(protoLexer) int
	Lookahead() int
}
// protoParserImpl holds the parser's mutable state across a Parse call.
type protoParserImpl struct {
	lval  protoSymType                        // semantic value of the most recently lexed token
	stack [protoInitialStackSize]protoSymType // initial parse stack; replaced by a larger slice if it fills
	char  int                                 // current lookahead character code, -1 when none
}
// Lookahead returns the current lookahead character code, or -1 when the
// parser holds no lookahead token (including after Parse has returned).
func (p *protoParserImpl) Lookahead() int {
	return p.char
}
// protoNewParser returns a fresh, zero-valued parser ready for a single
// Parse call.
func protoNewParser() protoParser {
	return &protoParserImpl{}
}
// protoFlag marks protoPact entries for "simple" states that have no
// token-dependent shift action (see the Parse loop's protoFlag comparison).
const protoFlag = -1000
// protoTokname returns a printable name for internal token number c: the
// entry from protoToknames when a non-empty one exists, otherwise a generic
// "tok-<n>" placeholder.
func protoTokname(c int) string {
	if idx := c - 1; idx >= 0 && idx < len(protoToknames) && protoToknames[idx] != "" {
		return protoToknames[idx]
	}
	return __yyfmt__.Sprintf("tok-%v", c)
}
// protoStatname returns a printable name for parser state s: the entry from
// protoStatenames when a non-empty one exists, otherwise a generic
// "state-<n>" placeholder.
func protoStatname(s int) string {
	if s >= 0 && s < len(protoStatenames) && protoStatenames[s] != "" {
		return protoStatenames[s]
	}
	return __yyfmt__.Sprintf("state-%v", s)
}
// protoErrorMessage builds the message passed to the lexer's Error method
// for a syntax error in the given parser state with the given lookahead
// token.
//
// When protoErrorVerbose is false it returns the bare "syntax error".
// Otherwise it first checks protoErrorMessages for a hand-written message
// for this exact (state, token) pair, then constructs
// "syntax error: unexpected X, expecting A or B ..." by probing the parse
// tables for tokens that would have been legal in this state.
func protoErrorMessage(state, lookAhead int) string {
	// Token numbers below TOKSTART ($end, error, $unk) are never suggested
	// as expected tokens.
	const TOKSTART = 4
	if !protoErrorVerbose {
		return "syntax error"
	}
	for _, e := range protoErrorMessages {
		if e.state == state && e.token == lookAhead {
			return "syntax error: " + e.msg
		}
	}
	res := "syntax error: unexpected " + protoTokname(lookAhead)
	// To match Bison, suggest at most four expected tokens.
	expected := make([]int, 0, 4)
	// Look for shiftable tokens: tok is expected if the action-table entry
	// at protoPact[state]+tok is a valid shift for tok.
	base := int(protoPact[state])
	for tok := TOKSTART; tok-1 < len(protoToknames); tok++ {
		if n := base + tok; n >= 0 && n < protoLast && int(protoChk[int(protoAct[n])]) == tok {
			if len(expected) == cap(expected) {
				// Too many candidates; return the short form with no list.
				return res
			}
			expected = append(expected, tok)
		}
	}
	if protoDef[state] == -2 {
		// The default action depends on the lookahead: locate this state's
		// section of the exception table (each section starts with -1, state).
		i := 0
		for protoExca[i] != -1 || int(protoExca[i+1]) != state {
			i += 2
		}
		// Look for tokens that we accept or reduce.
		for i += 2; protoExca[i] >= 0; i += 2 {
			tok := int(protoExca[i])
			if tok < TOKSTART || protoExca[i+1] == 0 {
				continue
			}
			if len(expected) == cap(expected) {
				return res
			}
			expected = append(expected, tok)
		}
		// If the default action is to accept or reduce, give up.
		if protoExca[i+1] != 0 {
			return res
		}
	}
	// Append the collected suggestions: ", expecting A or B or C".
	for i, tok := range expected {
		if i == 0 {
			res += ", expecting "
		} else {
			res += " or "
		}
		res += protoTokname(tok)
	}
	return res
}
// protolex1 fetches the next token from lex and translates the lexer's raw
// character code into the parser's internal token numbering.
//
// char is the untranslated code returned by Lex (kept for diagnostics and
// for matching against protoTok3); token is the internal number used to
// index the parse tables. A code with no mapping becomes protoTok2[1], the
// "unknown char" token.
func protolex1(lex protoLexer, lval *protoSymType) (char, token int) {
	token = 0
	char = lex.Lex(lval)
	if char <= 0 {
		// Non-positive code: end of input (protoTok1[0] is the EOF token).
		token = int(protoTok1[0])
		goto out
	}
	if char < len(protoTok1) {
		// Single-byte literal characters map through the protoTok1 table.
		token = int(protoTok1[char])
		goto out
	}
	if char >= protoPrivate {
		if char < protoPrivate+len(protoTok2) {
			// Named terminals live in the private code range.
			token = int(protoTok2[char-protoPrivate])
			goto out
		}
	}
	// Anything else: scan the (code, token) pair list in protoTok3.
	for i := 0; i < len(protoTok3); i += 2 {
		token = int(protoTok3[i+0])
		if token == char {
			token = int(protoTok3[i+1])
			goto out
		}
	}
out:
	if token == 0 {
		token = int(protoTok2[1]) /* unknown char */
	}
	if protoDebug >= 3 {
		__yyfmt__.Printf("lex %s(%d)\n", protoTokname(token), uint(char))
	}
	return char, token
}
// protoParse runs a complete parse of the input produced by protolex using a
// freshly allocated parser, returning 0 on success and 1 on unrecoverable
// error (the values produced by the generated Parse method).
func protoParse(protolex protoLexer) int {
	return protoNewParser().Parse(protolex)
}
func (protorcvr *protoParserImpl) Parse(protolex protoLexer) int {
var proton int
var protoVAL protoSymType
var protoDollar []protoSymType
_ = protoDollar // silence set and not used
protoS := protorcvr.stack[:]
Nerrs := 0 /* number of errors */
Errflag := 0 /* error recovery flag */
protostate := 0
protorcvr.char = -1
prototoken := -1 // protorcvr.char translated into internal numbering
defer func() {
// Make sure we report no lookahead when not parsing.
protostate = -1
protorcvr.char = -1
prototoken = -1
}()
protop := -1
goto protostack
ret0:
return 0
ret1:
return 1
protostack:
/* put a state and value onto the stack */
if protoDebug >= 4 {
__yyfmt__.Printf("char %v in %v\n", protoTokname(prototoken), protoStatname(protostate))
}
protop++
if protop >= len(protoS) {
nyys := make([]protoSymType, len(protoS)*2)
copy(nyys, protoS)
protoS = nyys
}
protoS[protop] = protoVAL
protoS[protop].yys = protostate
protonewstate:
proton = int(protoPact[protostate])
if proton <= protoFlag {
goto protodefault /* simple state */
}
if protorcvr.char < 0 {
protorcvr.char, prototoken = protolex1(protolex, &protorcvr.lval)
}
proton += prototoken
if proton < 0 || proton >= protoLast {
goto protodefault
}
proton = int(protoAct[proton])
if int(protoChk[proton]) == prototoken { /* valid shift */
protorcvr.char = -1
prototoken = -1
protoVAL = protorcvr.lval
protostate = proton
if Errflag > 0 {
Errflag--
}
goto protostack
}
protodefault:
/* default state action */
proton = int(protoDef[protostate])
if proton == -2 {
if protorcvr.char < 0 {
protorcvr.char, prototoken = protolex1(protolex, &protorcvr.lval)
}
/* look through exception table */
xi := 0
for {
if protoExca[xi+0] == -1 && int(protoExca[xi+1]) == protostate {
break
}
xi += 2
}
for xi += 2; ; xi += 2 {
proton = int(protoExca[xi+0])
if proton < 0 || proton == prototoken {
break
}
}
proton = int(protoExca[xi+1])
if proton < 0 {
goto ret0
}
}
if proton == 0 {
/* error ... attempt to resume parsing */
switch Errflag {
case 0: /* brand new error */
protolex.Error(protoErrorMessage(protostate, prototoken))
Nerrs++
if protoDebug >= 1 {
__yyfmt__.Printf("%s", protoStatname(protostate))
__yyfmt__.Printf(" saw %s\n", protoTokname(prototoken))
}
fallthrough
case 1, 2: /* incompletely recovered error ... try again */
Errflag = 3
/* find a state where "error" is a legal shift action */
for protop >= 0 {
proton = int(protoPact[protoS[protop].yys]) + protoErrCode
if proton >= 0 && proton < protoLast {
protostate = int(protoAct[proton]) /* simulate a shift of "error" */
if int(protoChk[protostate]) == protoErrCode {
goto protostack
}
}
/* the current p has no shift on "error", pop stack */
if protoDebug >= 2 {
__yyfmt__.Printf("error recovery pops state %d\n", protoS[protop].yys)
}
protop--
}
/* there is no state on the stack with an error shift ... abort */
goto ret1
case 3: /* no shift yet; clobber input char */
if protoDebug >= 2 {
__yyfmt__.Printf("error recovery discards %s\n", protoTokname(prototoken))
}
if prototoken == protoEofCode {
goto ret1
}
protorcvr.char = -1
prototoken = -1
goto protonewstate /* try again in the same state */
}
}
/* reduction by production proton */
if protoDebug >= 2 {
__yyfmt__.Printf("reduce %v in:\n\t%v\n", proton, protoStatname(protostate))
}
protont := proton
protopt := protop
_ = protopt // guard against "declared and not used"
protop -= int(protoR2[proton])
// protop is now the index of $0. Perform the default action. Iff the
// reduced production is ε, $1 is possibly out of range.
if protop+1 >= len(protoS) {
nyys := make([]protoSymType, len(protoS)*2)
copy(nyys, protoS)
protoS = nyys
}
protoVAL = protoS[protop+1]
/* consult goto table to find next state */
proton = int(protoR1[proton])
protog := int(protoPgo[proton])
protoj := protog + protoS[protop].yys + 1
if protoj >= protoLast {
protostate = int(protoAct[protog])
} else {
protostate = int(protoAct[protoj])
if int(protoChk[protostate]) != -proton {
protostate = int(protoAct[protog])
}
}
// dummy call; replaced with literal code
switch protont {
case 1:
protoDollar = protoS[protopt-1 : protopt+1]
{
lex := protolex.(*protoLex)
protoVAL.file = ast.NewFileNode(lex.info, protoDollar[1].syn, nil, lex.eof)
lex.res = protoVAL.file
}
case 2:
protoDollar = protoS[protopt-1 : protopt+1]
{
lex := protolex.(*protoLex)
protoVAL.file = ast.NewFileNodeWithEdition(lex.info, protoDollar[1].ed, nil, lex.eof)
lex.res = protoVAL.file
}
case 3:
protoDollar = protoS[protopt-1 : protopt+1]
{
lex := protolex.(*protoLex)
protoVAL.file = ast.NewFileNode(lex.info, nil, protoDollar[1].fileElements, lex.eof)
lex.res = protoVAL.file
}
case 4:
protoDollar = protoS[protopt-2 : protopt+1]
{
lex := protolex.(*protoLex)
protoVAL.file = ast.NewFileNode(lex.info, protoDollar[1].syn, protoDollar[2].fileElements, lex.eof)
lex.res = protoVAL.file
}
case 5:
protoDollar = protoS[protopt-2 : protopt+1]
{
lex := protolex.(*protoLex)
protoVAL.file = ast.NewFileNodeWithEdition(lex.info, protoDollar[1].ed, protoDollar[2].fileElements, lex.eof)
lex.res = protoVAL.file
}
case 6:
protoDollar = protoS[protopt-0 : protopt+1]
{
lex := protolex.(*protoLex)
protoVAL.file = ast.NewFileNode(lex.info, nil, nil, lex.eof)
lex.res = protoVAL.file
}
case 7:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.fileElements = prependRunes(toFileElement, protoDollar[1].bs, protoDollar[2].fileElements)
}
case 8:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.fileElements = append(protoDollar[1].fileElements, protoDollar[2].fileElements...)
}
case 9:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.fileElements = protoDollar[1].fileElements
}
case 10:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].imprt.Node, protoDollar[1].imprt.Runes)
}
case 11:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].pkg.Node, protoDollar[1].pkg.Runes)
}
case 12:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].opt.Node, protoDollar[1].opt.Runes)
}
case 13:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].msg.Node, protoDollar[1].msg.Runes)
}
case 14:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].en.Node, protoDollar[1].en.Runes)
}
case 15:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].extend.Node, protoDollar[1].extend.Runes)
}
case 16:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.fileElements = toElements[ast.FileElement](toFileElement, protoDollar[1].svc.Node, protoDollar[1].svc.Runes)
}
case 17:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.fileElements = nil
}
case 18:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.bs = []*ast.RuneNode{protoDollar[1].b}
}
case 19:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.bs = append(protoDollar[1].bs, protoDollar[2].b)
}
case 20:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.bs = protoDollar[1].bs
}
case 21:
protoDollar = protoS[protopt-0 : protopt+1]
{
protoVAL.bs = nil
}
case 22:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.b = protoDollar[1].b
}
case 23:
protoDollar = protoS[protopt-0 : protopt+1]
{
protolex.(*protoLex).Error("syntax error: expecting ';'")
protoVAL.b = nil
}
case 24:
protoDollar = protoS[protopt-4 : protopt+1]
{
protoVAL.syn = ast.NewSyntaxNode(protoDollar[1].id.ToKeyword(), protoDollar[2].b, toStringValueNode(protoDollar[3].str), protoDollar[4].b)
}
case 25:
protoDollar = protoS[protopt-4 : protopt+1]
{
protoVAL.ed = ast.NewEditionNode(protoDollar[1].id.ToKeyword(), protoDollar[2].b, toStringValueNode(protoDollar[3].str), protoDollar[4].b)
}
case 26:
protoDollar = protoS[protopt-3 : protopt+1]
{
semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs)
protoVAL.imprt = newNodeWithRunes(ast.NewImportNode(protoDollar[1].id.ToKeyword(), nil, nil, toStringValueNode(protoDollar[2].str), semi), extra...)
}
case 27:
protoDollar = protoS[protopt-4 : protopt+1]
{
semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs)
protoVAL.imprt = newNodeWithRunes(ast.NewImportNode(protoDollar[1].id.ToKeyword(), nil, protoDollar[2].id.ToKeyword(), toStringValueNode(protoDollar[3].str), semi), extra...)
}
case 28:
protoDollar = protoS[protopt-4 : protopt+1]
{
semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs)
protoVAL.imprt = newNodeWithRunes(ast.NewImportNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), nil, toStringValueNode(protoDollar[3].str), semi), extra...)
}
case 29:
protoDollar = protoS[protopt-3 : protopt+1]
{
semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs)
protoVAL.pkg = newNodeWithRunes(ast.NewPackageNode(protoDollar[1].id.ToKeyword(), protoDollar[2].cid.toIdentValueNode(nil), semi), extra...)
}
case 30:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].id}}
}
case 31:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[3].id)
protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].b)
protoVAL.cid = protoDollar[1].cid
}
case 32:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].cidPart.Node}, dots: protoDollar[1].cidPart.Runes}
}
case 33:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[2].cidPart.Node)
protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].cidPart.Runes...)
protoVAL.cid = protoDollar[1].cid
}
case 34:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].cidPart.Node}, dots: protoDollar[1].cidPart.Runes}
}
case 35:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[2].cidPart.Node)
protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].cidPart.Runes...)
protoVAL.cid = protoDollar[1].cid
}
case 36:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.cidPart = newNodeWithRunes(protoDollar[1].id)
}
case 37:
protoDollar = protoS[protopt-1 : protopt+1]
{
protolex.(*protoLex).Error("syntax error: unexpected '.'")
protoVAL.cidPart = protoDollar[1].cidPart
}
case 38:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.cidPart = newNodeWithRunes(protoDollar[1].id, protoDollar[2].b)
}
case 39:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].id}}
}
case 40:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[3].id)
protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].b)
protoVAL.cid = protoDollar[1].cid
}
case 41:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].id}}
}
case 42:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[3].id)
protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].b)
protoVAL.cid = protoDollar[1].cid
}
case 43:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].id}}
}
case 44:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[3].id)
protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].b)
protoVAL.cid = protoDollar[1].cid
}
case 45:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].id}}
}
case 46:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[3].id)
protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].b)
protoVAL.cid = protoDollar[1].cid
}
case 47:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].cidPart.Node}, dots: protoDollar[1].cidPart.Runes}
}
case 48:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[2].cidPart.Node)
protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].cidPart.Runes...)
protoVAL.cid = protoDollar[1].cid
}
case 49:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.cid = &identSlices{idents: []*ast.IdentNode{protoDollar[1].cidPart.Node}, dots: protoDollar[1].cidPart.Runes}
}
case 50:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoDollar[1].cid.idents = append(protoDollar[1].cid.idents, protoDollar[2].cidPart.Node)
protoDollar[1].cid.dots = append(protoDollar[1].cid.dots, protoDollar[2].cidPart.Runes...)
protoVAL.cid = protoDollar[1].cid
}
case 51:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.cidPart = newNodeWithRunes(protoDollar[1].id)
}
case 52:
protoDollar = protoS[protopt-1 : protopt+1]
{
protolex.(*protoLex).Error("syntax error: unexpected '.'")
protoVAL.cidPart = protoDollar[1].cidPart
}
case 53:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.cidPart = newNodeWithRunes(protoDollar[1].id, protoDollar[2].b)
}
case 54:
protoDollar = protoS[protopt-5 : protopt+1]
{
optName := ast.NewOptionNameNode(protoDollar[2].optNms.refs, protoDollar[2].optNms.dots)
protoVAL.optRaw = ast.NewOptionNode(protoDollar[1].id.ToKeyword(), optName, protoDollar[3].b, protoDollar[4].v, protoDollar[5].b)
}
case 55:
protoDollar = protoS[protopt-5 : protopt+1]
{
optName := ast.NewOptionNameNode(protoDollar[2].optNms.refs, protoDollar[2].optNms.dots)
semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[5].bs)
protoVAL.opt = newNodeWithRunes(ast.NewOptionNode(protoDollar[1].id.ToKeyword(), optName, protoDollar[3].b, protoDollar[4].v, semi), extra...)
}
case 56:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.refRaw = ast.NewFieldReferenceNode(protoDollar[1].id)
}
case 57:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.refRaw = protoDollar[1].refRaw
}
case 58:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.ref = newNodeWithRunes(protoDollar[1].refRaw, protoDollar[2].b)
}
case 59:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.ref = newNodeWithRunes(protoDollar[1].refRaw)
}
case 60:
protoDollar = protoS[protopt-1 : protopt+1]
{
protolex.(*protoLex).Error("syntax error: unexpected '.'")
protoVAL.ref = protoDollar[1].ref
}
case 61:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.optNms = &fieldRefSlices{refs: []*ast.FieldReferenceNode{protoDollar[1].ref.Node}, dots: protoDollar[1].ref.Runes}
}
case 62:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoDollar[1].optNms.refs = append(protoDollar[1].optNms.refs, protoDollar[2].ref.Node)
protoDollar[1].optNms.dots = append(protoDollar[1].optNms.dots, protoDollar[2].ref.Runes...)
protoVAL.optNms = protoDollar[1].optNms
}
case 63:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.optNms = &fieldRefSlices{refs: []*ast.FieldReferenceNode{protoDollar[1].ref.Node}, dots: protoDollar[1].ref.Runes}
}
case 64:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoDollar[1].optNms.refs = append(protoDollar[1].optNms.refs, protoDollar[2].ref.Node)
protoDollar[1].optNms.dots = append(protoDollar[1].optNms.dots, protoDollar[2].ref.Runes...)
protoVAL.optNms = protoDollar[1].optNms
}
case 65:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoVAL.refRaw = ast.NewExtensionFieldReferenceNode(protoDollar[1].b, protoDollar[2].tid, protoDollar[3].b)
}
case 68:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.v = toStringValueNode(protoDollar[1].str)
}
case 71:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.v = protoDollar[1].id
}
case 72:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.v = protoDollar[1].f
}
case 73:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, protoDollar[2].f)
}
case 74:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.v = protoDollar[1].i
}
case 75:
protoDollar = protoS[protopt-2 : protopt+1]
{
if protoDollar[2].i.Val > math.MaxInt64+1 {
// can't represent as int so treat as float literal
protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, protoDollar[2].i)
} else {
protoVAL.v = ast.NewNegativeIntLiteralNode(protoDollar[1].b, protoDollar[2].i)
}
}
case 76:
protoDollar = protoS[protopt-2 : protopt+1]
{
f := ast.NewSpecialFloatLiteralNode(protoDollar[2].id.ToKeyword())
protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, f)
}
case 77:
protoDollar = protoS[protopt-2 : protopt+1]
{
f := ast.NewSpecialFloatLiteralNode(protoDollar[2].id.ToKeyword())
protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, f)
}
case 78:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.str = []*ast.StringLiteralNode{protoDollar[1].s}
}
case 79:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.str = append(protoDollar[1].str, protoDollar[2].s)
}
case 80:
protoDollar = protoS[protopt-3 : protopt+1]
{
if protoDollar[2].msgLitFlds == nil {
protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b)
} else {
fields, delimiters := protoDollar[2].msgLitFlds.toNodes()
protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, fields, delimiters, protoDollar[3].b)
}
}
case 81:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, nil, nil, protoDollar[2].b)
}
case 84:
protoDollar = protoS[protopt-2 : protopt+1]
{
if protoDollar[1].msgLitFlds != nil {
protoDollar[1].msgLitFlds.next = protoDollar[2].msgLitFlds
protoVAL.msgLitFlds = protoDollar[1].msgLitFlds
} else {
protoVAL.msgLitFlds = protoDollar[2].msgLitFlds
}
}
case 85:
protoDollar = protoS[protopt-1 : protopt+1]
{
if protoDollar[1].msgLitFld != nil {
protoVAL.msgLitFlds = &messageFieldList{field: protoDollar[1].msgLitFld}
} else {
protoVAL.msgLitFlds = nil
}
}
case 86:
protoDollar = protoS[protopt-2 : protopt+1]
{
if protoDollar[1].msgLitFld != nil {
protoVAL.msgLitFlds = &messageFieldList{field: protoDollar[1].msgLitFld, delimiter: protoDollar[2].b}
} else {
protoVAL.msgLitFlds = nil
}
}
case 87:
protoDollar = protoS[protopt-2 : protopt+1]
{
if protoDollar[1].msgLitFld != nil {
protoVAL.msgLitFlds = &messageFieldList{field: protoDollar[1].msgLitFld, delimiter: protoDollar[2].b}
} else {
protoVAL.msgLitFlds = nil
}
}
case 88:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.msgLitFlds = nil
}
case 89:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.msgLitFlds = nil
}
case 90:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.msgLitFlds = nil
}
case 91:
protoDollar = protoS[protopt-3 : protopt+1]
{
if protoDollar[1].refRaw != nil && protoDollar[2].b != nil {
protoVAL.msgLitFld = ast.NewMessageFieldNode(protoDollar[1].refRaw, protoDollar[2].b, protoDollar[3].v)
} else {
protoVAL.msgLitFld = nil
}
}
case 92:
protoDollar = protoS[protopt-2 : protopt+1]
{
if protoDollar[1].refRaw != nil && protoDollar[2].v != nil {
protoVAL.msgLitFld = ast.NewMessageFieldNode(protoDollar[1].refRaw, nil, protoDollar[2].v)
} else {
protoVAL.msgLitFld = nil
}
}
case 93:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoVAL.msgLitFld = nil
}
case 94:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.refRaw = ast.NewFieldReferenceNode(protoDollar[1].id)
}
case 95:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoVAL.refRaw = ast.NewExtensionFieldReferenceNode(protoDollar[1].b, protoDollar[2].cid.toIdentValueNode(nil), protoDollar[3].b)
}
case 96:
protoDollar = protoS[protopt-5 : protopt+1]
{
protoVAL.refRaw = ast.NewAnyTypeReferenceNode(protoDollar[1].b, protoDollar[2].cid.toIdentValueNode(nil), protoDollar[3].b, protoDollar[4].cid.toIdentValueNode(nil), protoDollar[5].b)
}
case 97:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoVAL.refRaw = nil
}
case 101:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.v = toStringValueNode(protoDollar[1].str)
}
case 103:
protoDollar = protoS[protopt-2 : protopt+1]
{
kw := protoDollar[2].id.ToKeyword()
switch strings.ToLower(kw.Val) {
case "inf", "infinity", "nan":
// these are acceptable
default:
// anything else is not
protolex.(*protoLex).Error(`only identifiers "inf", "infinity", or "nan" may appear after negative sign`)
}
// we'll validate the identifier later
f := ast.NewSpecialFloatLiteralNode(kw)
protoVAL.v = ast.NewSignedFloatLiteralNode(protoDollar[1].b, f)
}
case 104:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.v = protoDollar[1].id
}
case 108:
protoDollar = protoS[protopt-3 : protopt+1]
{
if protoDollar[2].msgLitFlds == nil {
protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b)
} else {
fields, delimiters := protoDollar[2].msgLitFlds.toNodes()
protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, fields, delimiters, protoDollar[3].b)
}
}
case 109:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.v = ast.NewMessageLiteralNode(protoDollar[1].b, nil, nil, protoDollar[2].b)
}
case 110:
protoDollar = protoS[protopt-3 : protopt+1]
{
if protoDollar[2].sl == nil {
protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b)
} else {
protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, protoDollar[2].sl.vals, protoDollar[2].sl.commas, protoDollar[3].b)
}
}
case 111:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[2].b)
}
case 112:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b)
}
case 113:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.sl = &valueSlices{vals: []ast.ValueNode{protoDollar[1].v}}
}
case 114:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoDollar[1].sl.vals = append(protoDollar[1].sl.vals, protoDollar[3].v)
protoDollar[1].sl.commas = append(protoDollar[1].sl.commas, protoDollar[2].b)
protoVAL.sl = protoDollar[1].sl
}
case 117:
protoDollar = protoS[protopt-3 : protopt+1]
{
if protoDollar[2].sl == nil {
protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b)
} else {
protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, protoDollar[2].sl.vals, protoDollar[2].sl.commas, protoDollar[3].b)
}
}
case 118:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[2].b)
}
case 119:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoVAL.v = ast.NewArrayLiteralNode(protoDollar[1].b, nil, nil, protoDollar[3].b)
}
case 120:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.sl = &valueSlices{vals: []ast.ValueNode{protoDollar[1].v}}
}
case 121:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoDollar[1].sl.vals = append(protoDollar[1].sl.vals, protoDollar[3].v)
protoDollar[1].sl.commas = append(protoDollar[1].sl.commas, protoDollar[2].b)
protoVAL.sl = protoDollar[1].sl
}
case 122:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil)
}
case 123:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b)
}
case 124:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil)
}
case 125:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b)
}
case 126:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil)
}
case 127:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b)
}
case 128:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil)
}
case 129:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b)
}
case 130:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil)
}
case 131:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b)
}
case 132:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.tid = protoDollar[1].cid.toIdentValueNode(nil)
}
case 133:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.tid = protoDollar[2].cid.toIdentValueNode(protoDollar[1].b)
}
case 137:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoVAL.cmpctOpts = ast.NewCompactOptionsNode(protoDollar[1].b, protoDollar[2].opts.options, protoDollar[2].opts.commas, protoDollar[3].b)
}
case 138:
protoDollar = protoS[protopt-2 : protopt+1]
{
protolex.(*protoLex).Error("compact options must have at least one option")
protoVAL.cmpctOpts = ast.NewCompactOptionsNode(protoDollar[1].b, nil, nil, protoDollar[2].b)
}
case 139:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.opts = &compactOptionSlices{options: []*ast.OptionNode{protoDollar[1].opt.Node}, commas: protoDollar[1].opt.Runes}
}
case 140:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoDollar[1].opts.options = append(protoDollar[1].opts.options, protoDollar[2].opt.Node)
protoDollar[1].opts.commas = append(protoDollar[1].opts.commas, protoDollar[2].opt.Runes...)
protoVAL.opts = protoDollar[1].opts
}
case 141:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.opts = &compactOptionSlices{options: []*ast.OptionNode{protoDollar[1].opt.Node}, commas: protoDollar[1].opt.Runes}
}
case 142:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoDollar[1].opts.options = append(protoDollar[1].opts.options, protoDollar[2].opt.Node)
protoDollar[1].opts.commas = append(protoDollar[1].opts.commas, protoDollar[2].opt.Runes...)
protoVAL.opts = protoDollar[1].opts
}
case 143:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.opt = newNodeWithRunes(protoDollar[1].optRaw)
}
case 144:
protoDollar = protoS[protopt-1 : protopt+1]
{
protolex.(*protoLex).Error("syntax error: unexpected ','")
protoVAL.opt = protoDollar[1].opt
}
case 145:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.opt = newNodeWithRunes(protoDollar[1].optRaw, protoDollar[2].b)
}
case 146:
protoDollar = protoS[protopt-3 : protopt+1]
{
optName := ast.NewOptionNameNode(protoDollar[1].optNms.refs, protoDollar[1].optNms.dots)
protoVAL.optRaw = ast.NewCompactOptionNode(optName, protoDollar[2].b, protoDollar[3].v)
}
case 147:
protoDollar = protoS[protopt-1 : protopt+1]
{
optName := ast.NewOptionNameNode(protoDollar[1].optNms.refs, protoDollar[1].optNms.dots)
protolex.(*protoLex).Error("compact option must have a value")
protoVAL.optRaw = ast.NewCompactOptionNode(optName, nil, nil)
}
case 148:
protoDollar = protoS[protopt-8 : protopt+1]
{
protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b, protoDollar[7].msgElements, protoDollar[8].b)
}
case 149:
protoDollar = protoS[protopt-9 : protopt+1]
{
protoVAL.grp = ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b, protoDollar[8].msgElements, protoDollar[9].b)
}
case 150:
protoDollar = protoS[protopt-9 : protopt+1]
{
protoVAL.msgGrp = newNodeWithRunes(ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b, protoDollar[7].msgElements, protoDollar[8].b), protoDollar[9].bs...)
}
case 151:
protoDollar = protoS[protopt-10 : protopt+1]
{
protoVAL.msgGrp = newNodeWithRunes(ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b, protoDollar[8].msgElements, protoDollar[9].b), protoDollar[10].bs...)
}
case 152:
protoDollar = protoS[protopt-7 : protopt+1]
{
protoVAL.msgGrp = newNodeWithRunes(ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, nil, nil, nil, protoDollar[4].b, protoDollar[5].msgElements, protoDollar[6].b), protoDollar[7].bs...)
}
case 153:
protoDollar = protoS[protopt-8 : protopt+1]
{
protoVAL.msgGrp = newNodeWithRunes(ast.NewGroupNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id.ToKeyword(), protoDollar[3].id, nil, nil, protoDollar[4].cmpctOpts, protoDollar[5].b, protoDollar[6].msgElements, protoDollar[7].b), protoDollar[8].bs...)
}
case 154:
protoDollar = protoS[protopt-6 : protopt+1]
{
protoVAL.oo = newNodeWithRunes(ast.NewOneofNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].ooElements, protoDollar[5].b), protoDollar[6].bs...)
}
case 155:
protoDollar = protoS[protopt-0 : protopt+1]
{
protoVAL.ooElements = nil
}
case 157:
protoDollar = protoS[protopt-2 : protopt+1]
{
if protoDollar[2].ooElement != nil {
protoVAL.ooElements = append(protoDollar[1].ooElements, protoDollar[2].ooElement)
} else {
protoVAL.ooElements = protoDollar[1].ooElements
}
}
case 158:
protoDollar = protoS[protopt-1 : protopt+1]
{
if protoDollar[1].ooElement != nil {
protoVAL.ooElements = []ast.OneofElement{protoDollar[1].ooElement}
} else {
protoVAL.ooElements = nil
}
}
case 159:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.ooElement = protoDollar[1].optRaw
}
case 160:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.ooElement = protoDollar[1].fld
}
case 161:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.ooElement = protoDollar[1].grp
}
case 162:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.ooElement = nil
}
case 163:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.ooElement = nil
}
case 164:
protoDollar = protoS[protopt-5 : protopt+1]
{
protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b)
}
case 165:
protoDollar = protoS[protopt-6 : protopt+1]
{
protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b)
}
case 166:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, nil, nil, nil, protoDollar[3].b)
}
case 167:
protoDollar = protoS[protopt-4 : protopt+1]
{
protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, nil, nil, protoDollar[3].cmpctOpts, protoDollar[4].b)
}
case 168:
protoDollar = protoS[protopt-7 : protopt+1]
{
protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b, protoDollar[6].msgElements, protoDollar[7].b)
}
case 169:
protoDollar = protoS[protopt-8 : protopt+1]
{
protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b, protoDollar[7].msgElements, protoDollar[8].b)
}
case 170:
protoDollar = protoS[protopt-5 : protopt+1]
{
protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, nil, nil, nil, protoDollar[3].b, protoDollar[4].msgElements, protoDollar[5].b)
}
case 171:
protoDollar = protoS[protopt-6 : protopt+1]
{
protoVAL.grp = ast.NewGroupNode(nil, protoDollar[1].id.ToKeyword(), protoDollar[2].id, nil, nil, protoDollar[3].cmpctOpts, protoDollar[4].b, protoDollar[5].msgElements, protoDollar[6].b)
}
case 172:
protoDollar = protoS[protopt-5 : protopt+1]
{
semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[5].bs)
protoVAL.mapFld = newNodeWithRunes(ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, semi), extra...)
}
case 173:
protoDollar = protoS[protopt-6 : protopt+1]
{
semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[6].bs)
protoVAL.mapFld = newNodeWithRunes(ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, semi), extra...)
}
case 174:
protoDollar = protoS[protopt-3 : protopt+1]
{
semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs)
protoVAL.mapFld = newNodeWithRunes(ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, nil, nil, nil, semi), extra...)
}
case 175:
protoDollar = protoS[protopt-4 : protopt+1]
{
semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs)
protoVAL.mapFld = newNodeWithRunes(ast.NewMapFieldNode(protoDollar[1].mapType, protoDollar[2].id, nil, nil, protoDollar[3].cmpctOpts, semi), extra...)
}
case 176:
protoDollar = protoS[protopt-6 : protopt+1]
{
protoVAL.mapType = ast.NewMapTypeNode(protoDollar[1].id.ToKeyword(), protoDollar[2].b, protoDollar[3].id, protoDollar[4].b, protoDollar[5].tid, protoDollar[6].b)
}
case 189:
protoDollar = protoS[protopt-4 : protopt+1]
{
// TODO: Tolerate a missing semicolon here. This currently creates a shift/reduce conflict
// between `extensions 1 to 10` and `extensions 1` followed by `to = 10`.
protoVAL.ext = newNodeWithRunes(ast.NewExtensionRangeNode(protoDollar[1].id.ToKeyword(), protoDollar[2].rngs.ranges, protoDollar[2].rngs.commas, nil, protoDollar[3].b), protoDollar[4].bs...)
}
case 190:
protoDollar = protoS[protopt-4 : protopt+1]
{
semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs)
protoVAL.ext = newNodeWithRunes(ast.NewExtensionRangeNode(protoDollar[1].id.ToKeyword(), protoDollar[2].rngs.ranges, protoDollar[2].rngs.commas, protoDollar[3].cmpctOpts, semi), extra...)
}
case 191:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.rngs = &rangeSlices{ranges: []*ast.RangeNode{protoDollar[1].rng}}
}
case 192:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoDollar[1].rngs.ranges = append(protoDollar[1].rngs.ranges, protoDollar[3].rng)
protoDollar[1].rngs.commas = append(protoDollar[1].rngs.commas, protoDollar[2].b)
protoVAL.rngs = protoDollar[1].rngs
}
case 193:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.rng = ast.NewRangeNode(protoDollar[1].i, nil, nil, nil)
}
case 194:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoVAL.rng = ast.NewRangeNode(protoDollar[1].i, protoDollar[2].id.ToKeyword(), protoDollar[3].i, nil)
}
case 195:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoVAL.rng = ast.NewRangeNode(protoDollar[1].i, protoDollar[2].id.ToKeyword(), nil, protoDollar[3].id.ToKeyword())
}
case 196:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.rngs = &rangeSlices{ranges: []*ast.RangeNode{protoDollar[1].rng}}
}
case 197:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoDollar[1].rngs.ranges = append(protoDollar[1].rngs.ranges, protoDollar[3].rng)
protoDollar[1].rngs.commas = append(protoDollar[1].rngs.commas, protoDollar[2].b)
protoVAL.rngs = protoDollar[1].rngs
}
case 198:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.rng = ast.NewRangeNode(protoDollar[1].il, nil, nil, nil)
}
case 199:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoVAL.rng = ast.NewRangeNode(protoDollar[1].il, protoDollar[2].id.ToKeyword(), protoDollar[3].il, nil)
}
case 200:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoVAL.rng = ast.NewRangeNode(protoDollar[1].il, protoDollar[2].id.ToKeyword(), nil, protoDollar[3].id.ToKeyword())
}
case 201:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.il = protoDollar[1].i
}
case 202:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.il = ast.NewNegativeIntLiteralNode(protoDollar[1].b, protoDollar[2].i)
}
case 203:
protoDollar = protoS[protopt-4 : protopt+1]
{
// TODO: Tolerate a missing semicolon here. This currently creates a shift/reduce conflict
// between `reserved 1 to 10` and `reserved 1` followed by `to = 10`.
protoVAL.resvd = newNodeWithRunes(ast.NewReservedRangesNode(protoDollar[1].id.ToKeyword(), protoDollar[2].rngs.ranges, protoDollar[2].rngs.commas, protoDollar[3].b), protoDollar[4].bs...)
}
case 205:
protoDollar = protoS[protopt-4 : protopt+1]
{
// TODO: Tolerate a missing semicolon here. This currently creates a shift/reduce conflict
// between `reserved 1 to 10` and `reserved 1` followed by `to = 10`.
protoVAL.resvd = newNodeWithRunes(ast.NewReservedRangesNode(protoDollar[1].id.ToKeyword(), protoDollar[2].rngs.ranges, protoDollar[2].rngs.commas, protoDollar[3].b), protoDollar[4].bs...)
}
case 207:
protoDollar = protoS[protopt-3 : protopt+1]
{
semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs)
protoVAL.resvd = newNodeWithRunes(ast.NewReservedNamesNode(protoDollar[1].id.ToKeyword(), protoDollar[2].names.names, protoDollar[2].names.commas, semi), extra...)
}
case 208:
protoDollar = protoS[protopt-3 : protopt+1]
{
semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs)
protoVAL.resvd = newNodeWithRunes(ast.NewReservedIdentifiersNode(protoDollar[1].id.ToKeyword(), protoDollar[2].names.idents, protoDollar[2].names.commas, semi), extra...)
}
case 209:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.names = &nameSlices{names: []ast.StringValueNode{toStringValueNode(protoDollar[1].str)}}
}
case 210:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoDollar[1].names.names = append(protoDollar[1].names.names, toStringValueNode(protoDollar[3].str))
protoDollar[1].names.commas = append(protoDollar[1].names.commas, protoDollar[2].b)
protoVAL.names = protoDollar[1].names
}
case 211:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.names = &nameSlices{idents: []*ast.IdentNode{protoDollar[1].id}}
}
case 212:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoDollar[1].names.idents = append(protoDollar[1].names.idents, protoDollar[3].id)
protoDollar[1].names.commas = append(protoDollar[1].names.commas, protoDollar[2].b)
protoVAL.names = protoDollar[1].names
}
case 213:
protoDollar = protoS[protopt-6 : protopt+1]
{
protoVAL.en = newNodeWithRunes(ast.NewEnumNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].enElements, protoDollar[5].b), protoDollar[6].bs...)
}
case 214:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.enElements = prependRunes(toEnumElement, protoDollar[1].bs, nil)
}
case 215:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.enElements = prependRunes(toEnumElement, protoDollar[1].bs, protoDollar[2].enElements)
}
case 216:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.enElements = append(protoDollar[1].enElements, protoDollar[2].enElements...)
}
case 217:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.enElements = protoDollar[1].enElements
}
case 218:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.enElements = toElements[ast.EnumElement](toEnumElement, protoDollar[1].opt.Node, protoDollar[1].opt.Runes)
}
case 219:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.enElements = toElements[ast.EnumElement](toEnumElement, protoDollar[1].env.Node, protoDollar[1].env.Runes)
}
case 220:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.enElements = toElements[ast.EnumElement](toEnumElement, protoDollar[1].resvd.Node, protoDollar[1].resvd.Runes)
}
case 221:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.enElements = nil
}
case 222:
protoDollar = protoS[protopt-4 : protopt+1]
{
semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs)
protoVAL.env = newNodeWithRunes(ast.NewEnumValueNode(protoDollar[1].id, protoDollar[2].b, protoDollar[3].il, nil, semi), extra...)
}
case 223:
protoDollar = protoS[protopt-5 : protopt+1]
{
semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[5].bs)
protoVAL.env = newNodeWithRunes(ast.NewEnumValueNode(protoDollar[1].id, protoDollar[2].b, protoDollar[3].il, protoDollar[4].cmpctOpts, semi), extra...)
}
case 224:
protoDollar = protoS[protopt-6 : protopt+1]
{
protoVAL.msg = newNodeWithRunes(ast.NewMessageNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].msgElements, protoDollar[5].b), protoDollar[6].bs...)
}
case 225:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.msgElements = prependRunes(toMessageElement, protoDollar[1].bs, nil)
}
case 226:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.msgElements = prependRunes(toMessageElement, protoDollar[1].bs, protoDollar[2].msgElements)
}
case 227:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.msgElements = append(protoDollar[1].msgElements, protoDollar[2].msgElements...)
}
case 228:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.msgElements = protoDollar[1].msgElements
}
case 229:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].msgFld.Node, protoDollar[1].msgFld.Runes)
}
case 230:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].en.Node, protoDollar[1].en.Runes)
}
case 231:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].msg.Node, protoDollar[1].msg.Runes)
}
case 232:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].extend.Node, protoDollar[1].extend.Runes)
}
case 233:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].ext.Node, protoDollar[1].ext.Runes)
}
case 234:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].msgGrp.Node, protoDollar[1].msgGrp.Runes)
}
case 235:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].opt.Node, protoDollar[1].opt.Runes)
}
case 236:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].oo.Node, protoDollar[1].oo.Runes)
}
case 237:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].mapFld.Node, protoDollar[1].mapFld.Runes)
}
case 238:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.msgElements = toElements[ast.MessageElement](toMessageElement, protoDollar[1].resvd.Node, protoDollar[1].resvd.Runes)
}
case 239:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.msgElements = nil
}
case 240:
protoDollar = protoS[protopt-6 : protopt+1]
{
semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[6].bs)
protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, semis), extra...)
}
case 241:
protoDollar = protoS[protopt-7 : protopt+1]
{
semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[7].bs)
protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, semis), extra...)
}
case 242:
protoDollar = protoS[protopt-5 : protopt+1]
{
semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[5].bs)
protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, semis), extra...)
}
case 243:
protoDollar = protoS[protopt-6 : protopt+1]
{
semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[6].bs)
protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, semis), extra...)
}
case 244:
protoDollar = protoS[protopt-4 : protopt+1]
{
semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs)
protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, nil, nil, nil, semis), extra...)
}
case 245:
protoDollar = protoS[protopt-5 : protopt+1]
{
semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[5].bs)
protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, nil, nil, protoDollar[4].cmpctOpts, semis), extra...)
}
case 246:
protoDollar = protoS[protopt-3 : protopt+1]
{
semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[3].bs)
protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, nil, nil, nil, semis), extra...)
}
case 247:
protoDollar = protoS[protopt-4 : protopt+1]
{
semis, extra := protolex.(*protoLex).requireSemicolon(protoDollar[4].bs)
protoVAL.msgFld = newNodeWithRunes(ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, nil, nil, protoDollar[3].cmpctOpts, semis), extra...)
}
case 248:
protoDollar = protoS[protopt-6 : protopt+1]
{
protoVAL.extend = newNodeWithRunes(ast.NewExtendNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].b, protoDollar[4].extElements, protoDollar[5].b), protoDollar[6].bs...)
}
case 249:
protoDollar = protoS[protopt-0 : protopt+1]
{
protoVAL.extElements = nil
}
case 251:
protoDollar = protoS[protopt-2 : protopt+1]
{
if protoDollar[2].extElement != nil {
protoVAL.extElements = append(protoDollar[1].extElements, protoDollar[2].extElement)
} else {
protoVAL.extElements = protoDollar[1].extElements
}
}
case 252:
protoDollar = protoS[protopt-1 : protopt+1]
{
if protoDollar[1].extElement != nil {
protoVAL.extElements = []ast.ExtendElement{protoDollar[1].extElement}
} else {
protoVAL.extElements = nil
}
}
case 253:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.extElement = protoDollar[1].fld
}
case 254:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.extElement = protoDollar[1].grp
}
case 255:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.extElement = nil
}
case 256:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.extElement = nil
}
case 257:
protoDollar = protoS[protopt-6 : protopt+1]
{
protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, nil, protoDollar[6].b)
}
case 258:
protoDollar = protoS[protopt-7 : protopt+1]
{
protoVAL.fld = ast.NewFieldNode(protoDollar[1].id.ToKeyword(), protoDollar[2].tid, protoDollar[3].id, protoDollar[4].b, protoDollar[5].i, protoDollar[6].cmpctOpts, protoDollar[7].b)
}
case 259:
protoDollar = protoS[protopt-5 : protopt+1]
{
protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, nil, protoDollar[5].b)
}
case 260:
protoDollar = protoS[protopt-6 : protopt+1]
{
protoVAL.fld = ast.NewFieldNode(nil, protoDollar[1].tid, protoDollar[2].id, protoDollar[3].b, protoDollar[4].i, protoDollar[5].cmpctOpts, protoDollar[6].b)
}
case 261:
protoDollar = protoS[protopt-6 : protopt+1]
{
protoVAL.svc = newNodeWithRunes(ast.NewServiceNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].b, protoDollar[4].svcElements, protoDollar[5].b), protoDollar[6].bs...)
}
case 262:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.svcElements = prependRunes(toServiceElement, protoDollar[1].bs, nil)
}
case 263:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.svcElements = prependRunes(toServiceElement, protoDollar[1].bs, protoDollar[2].svcElements)
}
case 264:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.svcElements = append(protoDollar[1].svcElements, protoDollar[2].svcElements...)
}
case 265:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.svcElements = protoDollar[1].svcElements
}
case 266:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.svcElements = toElements[ast.ServiceElement](toServiceElement, protoDollar[1].opt.Node, protoDollar[1].opt.Runes)
}
case 267:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.svcElements = toElements[ast.ServiceElement](toServiceElement, protoDollar[1].mtd.Node, protoDollar[1].mtd.Runes)
}
case 268:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.svcElements = nil
}
case 269:
protoDollar = protoS[protopt-6 : protopt+1]
{
semi, extra := protolex.(*protoLex).requireSemicolon(protoDollar[6].bs)
protoVAL.mtd = newNodeWithRunes(ast.NewRPCNode(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].mtdMsgType, protoDollar[4].id.ToKeyword(), protoDollar[5].mtdMsgType, semi), extra...)
}
case 270:
protoDollar = protoS[protopt-9 : protopt+1]
{
protoVAL.mtd = newNodeWithRunes(ast.NewRPCNodeWithBody(protoDollar[1].id.ToKeyword(), protoDollar[2].id, protoDollar[3].mtdMsgType, protoDollar[4].id.ToKeyword(), protoDollar[5].mtdMsgType, protoDollar[6].b, protoDollar[7].mtdElements, protoDollar[8].b), protoDollar[9].bs...)
}
case 271:
protoDollar = protoS[protopt-4 : protopt+1]
{
protoVAL.mtdMsgType = ast.NewRPCTypeNode(protoDollar[1].b, protoDollar[2].id.ToKeyword(), protoDollar[3].tid, protoDollar[4].b)
}
case 272:
protoDollar = protoS[protopt-3 : protopt+1]
{
protoVAL.mtdMsgType = ast.NewRPCTypeNode(protoDollar[1].b, nil, protoDollar[2].tid, protoDollar[3].b)
}
case 273:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.mtdElements = prependRunes(toMethodElement, protoDollar[1].bs, nil)
}
case 274:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.mtdElements = prependRunes(toMethodElement, protoDollar[1].bs, protoDollar[2].mtdElements)
}
case 275:
protoDollar = protoS[protopt-2 : protopt+1]
{
protoVAL.mtdElements = append(protoDollar[1].mtdElements, protoDollar[2].mtdElements...)
}
case 276:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.mtdElements = protoDollar[1].mtdElements
}
case 277:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.mtdElements = toElements[ast.RPCElement](toMethodElement, protoDollar[1].opt.Node, protoDollar[1].opt.Runes)
}
case 278:
protoDollar = protoS[protopt-1 : protopt+1]
{
protoVAL.mtdElements = nil
}
}
goto protostack /* stack new state and value */
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"bytes"
"fmt"
"math"
"sort"
"strings"
"unicode"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/descriptorpb"
"github.com/bufbuild/protocompile/ast"
"github.com/bufbuild/protocompile/internal"
"github.com/bufbuild/protocompile/internal/editions"
"github.com/bufbuild/protocompile/reporter"
)
// result is the concrete implementation of Result produced by this package.
// It pairs the built file descriptor proto with (optionally) the AST it was
// derived from, plus an index from descriptor messages back to AST nodes.
type result struct {
	// file is the parsed AST; nil when the result was created via
	// ResultWithoutAST.
	file *ast.FileNode
	// proto is the file descriptor constructed from the AST.
	proto *descriptorpb.FileDescriptorProto
	// nodes maps descriptor proto messages to the AST nodes they came from.
	nodes map[proto.Message]ast.Node
	// ifNoAST is the placeholder node (filename-only position info) used for
	// lookups when there is no AST.
	ifNoAST *ast.NoSourceNode
}
// ResultWithoutAST returns a parse result that has no AST. All methods for
// looking up AST nodes return a placeholder node that contains only the
// filename in position information.
func ResultWithoutAST(proto *descriptorpb.FileDescriptorProto) Result {
	res := &result{
		proto:   proto,
		ifNoAST: ast.NewNoSourceNode(proto.GetName()),
	}
	return res
}
// ResultFromAST constructs a descriptor proto from the given AST. The returned
// result includes the descriptor proto and an index that can be used to look
// up AST node information for elements in the descriptor proto hierarchy.
//
// If validate is true, some basic validation is performed, to make sure the
// resulting descriptor proto is valid per protobuf rules and semantics. Only
// some language elements can be validated at this stage; other rules can only
// be checked once all symbols are resolved, during the linking step.
//
// The given handler is used to report any errors or warnings encountered. If
// any errors are reported, this function returns a non-nil error.
func ResultFromAST(file *ast.FileNode, validate bool, handler *reporter.Handler) (Result, error) {
	res := &result{file: file, nodes: map[proto.Message]ast.Node{}}
	res.createFileDescriptor(file.Name(), file, handler)
	if validate {
		validateBasic(res, handler)
	}
	// Labels absent in the source are left unset during the first pass (so
	// validation can check for their presence), but final descriptors are
	// expected to always carry one — backfill "optional" now.
	fillInMissingLabels(res.proto)
	return res, handler.Error()
}
// AST returns the file's AST, or nil if this result was created without one
// (see ResultWithoutAST).
func (r *result) AST() *ast.FileNode {
	return r.file
}
// FileDescriptorProto returns the descriptor proto constructed for this file.
func (r *result) FileDescriptorProto() *descriptorpb.FileDescriptorProto {
	return r.proto
}
// createFileDescriptor builds the FileDescriptorProto for the given file AST
// and stores it in r.proto. It first determines the file's syntax level
// (proto2, proto3, or editions), then translates each top-level declaration.
// It returns early whenever the handler indicates that error reporting should
// abort.
func (r *result) createFileDescriptor(filename string, file *ast.FileNode, handler *reporter.Handler) {
	fd := &descriptorpb.FileDescriptorProto{Name: proto.String(filename)}
	r.proto = fd
	r.putFileNode(fd, file)
	var syntax protoreflect.Syntax
	switch {
	case file.Syntax != nil:
		switch file.Syntax.Syntax.AsString() {
		case "proto3":
			syntax = protoreflect.Proto3
		case "proto2":
			syntax = protoreflect.Proto2
		default:
			nodeInfo := file.NodeInfo(file.Syntax.Syntax)
			if handler.HandleErrorf(nodeInfo, `syntax value must be "proto2" or "proto3"`) != nil {
				return
			}
		}
		// proto2 is the default, so no need to set for that value
		if syntax != protoreflect.Proto2 {
			fd.Syntax = proto.String(file.Syntax.Syntax.AsString())
		}
	case file.Edition != nil:
		edition := file.Edition.Edition.AsString()
		syntax = protoreflect.Editions
		fd.Syntax = proto.String("editions")
		editionEnum, ok := editions.SupportedEditions[edition]
		if !ok {
			// Unknown edition: report an error that lists the supported
			// values (sorted so the message is deterministic).
			nodeInfo := file.NodeInfo(file.Edition.Edition)
			editionStrs := make([]string, 0, len(editions.SupportedEditions))
			for supportedEdition := range editions.SupportedEditions {
				editionStrs = append(editionStrs, fmt.Sprintf("%q", supportedEdition))
			}
			sort.Strings(editionStrs)
			if handler.HandleErrorf(nodeInfo, `edition value %q not recognized; should be one of [%s]`, edition, strings.Join(editionStrs, ",")) != nil {
				return
			}
		}
		fd.Edition = editionEnum.Enum()
	default:
		// No syntax or edition declaration at all: default to proto2 but warn.
		syntax = protoreflect.Proto2
		nodeInfo := file.NodeInfo(file)
		handler.HandleWarningWithPos(nodeInfo, ErrNoSyntax)
	}
	for _, decl := range file.Decls {
		// Bail out as soon as the reporter has signaled a fatal condition.
		if handler.ReporterError() != nil {
			return
		}
		switch decl := decl.(type) {
		case *ast.EnumNode:
			fd.EnumType = append(fd.EnumType, r.asEnumDescriptor(decl, syntax, handler))
		case *ast.ExtendNode:
			r.addExtensions(decl, &fd.Extension, &fd.MessageType, syntax, handler, 0)
		case *ast.ImportNode:
			index := len(fd.Dependency)
			fd.Dependency = append(fd.Dependency, decl.Name.AsString())
			if decl.Public != nil {
				fd.PublicDependency = append(fd.PublicDependency, int32(index))
			} else if decl.Weak != nil {
				fd.WeakDependency = append(fd.WeakDependency, int32(index))
			}
		case *ast.MessageNode:
			fd.MessageType = append(fd.MessageType, r.asMessageDescriptor(decl, syntax, handler, 1))
		case *ast.OptionNode:
			if fd.Options == nil {
				fd.Options = &descriptorpb.FileOptions{}
			}
			fd.Options.UninterpretedOption = append(fd.Options.UninterpretedOption, r.asUninterpretedOption(decl))
		case *ast.ServiceNode:
			fd.Service = append(fd.Service, r.asServiceDescriptor(decl))
		case *ast.PackageNode:
			if fd.Package != nil {
				nodeInfo := file.NodeInfo(decl)
				if handler.HandleErrorf(nodeInfo, "files should have only one package declaration") != nil {
					return
				}
			}
			pkgName := string(decl.Name.AsIdentifier())
			if len(pkgName) >= 512 {
				nodeInfo := file.NodeInfo(decl.Name)
				if handler.HandleErrorf(nodeInfo, "package name (with whitespace removed) must be less than 512 characters long") != nil {
					return
				}
			}
			if strings.Count(pkgName, ".") > 100 {
				nodeInfo := file.NodeInfo(decl.Name)
				if handler.HandleErrorf(nodeInfo, "package name may not contain more than 100 periods") != nil {
					return
				}
			}
			fd.Package = proto.String(string(decl.Name.AsIdentifier()))
		}
	}
}
// asUninterpretedOptions converts a slice of AST option nodes into
// uninterpreted option descriptors. It returns nil for an empty input.
func (r *result) asUninterpretedOptions(nodes []*ast.OptionNode) []*descriptorpb.UninterpretedOption {
	if len(nodes) == 0 {
		return nil
	}
	converted := make([]*descriptorpb.UninterpretedOption, 0, len(nodes))
	for _, node := range nodes {
		converted = append(converted, r.asUninterpretedOption(node))
	}
	return converted
}
// asUninterpretedOption converts an AST option node into an "uninterpreted"
// option descriptor. The value lands in whichever UninterpretedOption field
// matches the Go type produced by the AST value node; message literals are
// flattened back into source text and stored as an aggregate value.
func (r *result) asUninterpretedOption(node *ast.OptionNode) *descriptorpb.UninterpretedOption {
	opt := &descriptorpb.UninterpretedOption{Name: r.asUninterpretedOptionName(node.Name.Parts)}
	r.putOptionNode(opt, node)
	switch val := node.Val.Value().(type) {
	case bool:
		if val {
			opt.IdentifierValue = proto.String("true")
		} else {
			opt.IdentifierValue = proto.String("false")
		}
	case int64:
		// int64 values go into NegativeIntValue; presumably non-negative
		// integers arrive as uint64 (next case) — TODO confirm against the
		// AST's Value() contract.
		opt.NegativeIntValue = proto.Int64(val)
	case uint64:
		opt.PositiveIntValue = proto.Uint64(val)
	case float64:
		opt.DoubleValue = proto.Float64(val)
	case string:
		opt.StringValue = []byte(val)
	case ast.Identifier:
		opt.IdentifierValue = proto.String(string(val))
	default:
		// the grammar does not allow arrays here, so the only possible case
		// left should be []*ast.MessageFieldNode, which corresponds to an
		// *ast.MessageLiteralNode
		if n, ok := node.Val.(*ast.MessageLiteralNode); ok {
			// Re-serialize the message literal, keeping each element's
			// trailing separator rune (if any) after its text.
			var buf bytes.Buffer
			for i, el := range n.Elements {
				flattenNode(r.file, el, &buf)
				if len(n.Seps) > i && n.Seps[i] != nil {
					buf.WriteRune(' ')
					buf.WriteRune(n.Seps[i].Rune)
				}
			}
			aggStr := buf.String()
			opt.AggregateValue = proto.String(aggStr)
		}
		// TODO: else that reports an error or panics??
	}
	return opt
}
// flattenNode appends the raw source text of n (and, recursively, all of its
// children) to buf, inserting a single space between successive leaf tokens.
func flattenNode(f *ast.FileNode, n ast.Node, buf *bytes.Buffer) {
	composite, ok := n.(ast.CompositeNode)
	if !ok {
		// Leaf node: emit its raw text, space-separated from what precedes it.
		if buf.Len() > 0 {
			buf.WriteRune(' ')
		}
		buf.WriteString(f.NodeInfo(n).RawText())
		return
	}
	for _, child := range composite.Children() {
		flattenNode(f, child, buf)
	}
}
// asUninterpretedOptionName converts the dotted parts of an option name into
// descriptor name parts, recording the AST node for each part.
func (r *result) asUninterpretedOptionName(parts []*ast.FieldReferenceNode) []*descriptorpb.UninterpretedOption_NamePart {
	names := make([]*descriptorpb.UninterpretedOption_NamePart, 0, len(parts))
	for _, part := range parts {
		namePart := &descriptorpb.UninterpretedOption_NamePart{
			NamePart:    proto.String(string(part.Name.AsIdentifier())),
			IsExtension: proto.Bool(part.IsExtension()),
		}
		r.putOptionNamePartNode(namePart, part)
		names = append(names, namePart)
	}
	return names
}
// addExtensions translates the declarations in an "extend" block into
// extension field descriptors, appending them to *flds. Group declarations
// additionally produce a nested message type, appended to *msgs. An extend
// block containing no extension fields is reported as an error.
func (r *result) addExtensions(ext *ast.ExtendNode, flds *[]*descriptorpb.FieldDescriptorProto, msgs *[]*descriptorpb.DescriptorProto, syntax protoreflect.Syntax, handler *reporter.Handler, depth int) {
	extendee := string(ext.Extendee.AsIdentifier())
	// count tracks how many actual extension fields were declared.
	count := 0
	for _, decl := range ext.Decls {
		switch decl := decl.(type) {
		case *ast.FieldNode:
			count++
			// use higher limit since we don't know yet whether extendee is messageset wire format
			fd := r.asFieldDescriptor(decl, internal.MaxTag, syntax, handler)
			fd.Extendee = proto.String(extendee)
			*flds = append(*flds, fd)
		case *ast.GroupNode:
			count++
			// ditto: use higher limit right now
			fd, md := r.asGroupDescriptors(decl, syntax, internal.MaxTag, handler, depth+1)
			fd.Extendee = proto.String(extendee)
			*flds = append(*flds, fd)
			*msgs = append(*msgs, md)
		}
	}
	if count == 0 {
		nodeInfo := r.file.NodeInfo(ext)
		_ = handler.HandleErrorf(nodeInfo, "extend sections must define at least one extension")
	}
}
// asLabel maps an AST field label to the corresponding descriptor label enum.
// It returns nil when no label was present in the source, so callers can
// distinguish an explicit "optional" from an absent label.
func asLabel(lbl *ast.FieldLabel) *descriptorpb.FieldDescriptorProto_Label {
	if !lbl.IsPresent() {
		return nil
	}
	if lbl.Repeated {
		return descriptorpb.FieldDescriptorProto_LABEL_REPEATED.Enum()
	}
	if lbl.Required {
		return descriptorpb.FieldDescriptorProto_LABEL_REQUIRED.Enum()
	}
	return descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
}
// asFieldDescriptor converts an AST field declaration into a field
// descriptor. The tag is validated against maxTag (errors are reported via
// handler, but conversion still proceeds). In proto3, a field carrying an
// explicit "optional" label is flagged as proto3-optional.
func (r *result) asFieldDescriptor(node *ast.FieldNode, maxTag int32, syntax protoreflect.Syntax, handler *reporter.Handler) *descriptorpb.FieldDescriptorProto {
	var tagPtr *int32
	if node.Tag != nil {
		if err := r.checkTag(node.Tag, node.Tag.Val, maxTag); err != nil {
			_ = handler.HandleError(err)
		}
		tagPtr = proto.Int32(int32(node.Tag.Val))
	}
	fd := newFieldDescriptor(node.Name.Val, string(node.FldType.AsIdentifier()), tagPtr, asLabel(&node.Label))
	r.putFieldNode(fd, node)
	if uninterp := node.Options.GetElements(); len(uninterp) > 0 {
		fd.Options = &descriptorpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(uninterp)}
	}
	explicitOptional := fd.Label != nil && fd.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL
	if syntax == protoreflect.Proto3 && explicitOptional {
		fd.Proto3Optional = proto.Bool(true)
	}
	return fd
}
// fieldTypes maps protobuf scalar type keywords to their descriptor type enum
// values. A field type absent from this map is treated as a message or enum
// type name (resolved later, during linking) — see newFieldDescriptor.
var fieldTypes = map[string]descriptorpb.FieldDescriptorProto_Type{
	"double":   descriptorpb.FieldDescriptorProto_TYPE_DOUBLE,
	"float":    descriptorpb.FieldDescriptorProto_TYPE_FLOAT,
	"int32":    descriptorpb.FieldDescriptorProto_TYPE_INT32,
	"int64":    descriptorpb.FieldDescriptorProto_TYPE_INT64,
	"uint32":   descriptorpb.FieldDescriptorProto_TYPE_UINT32,
	"uint64":   descriptorpb.FieldDescriptorProto_TYPE_UINT64,
	"sint32":   descriptorpb.FieldDescriptorProto_TYPE_SINT32,
	"sint64":   descriptorpb.FieldDescriptorProto_TYPE_SINT64,
	"fixed32":  descriptorpb.FieldDescriptorProto_TYPE_FIXED32,
	"fixed64":  descriptorpb.FieldDescriptorProto_TYPE_FIXED64,
	"sfixed32": descriptorpb.FieldDescriptorProto_TYPE_SFIXED32,
	"sfixed64": descriptorpb.FieldDescriptorProto_TYPE_SFIXED64,
	"bool":     descriptorpb.FieldDescriptorProto_TYPE_BOOL,
	"string":   descriptorpb.FieldDescriptorProto_TYPE_STRING,
	"bytes":    descriptorpb.FieldDescriptorProto_TYPE_BYTES,
}
// newFieldDescriptor constructs a field descriptor with the given name, type,
// tag, and label. Scalar type names set the Type enum directly; anything else
// is recorded as a TypeName to be resolved during linking (when we learn
// whether it names a message or an enum).
func newFieldDescriptor(name string, fieldType string, tag *int32, lbl *descriptorpb.FieldDescriptorProto_Label) *descriptorpb.FieldDescriptorProto {
	fd := &descriptorpb.FieldDescriptorProto{
		Name:     proto.String(name),
		JsonName: proto.String(internal.JSONName(name)),
		Number:   tag,
		Label:    lbl,
	}
	if scalar, isScalar := fieldTypes[fieldType]; isScalar {
		fd.Type = scalar.Enum()
		return fd
	}
	// Not a scalar keyword: defer type resolution to the linking step.
	fd.TypeName = proto.String(fieldType)
	return fd
}
// asGroupDescriptors converts a group declaration into the pair of
// descriptors it implies: a field of TYPE_GROUP (whose name is the group name
// lowercased) and a nested message type carrying the group's body. The tag is
// validated against maxTag; a group name not starting with a capital letter
// is reported as an error. The message body is skipped once nesting depth is
// exceeded.
func (r *result) asGroupDescriptors(group *ast.GroupNode, syntax protoreflect.Syntax, maxTag int32, handler *reporter.Handler, depth int) (*descriptorpb.FieldDescriptorProto, *descriptorpb.DescriptorProto) {
	var tag *int32
	if group.Tag != nil {
		if err := r.checkTag(group.Tag, group.Tag.Val, maxTag); err != nil {
			_ = handler.HandleError(err)
		}
		tag = proto.Int32(int32(group.Tag.Val))
	}
	if !unicode.IsUpper(rune(group.Name.Val[0])) {
		nameNodeInfo := r.file.NodeInfo(group.Name)
		_ = handler.HandleErrorf(nameNodeInfo, "group %s should have a name that starts with a capital letter", group.Name.Val)
	}
	// The implicit field is named after the group, lowercased.
	fieldName := strings.ToLower(group.Name.Val)
	fd := &descriptorpb.FieldDescriptorProto{
		Name:     proto.String(fieldName),
		JsonName: proto.String(internal.JSONName(fieldName)),
		Number:   tag,
		Label:    asLabel(&group.Label),
		Type:     descriptorpb.FieldDescriptorProto_TYPE_GROUP.Enum(),
		TypeName: proto.String(group.Name.Val),
	}
	r.putFieldNode(fd, group)
	if opts := group.Options.GetElements(); len(opts) > 0 {
		fd.Options = &descriptorpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(opts)}
	}
	md := &descriptorpb.DescriptorProto{Name: proto.String(group.Name.Val)}
	groupMsg := group.AsMessage()
	r.putMessageNode(md, groupMsg)
	// don't bother processing body if we've exceeded depth
	if r.checkDepth(depth, groupMsg, handler) {
		r.addMessageBody(md, &group.MessageBody, syntax, handler, depth)
	}
	return fd, md
}
// asMapDescriptors converts a map field declaration into the pair of
// descriptors it implies: a repeated field referring to a synthetic
// "<FieldName>Entry" message, and that entry message itself with "key" as
// field 1 and "value" as field 2 and the map_entry option set.
func (r *result) asMapDescriptors(mapField *ast.MapFieldNode, syntax protoreflect.Syntax, maxTag int32, handler *reporter.Handler, depth int) (*descriptorpb.FieldDescriptorProto, *descriptorpb.DescriptorProto) {
	var tag *int32
	if mapField.Tag != nil {
		if err := r.checkTag(mapField.Tag, mapField.Tag.Val, maxTag); err != nil {
			_ = handler.HandleError(err)
		}
		tag = proto.Int32(int32(mapField.Tag.Val))
	}
	mapEntry := mapField.AsMessage()
	r.checkDepth(depth, mapEntry, handler)
	// In proto2, the synthetic key/value fields carry an explicit "optional"
	// label; otherwise the label is left unset.
	var lbl *descriptorpb.FieldDescriptorProto_Label
	if syntax == protoreflect.Proto2 {
		lbl = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
	}
	keyFd := newFieldDescriptor("key", mapField.MapType.KeyType.Val, proto.Int32(1), lbl)
	r.putFieldNode(keyFd, mapField.KeyField())
	valFd := newFieldDescriptor("value", string(mapField.MapType.ValueType.AsIdentifier()), proto.Int32(2), lbl)
	r.putFieldNode(valFd, mapField.ValueField())
	// Entry message name is derived from the field name, e.g. "foo_bar"
	// becomes "FooBarEntry".
	entryName := internal.InitCap(internal.JSONName(mapField.Name.Val)) + "Entry"
	fd := newFieldDescriptor(mapField.Name.Val, entryName, tag, descriptorpb.FieldDescriptorProto_LABEL_REPEATED.Enum())
	if opts := mapField.Options.GetElements(); len(opts) > 0 {
		fd.Options = &descriptorpb.FieldOptions{UninterpretedOption: r.asUninterpretedOptions(opts)}
	}
	r.putFieldNode(fd, mapField)
	md := &descriptorpb.DescriptorProto{
		Name:    proto.String(entryName),
		Options: &descriptorpb.MessageOptions{MapEntry: proto.Bool(true)},
		Field:   []*descriptorpb.FieldDescriptorProto{keyFd, valFd},
	}
	r.putMessageNode(md, mapEntry)
	return fd, md
}
// asExtensionRanges converts the ranges of an "extensions" declaration into
// descriptor extension ranges. The AST range bounds are inclusive while the
// descriptor's End is exclusive, hence the "+ 1". Any compact options on the
// declaration are attached to every range it declares.
func (r *result) asExtensionRanges(node *ast.ExtensionRangeNode, maxTag int32, handler *reporter.Handler) []*descriptorpb.DescriptorProto_ExtensionRange {
	opts := r.asUninterpretedOptions(node.Options.GetElements())
	ers := make([]*descriptorpb.DescriptorProto_ExtensionRange, len(node.Ranges))
	for i, rng := range node.Ranges {
		start, end := r.getRangeBounds(rng, 1, maxTag, handler)
		er := &descriptorpb.DescriptorProto_ExtensionRange{
			Start: proto.Int32(start),
			End:   proto.Int32(end + 1),
		}
		if len(opts) > 0 {
			er.Options = &descriptorpb.ExtensionRangeOptions{UninterpretedOption: opts}
		}
		r.putExtensionRangeNode(er, node, rng)
		ers[i] = er
	}
	return ers
}
// asEnumValue converts an AST enum value declaration into an enum value
// descriptor. A number outside the int32 range is reported as an error, but
// conversion still proceeds (with the value AsInt32 returned).
func (r *result) asEnumValue(ev *ast.EnumValueNode, handler *reporter.Handler) *descriptorpb.EnumValueDescriptorProto {
	number, inRange := ast.AsInt32(ev.Number, math.MinInt32, math.MaxInt32)
	if !inRange {
		info := r.file.NodeInfo(ev.Number)
		_ = handler.HandleErrorf(info, "value %d is out of range: should be between %d and %d", ev.Number.Value(), math.MinInt32, math.MaxInt32)
	}
	evd := &descriptorpb.EnumValueDescriptorProto{Name: proto.String(ev.Name.Val), Number: proto.Int32(number)}
	r.putEnumValueNode(evd, ev)
	if uninterp := ev.Options.GetElements(); len(uninterp) > 0 {
		evd.Options = &descriptorpb.EnumValueOptions{UninterpretedOption: r.asUninterpretedOptions(uninterp)}
	}
	return evd
}
// asMethodDescriptor converts an RPC declaration into a method descriptor,
// recording streaming flags and any options declared in the method body.
func (r *result) asMethodDescriptor(node *ast.RPCNode) *descriptorpb.MethodDescriptorProto {
	md := &descriptorpb.MethodDescriptorProto{
		Name:       proto.String(node.Name.Val),
		InputType:  proto.String(string(node.Input.MessageType.AsIdentifier())),
		OutputType: proto.String(string(node.Output.MessageType.AsIdentifier())),
	}
	r.putMethodNode(md, node)
	if node.Input.Stream != nil {
		md.ClientStreaming = proto.Bool(true)
	}
	if node.Output.Stream != nil {
		md.ServerStreaming = proto.Bool(true)
	}
	// protoc always adds a MethodOptions if there are brackets
	// We do the same to match protoc as closely as possible
	// https://github.com/protocolbuffers/protobuf/blob/0c3f43a6190b77f1f68b7425d1b7e1a8257a8d0c/src/google/protobuf/compiler/parser.cc#L2152
	if node.OpenBrace != nil {
		opts := &descriptorpb.MethodOptions{}
		for _, decl := range node.Decls {
			if option, ok := decl.(*ast.OptionNode); ok {
				opts.UninterpretedOption = append(opts.UninterpretedOption, r.asUninterpretedOption(option))
			}
		}
		md.Options = opts
	}
	return md
}
// asEnumDescriptor converts an AST enum declaration into an enum descriptor,
// translating its options, values, and reserved names/ranges. Duplicate
// reserved names within the enum are detected via the rsvdNames map.
func (r *result) asEnumDescriptor(en *ast.EnumNode, syntax protoreflect.Syntax, handler *reporter.Handler) *descriptorpb.EnumDescriptorProto {
	ed := &descriptorpb.EnumDescriptorProto{Name: proto.String(en.Name.Val)}
	r.putEnumNode(ed, en)
	// rsvdNames tracks reserved names already seen, mapping each to the
	// position where it was first reserved (for duplicate reporting).
	rsvdNames := map[string]ast.SourcePos{}
	for _, decl := range en.Decls {
		switch decl := decl.(type) {
		case *ast.OptionNode:
			if ed.Options == nil {
				ed.Options = &descriptorpb.EnumOptions{}
			}
			ed.Options.UninterpretedOption = append(ed.Options.UninterpretedOption, r.asUninterpretedOption(decl))
		case *ast.EnumValueNode:
			ed.Value = append(ed.Value, r.asEnumValue(decl, handler))
		case *ast.ReservedNode:
			r.addReservedNames(&ed.ReservedName, decl, syntax, handler, rsvdNames)
			for _, rng := range decl.Ranges {
				ed.ReservedRange = append(ed.ReservedRange, r.asEnumReservedRange(rng, handler))
			}
		}
	}
	return ed
}
// asEnumReservedRange converts a reserved range in an enum into an
// EnumReservedRange descriptor. Unlike message reserved ranges, the end is
// stored as-is (enum reserved range ends are inclusive in descriptor.proto).
func (r *result) asEnumReservedRange(rng *ast.RangeNode, handler *reporter.Handler) *descriptorpb.EnumDescriptorProto_EnumReservedRange {
	lo, hi := r.getRangeBounds(rng, math.MinInt32, math.MaxInt32, handler)
	reservedRange := &descriptorpb.EnumDescriptorProto_EnumReservedRange{
		Start: proto.Int32(lo),
		End:   proto.Int32(hi),
	}
	r.putEnumReservedRangeNode(reservedRange, rng)
	return reservedRange
}
// asMessageDescriptor converts a message declaration from the AST into a
// DescriptorProto. The body is only processed while nesting depth is within
// the allowed limit; past the limit an error has already been reported by
// checkDepth and the (empty) descriptor is returned as-is.
func (r *result) asMessageDescriptor(node *ast.MessageNode, syntax protoreflect.Syntax, handler *reporter.Handler, depth int) *descriptorpb.DescriptorProto {
	msgd := &descriptorpb.DescriptorProto{Name: proto.String(node.Name.Val)}
	r.putMessageNode(msgd, node)
	if !r.checkDepth(depth, node, handler) {
		// don't bother processing body if we've exceeded depth
		return msgd
	}
	r.addMessageBody(msgd, &node.MessageBody, syntax, handler, depth)
	return msgd
}
// addReservedNames appends the reserved names declared by node to *names,
// reporting an error for any name that was already reserved (tracked in
// alreadyReserved). Editions files must declare reserved names as
// identifiers, while proto2/proto3 files must use string literals; using the
// wrong form is reported but the names from the correct form are still
// collected.
func (r *result) addReservedNames(names *[]string, node *ast.ReservedNode, syntax protoreflect.Syntax, handler *reporter.Handler, alreadyReserved map[string]ast.SourcePos) {
	// record de-duplicates and appends a single reserved name
	record := func(name string, info ast.NodeInfo) {
		if existing, ok := alreadyReserved[name]; ok {
			_ = handler.HandleErrorf(info, "name %q is already reserved at %s", name, existing)
			return
		}
		alreadyReserved[name] = info.Start()
		*names = append(*names, name)
	}
	if syntax == protoreflect.Editions {
		if len(node.Names) > 0 {
			nameNodeInfo := r.file.NodeInfo(node.Names[0])
			_ = handler.HandleErrorf(nameNodeInfo, `must use identifiers, not string literals, to reserved names with editions`)
		}
		for _, n := range node.Identifiers {
			record(string(n.AsIdentifier()), r.file.NodeInfo(n))
		}
		return
	}
	if len(node.Identifiers) > 0 {
		nameNodeInfo := r.file.NodeInfo(node.Identifiers[0])
		_ = handler.HandleErrorf(nameNodeInfo, `must use string literals, not identifiers, to reserved names with proto2 and proto3`)
	}
	for _, n := range node.Names {
		record(n.AsString(), r.file.NodeInfo(n))
	}
}
// checkDepth reports whether the given nesting depth is acceptable. When the
// limit (32) is reached it reports an error positioned at the offending node
// and returns false.
func (r *result) checkDepth(depth int, node ast.MessageDeclNode, handler *reporter.Handler) bool {
	if depth < 32 {
		return true
	}
	var where ast.Node = node
	if grp, ok := where.(*ast.SyntheticGroupMessageNode); ok {
		// pinpoint the group keyword if the source is a group
		where = grp.Keyword
	}
	_ = handler.HandleErrorf(r.file.NodeInfo(where), "message nesting depth must be less than 32")
	return false
}
// addMessageBody populates msgd from the declarations in the given message
// body: options, fields, oneofs, nested types, extensions, and reserved
// names/ranges. Options are processed in a first pass because the
// message_set_wire_format option changes the maximum allowed field tag,
// which the second pass needs when converting fields. Errors are reported
// through handler; processing stops early only if the messageset check
// itself fails.
func (r *result) addMessageBody(msgd *descriptorpb.DescriptorProto, body *ast.MessageBody, syntax protoreflect.Syntax, handler *reporter.Handler, depth int) {
	// first process any options
	for _, decl := range body.Decls {
		if opt, ok := decl.(*ast.OptionNode); ok {
			if msgd.Options == nil {
				msgd.Options = &descriptorpb.MessageOptions{}
			}
			msgd.Options.UninterpretedOption = append(msgd.Options.UninterpretedOption, r.asUninterpretedOption(opt))
		}
	}
	// now that we have options, we can see if this uses messageset wire format, which
	// impacts how we validate tag numbers in any fields in the message
	maxTag := int32(internal.MaxNormalTag)
	messageSetOpt, err := r.isMessageSetWireFormat("message "+msgd.GetName(), msgd, handler)
	if err != nil {
		return
	} else if messageSetOpt != nil {
		if syntax == protoreflect.Proto3 {
			node := r.OptionNode(messageSetOpt)
			nodeInfo := r.file.NodeInfo(node)
			_ = handler.HandleErrorf(nodeInfo, "messages with message-set wire format are not allowed with proto3 syntax")
		}
		maxTag = internal.MaxTag // higher limit for messageset wire format
	}
	rsvdNames := map[string]ast.SourcePos{}
	// now we can process the rest
	for _, decl := range body.Decls {
		switch decl := decl.(type) {
		case *ast.EnumNode:
			msgd.EnumType = append(msgd.EnumType, r.asEnumDescriptor(decl, syntax, handler))
		case *ast.ExtendNode:
			r.addExtensions(decl, &msgd.Extension, &msgd.NestedType, syntax, handler, depth)
		case *ast.ExtensionRangeNode:
			msgd.ExtensionRange = append(msgd.ExtensionRange, r.asExtensionRanges(decl, maxTag, handler)...)
		case *ast.FieldNode:
			fd := r.asFieldDescriptor(decl, maxTag, syntax, handler)
			msgd.Field = append(msgd.Field, fd)
		case *ast.MapFieldNode:
			// a map field produces both a field and a nested descriptor
			fd, md := r.asMapDescriptors(decl, syntax, maxTag, handler, depth+1)
			msgd.Field = append(msgd.Field, fd)
			msgd.NestedType = append(msgd.NestedType, md)
		case *ast.GroupNode:
			// a group likewise produces both a field and a nested descriptor
			fd, md := r.asGroupDescriptors(decl, syntax, maxTag, handler, depth+1)
			msgd.Field = append(msgd.Field, fd)
			msgd.NestedType = append(msgd.NestedType, md)
		case *ast.OneofNode:
			// capture the index before appending, so fields can refer to it
			oodIndex := len(msgd.OneofDecl)
			ood := &descriptorpb.OneofDescriptorProto{Name: proto.String(decl.Name.Val)}
			r.putOneofNode(ood, decl)
			msgd.OneofDecl = append(msgd.OneofDecl, ood)
			ooFields := 0
			for _, oodecl := range decl.Decls {
				switch oodecl := oodecl.(type) {
				case *ast.OptionNode:
					if ood.Options == nil {
						ood.Options = &descriptorpb.OneofOptions{}
					}
					ood.Options.UninterpretedOption = append(ood.Options.UninterpretedOption, r.asUninterpretedOption(oodecl))
				case *ast.FieldNode:
					// fields declared in a oneof are stored on the enclosing
					// message, tagged with the oneof's index
					fd := r.asFieldDescriptor(oodecl, maxTag, syntax, handler)
					fd.OneofIndex = proto.Int32(int32(oodIndex))
					msgd.Field = append(msgd.Field, fd)
					ooFields++
				case *ast.GroupNode:
					fd, md := r.asGroupDescriptors(oodecl, syntax, maxTag, handler, depth+1)
					fd.OneofIndex = proto.Int32(int32(oodIndex))
					msgd.Field = append(msgd.Field, fd)
					msgd.NestedType = append(msgd.NestedType, md)
					ooFields++
				}
			}
			if ooFields == 0 {
				declNodeInfo := r.file.NodeInfo(decl)
				_ = handler.HandleErrorf(declNodeInfo, "oneof must contain at least one field")
			}
		case *ast.MessageNode:
			msgd.NestedType = append(msgd.NestedType, r.asMessageDescriptor(decl, syntax, handler, depth+1))
		case *ast.ReservedNode:
			r.addReservedNames(&msgd.ReservedName, decl, syntax, handler, rsvdNames)
			for _, rng := range decl.Ranges {
				msgd.ReservedRange = append(msgd.ReservedRange, r.asMessageReservedRange(rng, maxTag, handler))
			}
		}
	}
	// message-set wire format messages may only contain extensions
	if messageSetOpt != nil {
		if len(msgd.Field) > 0 {
			node := r.FieldNode(msgd.Field[0])
			nodeInfo := r.file.NodeInfo(node)
			_ = handler.HandleErrorf(nodeInfo, "messages with message-set wire format cannot contain non-extension fields")
		}
		if len(msgd.ExtensionRange) == 0 {
			node := r.OptionNode(messageSetOpt)
			nodeInfo := r.file.NodeInfo(node)
			_ = handler.HandleErrorf(nodeInfo, "messages with message-set wire format must contain at least one extension range")
		}
	}
	// process any proto3_optional fields
	if syntax == protoreflect.Proto3 {
		r.processProto3OptionalFields(msgd)
	}
}
// isMessageSetWireFormat looks for the message_set_wire_format option among
// the message's uninterpreted options. It returns the option when it is set
// to "true", nil when it is absent or "false", and a non-nil error when the
// value is not a boolean identifier (or when option lookup itself fails).
func (r *result) isMessageSetWireFormat(scope string, md *descriptorpb.DescriptorProto, handler *reporter.Handler) (*descriptorpb.UninterpretedOption, error) {
	opts := md.GetOptions().GetUninterpretedOption()
	index, err := internal.FindOption(r, handler.HandleErrorf, scope, opts, "message_set_wire_format")
	if err != nil {
		return nil, err
	}
	if index == -1 {
		// no such option
		return nil, nil
	}
	opt := opts[index]
	val := opt.GetIdentifierValue()
	if val == "true" {
		return opt, nil
	}
	if val == "false" {
		return nil, nil
	}
	optNode := r.OptionNode(opt)
	optNodeInfo := r.file.NodeInfo(optNode.GetValue())
	return nil, handler.HandleErrorf(optNodeInfo, "%s: expecting bool value for message_set_wire_format option", scope)
}
// asMessageReservedRange converts a reserved range in a message into a
// ReservedRange descriptor. Message reserved ranges use an exclusive end in
// descriptor.proto, so the inclusive end from source is incremented by one.
func (r *result) asMessageReservedRange(rng *ast.RangeNode, maxTag int32, handler *reporter.Handler) *descriptorpb.DescriptorProto_ReservedRange {
	lo, hi := r.getRangeBounds(rng, 1, maxTag, handler)
	reservedRange := &descriptorpb.DescriptorProto_ReservedRange{
		Start: proto.Int32(lo),
		End:   proto.Int32(hi + 1),
	}
	r.putMessageReservedRangeNode(reservedRange, rng)
	return reservedRange
}
// getRangeBounds resolves the start and end values of a range declaration,
// reporting (via handler) any value that falls outside [minVal, maxVal] and
// any range whose start exceeds its end. The (possibly out-of-bounds) parsed
// values are returned regardless, so callers can continue processing.
func (r *result) getRangeBounds(rng *ast.RangeNode, minVal, maxVal int32, handler *reporter.Handler) (int32, int32) {
	boundsOK := true
	start, ok := rng.StartValueAsInt32(minVal, maxVal)
	if !ok {
		boundsOK = false
		startValNodeInfo := r.file.NodeInfo(rng.StartVal)
		_ = handler.HandleErrorf(startValNodeInfo, "range start %d is out of range: should be between %d and %d", rng.StartValue(), minVal, maxVal)
	}
	end, ok := rng.EndValueAsInt32(minVal, maxVal)
	if !ok {
		boundsOK = false
		// nothing to point at when there is no explicit end value in source
		if rng.EndVal != nil {
			endValNodeInfo := r.file.NodeInfo(rng.EndVal)
			_ = handler.HandleErrorf(endValNodeInfo, "range end %d is out of range: should be between %d and %d", rng.EndValue(), minVal, maxVal)
		}
	}
	// ordering is only meaningful when both endpoints were in bounds
	if boundsOK && start > end {
		rangeStartNodeInfo := r.file.NodeInfo(rng.RangeStart())
		_ = handler.HandleErrorf(rangeStartNodeInfo, "range, %d to %d, is invalid: start must be <= end", start, end)
	}
	return start, end
}
// asServiceDescriptor converts a service declaration from the AST into a
// ServiceDescriptorProto, translating its options and RPC methods, and
// records the descriptor-to-node association.
func (r *result) asServiceDescriptor(svc *ast.ServiceNode) *descriptorpb.ServiceDescriptorProto {
	sd := &descriptorpb.ServiceDescriptorProto{Name: proto.String(svc.Name.Val)}
	r.putServiceNode(sd, svc)
	for _, elem := range svc.Decls {
		switch elem := elem.(type) {
		case *ast.OptionNode:
			// options struct is created lazily, only if an option is present
			if sd.Options == nil {
				sd.Options = &descriptorpb.ServiceOptions{}
			}
			sd.Options.UninterpretedOption = append(sd.Options.UninterpretedOption, r.asUninterpretedOption(elem))
		case *ast.RPCNode:
			sd.Method = append(sd.Method, r.asMethodDescriptor(elem))
		}
	}
	return sd
}
// checkTag validates a field tag number: it must be positive, no larger than
// maxTag, and outside the special reserved range defined by
// internal.SpecialReservedStart/End. It returns a positioned error for the
// first violated constraint, or nil when the tag is acceptable.
func (r *result) checkTag(n ast.Node, v uint64, maxTag int32) error {
	if v < 1 {
		return reporter.Errorf(r.file.NodeInfo(n), "tag number %d must be greater than zero", v)
	}
	if v > uint64(maxTag) {
		return reporter.Errorf(r.file.NodeInfo(n), "tag number %d is higher than max allowed tag number (%d)", v, maxTag)
	}
	if v >= internal.SpecialReservedStart && v <= internal.SpecialReservedEnd {
		return reporter.Errorf(r.file.NodeInfo(n), "tag number %d is in disallowed reserved range %d-%d", v, internal.SpecialReservedStart, internal.SpecialReservedEnd)
	}
	return nil
}
// processProto3OptionalFields adds synthetic oneofs to the given message descriptor
// for each proto3 optional field. It also updates the fields to have the correct
// oneof index reference.
func (r *result) processProto3OptionalFields(msgd *descriptorpb.DescriptorProto) {
	// add synthetic oneofs to the given message descriptor for each proto3
	// optional field, and update each field to have correct oneof index
	var allNames map[string]struct{}
	for _, fd := range msgd.Field {
		if fd.GetProto3Optional() {
			// lazy init the set of all names; only built if the message
			// actually has at least one proto3 optional field
			if allNames == nil {
				allNames = map[string]struct{}{}
				for _, fd := range msgd.Field {
					allNames[fd.GetName()] = struct{}{}
				}
				for _, od := range msgd.OneofDecl {
					allNames[od.GetName()] = struct{}{}
				}
				// NB: protoc only considers names of other fields and oneofs
				// when computing the synthetic oneof name. But that feels like
				// a bug, since it means it could generate a name that conflicts
				// with some other symbol defined in the message. If it's decided
				// that's NOT a bug and is desirable, then we should remove the
				// following four loops to mimic protoc's behavior.
				for _, fd := range msgd.Extension {
					allNames[fd.GetName()] = struct{}{}
				}
				for _, ed := range msgd.EnumType {
					allNames[ed.GetName()] = struct{}{}
					for _, evd := range ed.Value {
						allNames[evd.GetName()] = struct{}{}
					}
				}
				for _, fd := range msgd.NestedType {
					allNames[fd.GetName()] = struct{}{}
				}
			}
			// Compute a name for the synthetic oneof. This uses the same
			// algorithm as used in protoc:
			// https://github.com/protocolbuffers/protobuf/blob/74ad62759e0a9b5a21094f3fb9bb4ebfaa0d1ab8/src/google/protobuf/compiler/parser.cc#L785-L803
			// i.e. prefix with "_", then prepend "X" until the name is unique.
			ooName := fd.GetName()
			if !strings.HasPrefix(ooName, "_") {
				ooName = "_" + ooName
			}
			for {
				_, ok := allNames[ooName]
				if !ok {
					// found a unique name
					allNames[ooName] = struct{}{}
					break
				}
				ooName = "X" + ooName
			}
			// point the field at the synthetic oneof about to be appended
			fd.OneofIndex = proto.Int32(int32(len(msgd.OneofDecl)))
			ood := &descriptorpb.OneofDescriptorProto{Name: proto.String(ooName)}
			msgd.OneofDecl = append(msgd.OneofDecl, ood)
			// associate the synthetic oneof with the field's AST node so
			// position lookups on the oneof resolve somewhere sensible
			ooident := r.FieldNode(fd).(*ast.FieldNode) //nolint:errcheck
			r.putOneofNode(ood, ast.NewSyntheticOneof(ooident))
		}
	}
}
// The accessor methods below look up the AST node from which a given
// descriptor proto element was built. When this result was constructed
// without an AST (r.nodes == nil), they all return the r.ifNoAST
// placeholder instead.

// Node returns the AST node associated with the given message, or the
// no-AST placeholder.
func (r *result) Node(m proto.Message) ast.Node {
	if r.nodes == nil {
		return r.ifNoAST
	}
	return r.nodes[m]
}

// FileNode returns the AST node for the file itself.
func (r *result) FileNode() ast.FileDeclNode {
	if r.nodes == nil {
		return r.ifNoAST
	}
	return r.nodes[r.proto].(ast.FileDeclNode) //nolint:errcheck
}

// OptionNode returns the AST node for the given uninterpreted option.
func (r *result) OptionNode(o *descriptorpb.UninterpretedOption) ast.OptionDeclNode {
	if r.nodes == nil {
		return r.ifNoAST
	}
	return r.nodes[o].(ast.OptionDeclNode) //nolint:errcheck
}

// OptionNamePartNode returns the AST node for the given option name part.
func (r *result) OptionNamePartNode(o *descriptorpb.UninterpretedOption_NamePart) ast.Node {
	if r.nodes == nil {
		return r.ifNoAST
	}
	return r.nodes[o]
}

// MessageNode returns the AST node for the given message descriptor.
func (r *result) MessageNode(m *descriptorpb.DescriptorProto) ast.MessageDeclNode {
	if r.nodes == nil {
		return r.ifNoAST
	}
	return r.nodes[m].(ast.MessageDeclNode) //nolint:errcheck
}

// FieldNode returns the AST node for the given field descriptor.
func (r *result) FieldNode(f *descriptorpb.FieldDescriptorProto) ast.FieldDeclNode {
	if r.nodes == nil {
		return r.ifNoAST
	}
	return r.nodes[f].(ast.FieldDeclNode) //nolint:errcheck
}

// OneofNode returns the AST node for the given oneof descriptor.
func (r *result) OneofNode(o *descriptorpb.OneofDescriptorProto) ast.OneofDeclNode {
	if r.nodes == nil {
		return r.ifNoAST
	}
	return r.nodes[o].(ast.OneofDeclNode) //nolint:errcheck
}

// ExtensionsNode returns the AST node for the extensions declaration that
// encloses the given extension range (keyed via asExtsNode).
func (r *result) ExtensionsNode(e *descriptorpb.DescriptorProto_ExtensionRange) ast.NodeWithOptions {
	if r.nodes == nil {
		return r.ifNoAST
	}
	return r.nodes[asExtsNode(e)].(ast.NodeWithOptions) //nolint:errcheck
}

// ExtensionRangeNode returns the AST node for the given extension range.
func (r *result) ExtensionRangeNode(e *descriptorpb.DescriptorProto_ExtensionRange) ast.RangeDeclNode {
	if r.nodes == nil {
		return r.ifNoAST
	}
	return r.nodes[e].(ast.RangeDeclNode) //nolint:errcheck
}

// MessageReservedRangeNode returns the AST node for the given message
// reserved range.
func (r *result) MessageReservedRangeNode(rr *descriptorpb.DescriptorProto_ReservedRange) ast.RangeDeclNode {
	if r.nodes == nil {
		return r.ifNoAST
	}
	return r.nodes[rr].(ast.RangeDeclNode) //nolint:errcheck
}

// EnumNode returns the AST node for the given enum descriptor.
func (r *result) EnumNode(e *descriptorpb.EnumDescriptorProto) ast.NodeWithOptions {
	if r.nodes == nil {
		return r.ifNoAST
	}
	return r.nodes[e].(ast.NodeWithOptions) //nolint:errcheck
}

// EnumValueNode returns the AST node for the given enum value descriptor.
func (r *result) EnumValueNode(e *descriptorpb.EnumValueDescriptorProto) ast.EnumValueDeclNode {
	if r.nodes == nil {
		return r.ifNoAST
	}
	return r.nodes[e].(ast.EnumValueDeclNode) //nolint:errcheck
}

// EnumReservedRangeNode returns the AST node for the given enum reserved
// range.
func (r *result) EnumReservedRangeNode(rr *descriptorpb.EnumDescriptorProto_EnumReservedRange) ast.RangeDeclNode {
	if r.nodes == nil {
		return r.ifNoAST
	}
	return r.nodes[rr].(ast.RangeDeclNode) //nolint:errcheck
}

// ServiceNode returns the AST node for the given service descriptor.
func (r *result) ServiceNode(s *descriptorpb.ServiceDescriptorProto) ast.NodeWithOptions {
	if r.nodes == nil {
		return r.ifNoAST
	}
	return r.nodes[s].(ast.NodeWithOptions) //nolint:errcheck
}

// MethodNode returns the AST node for the given method descriptor.
func (r *result) MethodNode(m *descriptorpb.MethodDescriptorProto) ast.RPCDeclNode {
	if r.nodes == nil {
		return r.ifNoAST
	}
	return r.nodes[m].(ast.RPCDeclNode) //nolint:errcheck
}
// The put*Node methods below record, in r.nodes, the AST node from which
// each descriptor proto element was built; the accessor methods above read
// these entries back.

func (r *result) putFileNode(f *descriptorpb.FileDescriptorProto, n *ast.FileNode) {
	r.nodes[f] = n
}
func (r *result) putOptionNode(o *descriptorpb.UninterpretedOption, n *ast.OptionNode) {
	r.nodes[o] = n
}
func (r *result) putOptionNamePartNode(o *descriptorpb.UninterpretedOption_NamePart, n *ast.FieldReferenceNode) {
	r.nodes[o] = n
}
func (r *result) putMessageNode(m *descriptorpb.DescriptorProto, n ast.MessageDeclNode) {
	r.nodes[m] = n
}
func (r *result) putFieldNode(f *descriptorpb.FieldDescriptorProto, n ast.FieldDeclNode) {
	r.nodes[f] = n
}
func (r *result) putOneofNode(o *descriptorpb.OneofDescriptorProto, n ast.OneofDeclNode) {
	r.nodes[o] = n
}

// putExtensionRangeNode records two entries for an extension range: the
// range node itself, and (under a distinct wrapper key) the enclosing
// extensions declaration.
func (r *result) putExtensionRangeNode(e *descriptorpb.DescriptorProto_ExtensionRange, er *ast.ExtensionRangeNode, n *ast.RangeNode) {
	r.nodes[asExtsNode(e)] = er
	r.nodes[e] = n
}
func (r *result) putMessageReservedRangeNode(rr *descriptorpb.DescriptorProto_ReservedRange, n *ast.RangeNode) {
	r.nodes[rr] = n
}
func (r *result) putEnumNode(e *descriptorpb.EnumDescriptorProto, n *ast.EnumNode) {
	r.nodes[e] = n
}
func (r *result) putEnumValueNode(e *descriptorpb.EnumValueDescriptorProto, n *ast.EnumValueNode) {
	r.nodes[e] = n
}
func (r *result) putEnumReservedRangeNode(rr *descriptorpb.EnumDescriptorProto_EnumReservedRange, n *ast.RangeNode) {
	r.nodes[rr] = n
}
func (r *result) putServiceNode(s *descriptorpb.ServiceDescriptorProto, n *ast.ServiceNode) {
	r.nodes[s] = n
}
func (r *result) putMethodNode(m *descriptorpb.MethodDescriptorProto, n *ast.RPCNode) {
	r.nodes[m] = n
}
// NB: If we ever add other put*Node methods, to index other kinds of elements in the descriptor
// proto hierarchy, we need to update the index recreation logic in clone.go, too.

// asExtsNode wraps the given extension range in the extsParent marker type,
// producing a map key distinct from the range itself.
func asExtsNode(er *descriptorpb.DescriptorProto_ExtensionRange) proto.Message {
	return extsParent{er}
}

// a simple marker type that allows us to have two distinct keys in a map for
// the same ExtensionRange proto -- one for the range itself and another to
// associate with the enclosing/parent AST node.
type extsParent struct {
	*descriptorpb.DescriptorProto_ExtensionRange
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package parser
import (
"fmt"
"sort"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/descriptorpb"
"github.com/bufbuild/protocompile/ast"
"github.com/bufbuild/protocompile/internal"
"github.com/bufbuild/protocompile/reporter"
"github.com/bufbuild/protocompile/walk"
)
// validateBasic performs the initial validation pass over a parsed file:
// it determines the file's syntax level, checks imports for duplicates,
// rejects 'features' options outside editions, and then walks every element
// of the descriptor hierarchy to validate messages, fields, enums, and the
// options of oneofs, enum values, services, and methods. All problems are
// reported through handler; the function returns as soon as the handler
// signals that processing should stop.
func validateBasic(res *result, handler *reporter.Handler) {
	fd := res.proto
	var syntax protoreflect.Syntax
	switch fd.GetSyntax() {
	case "", "proto2":
		// an absent syntax declaration is treated as proto2
		syntax = protoreflect.Proto2
	case "proto3":
		syntax = protoreflect.Proto3
	case "editions":
		syntax = protoreflect.Editions
		// TODO: default: error?
	}
	if err := validateImports(res, handler); err != nil {
		return
	}
	if err := validateNoFeatures(res, syntax, "file options", fd.Options.GetUninterpretedOption(), handler); err != nil {
		return
	}
	_ = walk.DescriptorProtos(fd,
		func(name protoreflect.FullName, d proto.Message) error {
			switch d := d.(type) {
			case *descriptorpb.DescriptorProto:
				if err := validateMessage(res, syntax, name, d, handler); err != nil {
					// exit func is not called when enter returns error
					return err
				}
			case *descriptorpb.FieldDescriptorProto:
				if err := validateField(res, syntax, name, d, handler); err != nil {
					return err
				}
			case *descriptorpb.OneofDescriptorProto:
				if err := validateNoFeatures(res, syntax, fmt.Sprintf("oneof %s", name), d.Options.GetUninterpretedOption(), handler); err != nil {
					return err
				}
			case *descriptorpb.EnumDescriptorProto:
				if err := validateEnum(res, syntax, name, d, handler); err != nil {
					return err
				}
			case *descriptorpb.EnumValueDescriptorProto:
				if err := validateNoFeatures(res, syntax, fmt.Sprintf("enum value %s", name), d.Options.GetUninterpretedOption(), handler); err != nil {
					return err
				}
			case *descriptorpb.ServiceDescriptorProto:
				if err := validateNoFeatures(res, syntax, fmt.Sprintf("service %s", name), d.Options.GetUninterpretedOption(), handler); err != nil {
					return err
				}
			case *descriptorpb.MethodDescriptorProto:
				if err := validateNoFeatures(res, syntax, fmt.Sprintf("method %s", name), d.Options.GetUninterpretedOption(), handler); err != nil {
					return err
				}
			}
			return nil
		})
}
// validateImports verifies that no file is imported more than once,
// reporting the position of the previous import when a duplicate is found.
// It is a no-op when the result has no AST.
func validateImports(res *result, handler *reporter.Handler) error {
	fileNode := res.file
	if fileNode == nil {
		return nil
	}
	seen := make(map[string]ast.SourcePos)
	for _, decl := range fileNode.Decls {
		imp, ok := decl.(*ast.ImportNode)
		if !ok {
			continue
		}
		info := fileNode.NodeInfo(decl)
		name := imp.Name.AsString()
		if prev, dup := seen[name]; dup {
			return handler.HandleErrorf(info, "%q was already imported at %v", name, prev)
		}
		seen[name] = info.Start()
	}
	return nil
}
// validateNoFeatures reports an error if any of the given uninterpreted
// options is the 'features' option, which is only permitted in editions
// files. For editions syntax it does nothing.
func validateNoFeatures(res *result, syntax protoreflect.Syntax, scope string, opts []*descriptorpb.UninterpretedOption, handler *reporter.Handler) error {
	if syntax == protoreflect.Editions {
		// Editions is allowed to use features
		return nil
	}
	index, err := internal.FindFirstOption(res, handler.HandleErrorf, scope, opts, "features")
	if err != nil {
		return err
	}
	if index < 0 {
		// no 'features' option present
		return nil
	}
	optNode := res.OptionNode(opts[index])
	optNameNodeInfo := res.file.NodeInfo(optNode.GetName())
	return handler.HandleErrorf(optNameNodeInfo, "%s: option 'features' may only be used with editions but file uses %s syntax", scope, syntax)
}
// validateMessage checks a message descriptor for structural problems:
// proto3 extension ranges, explicit map_entry options, 'features' usage
// outside editions, overlapping reserved/extension ranges, invalid reserved
// names, and fields that reuse tags or collide with reserved/extension
// ranges or reserved names. Errors are reported through handler, which may
// allow processing to continue; a non-nil return means validation stopped.
// Note: message reserved/extension range ends are exclusive in the
// descriptor, so error messages print end-1 to show the inclusive bound.
func validateMessage(res *result, syntax protoreflect.Syntax, name protoreflect.FullName, md *descriptorpb.DescriptorProto, handler *reporter.Handler) error {
	scope := fmt.Sprintf("message %s", name)
	if syntax == protoreflect.Proto3 && len(md.ExtensionRange) > 0 {
		n := res.ExtensionRangeNode(md.ExtensionRange[0])
		nInfo := res.file.NodeInfo(n)
		if err := handler.HandleErrorf(nInfo, "%s: extension ranges are not allowed in proto3", scope); err != nil {
			return err
		}
	}
	if index, err := internal.FindOption(res, handler.HandleErrorf, scope, md.Options.GetUninterpretedOption(), "map_entry"); err != nil {
		return err
	} else if index >= 0 {
		optNode := res.OptionNode(md.Options.GetUninterpretedOption()[index])
		optNameNodeInfo := res.file.NodeInfo(optNode.GetName())
		if err := handler.HandleErrorf(optNameNodeInfo, "%s: map_entry option should not be set explicitly; use map type instead", scope); err != nil {
			return err
		}
	}
	if err := validateNoFeatures(res, syntax, scope, md.Options.GetUninterpretedOption(), handler); err != nil {
		return err
	}
	// reserved ranges should not overlap
	rsvd := make(tagRanges, len(md.ReservedRange))
	for i, r := range md.ReservedRange {
		n := res.MessageReservedRangeNode(r)
		rsvd[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n}
	}
	sort.Sort(rsvd)
	for i := 1; i < len(rsvd); i++ {
		if rsvd[i].start < rsvd[i-1].end {
			rangeNodeInfo := res.file.NodeInfo(rsvd[i].node)
			if err := handler.HandleErrorf(rangeNodeInfo, "%s: reserved ranges overlap: %d to %d and %d to %d", scope, rsvd[i-1].start, rsvd[i-1].end-1, rsvd[i].start, rsvd[i].end-1); err != nil {
				return err
			}
		}
	}
	// extensions ranges should not overlap
	exts := make(tagRanges, len(md.ExtensionRange))
	for i, r := range md.ExtensionRange {
		if err := validateNoFeatures(res, syntax, scope, r.Options.GetUninterpretedOption(), handler); err != nil {
			return err
		}
		n := res.ExtensionRangeNode(r)
		exts[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n}
	}
	sort.Sort(exts)
	for i := 1; i < len(exts); i++ {
		if exts[i].start < exts[i-1].end {
			rangeNodeInfo := res.file.NodeInfo(exts[i].node)
			if err := handler.HandleErrorf(rangeNodeInfo, "%s: extension ranges overlap: %d to %d and %d to %d", scope, exts[i-1].start, exts[i-1].end-1, exts[i].start, exts[i].end-1); err != nil {
				return err
			}
		}
	}
	// see if any extension range overlaps any reserved range
	// (linear merge over the two sorted slices)
	var i, j int // i indexes rsvd; j indexes exts
	for i < len(rsvd) && j < len(exts) {
		if rsvd[i].start >= exts[j].start && rsvd[i].start < exts[j].end ||
			exts[j].start >= rsvd[i].start && exts[j].start < rsvd[i].end {
			// report at whichever range starts inside the other
			var span ast.SourceSpan
			if rsvd[i].start >= exts[j].start && rsvd[i].start < exts[j].end {
				rangeNodeInfo := res.file.NodeInfo(rsvd[i].node)
				span = rangeNodeInfo
			} else {
				rangeNodeInfo := res.file.NodeInfo(exts[j].node)
				span = rangeNodeInfo
			}
			// ranges overlap
			if err := handler.HandleErrorf(span, "%s: extension range %d to %d overlaps reserved range %d to %d", scope, exts[j].start, exts[j].end-1, rsvd[i].start, rsvd[i].end-1); err != nil {
				return err
			}
		}
		if rsvd[i].start < exts[j].start {
			i++
		} else {
			j++
		}
	}
	// now, check that fields don't re-use tags and don't try to use extension
	// or reserved ranges or reserved names
	rsvdNames := map[string]struct{}{}
	for _, n := range md.ReservedName {
		// validate reserved name while we're here
		if !isIdentifier(n) {
			node := findMessageReservedNameNode(res.MessageNode(md), n)
			nodeInfo := res.file.NodeInfo(node)
			if err := handler.HandleErrorf(nodeInfo, "%s: reserved name %q is not a valid identifier", scope, n); err != nil {
				return err
			}
		}
		rsvdNames[n] = struct{}{}
	}
	fieldTags := map[int32]string{}
	for _, fld := range md.Field {
		fn := res.FieldNode(fld)
		if _, ok := rsvdNames[fld.GetName()]; ok {
			fieldNameNodeInfo := res.file.NodeInfo(fn.FieldName())
			if err := handler.HandleErrorf(fieldNameNodeInfo, "%s: field %s is using a reserved name", scope, fld.GetName()); err != nil {
				return err
			}
		}
		if existing := fieldTags[fld.GetNumber()]; existing != "" {
			fieldTagNodeInfo := res.file.NodeInfo(fn.FieldTag())
			if err := handler.HandleErrorf(fieldTagNodeInfo, "%s: fields %s and %s both have the same tag %d", scope, existing, fld.GetName(), fld.GetNumber()); err != nil {
				return err
			}
		}
		fieldTags[fld.GetNumber()] = fld.GetName()
		// check reserved ranges (binary search over the sorted ranges)
		r := sort.Search(len(rsvd), func(index int) bool { return rsvd[index].end > fld.GetNumber() })
		if r < len(rsvd) && rsvd[r].start <= fld.GetNumber() {
			fieldTagNodeInfo := res.file.NodeInfo(fn.FieldTag())
			if err := handler.HandleErrorf(fieldTagNodeInfo, "%s: field %s is using tag %d which is in reserved range %d to %d", scope, fld.GetName(), fld.GetNumber(), rsvd[r].start, rsvd[r].end-1); err != nil {
				return err
			}
		}
		// and check extension ranges
		e := sort.Search(len(exts), func(index int) bool { return exts[index].end > fld.GetNumber() })
		if e < len(exts) && exts[e].start <= fld.GetNumber() {
			fieldTagNodeInfo := res.file.NodeInfo(fn.FieldTag())
			if err := handler.HandleErrorf(fieldTagNodeInfo, "%s: field %s is using tag %d which is in extension range %d to %d", scope, fld.GetName(), fld.GetNumber(), exts[e].start, exts[e].end-1); err != nil {
				return err
			}
		}
	}
	return nil
}
// isIdentifier reports whether s is a valid protobuf identifier: one or more
// ASCII letters, digits, or underscores, not starting with a digit.
func isIdentifier(s string) bool {
	if s == "" {
		return false
	}
	for i, r := range s {
		isDigit := r >= '0' && r <= '9'
		if isDigit {
			if i == 0 {
				// can't start with number
				return false
			}
			continue
		}
		isLetter := (r >= 'a' && r <= 'z') || (r >= 'A' && r <= 'Z')
		// alphanumeric and underscore ok; everything else bad
		if !isLetter && r != '_' {
			return false
		}
	}
	return true
}
// findMessageReservedNameNode locates the AST node for the given reserved
// name within a message declaration, falling back to the message node itself
// when the name (or the declaration list) cannot be found.
func findMessageReservedNameNode(msgNode ast.MessageDeclNode, name string) ast.Node {
	var decls []ast.MessageElement
	switch node := msgNode.(type) {
	case *ast.MessageNode:
		decls = node.Decls
	case *ast.SyntheticGroupMessageNode:
		decls = node.Decls
	default:
		// leave decls empty
	}
	return findReservedNameNode(msgNode, decls, name)
}
// findReservedNameNode scans the given declarations for a reserved statement
// containing the named string and returns that name's node. If no match is
// found it returns parent, so callers always have something to point at.
func findReservedNameNode[T ast.Node](parent ast.Node, decls []T, name string) ast.Node {
	for _, decl := range decls {
		// NB: We have to convert to empty interface first, before we can do a type
		// assertion because type assertions on type parameters aren't allowed. (The
		// compiler cannot yet know whether T is an interface type or not.)
		rsvd, ok := any(decl).(*ast.ReservedNode)
		if !ok {
			continue
		}
		for _, candidate := range rsvd.Names {
			if candidate.AsString() == name {
				return candidate
			}
		}
	}
	// couldn't find it? Instead of puking, report position of the parent.
	return parent
}
// validateEnum checks an enum descriptor: it must have at least one value,
// may not use 'features' outside editions, must use allow_alias correctly
// (a boolean, and only when aliases actually exist), must start at zero in
// proto3, and its values must not collide with each other (absent
// allow_alias) or with reserved names/ranges. Errors are reported through
// handler; a non-nil return means validation stopped early.
// Note: enum reserved range ends are inclusive (unlike message ranges), so
// the overlap and membership checks below use <= / >= rather than < / >.
func validateEnum(res *result, syntax protoreflect.Syntax, name protoreflect.FullName, ed *descriptorpb.EnumDescriptorProto, handler *reporter.Handler) error {
	scope := fmt.Sprintf("enum %s", name)
	if len(ed.Value) == 0 {
		enNode := res.EnumNode(ed)
		enNodeInfo := res.file.NodeInfo(enNode)
		if err := handler.HandleErrorf(enNodeInfo, "%s: enums must define at least one value", scope); err != nil {
			return err
		}
	}
	if err := validateNoFeatures(res, syntax, scope, ed.Options.GetUninterpretedOption(), handler); err != nil {
		return err
	}
	allowAlias := false
	var allowAliasOpt *descriptorpb.UninterpretedOption
	if index, err := internal.FindOption(res, handler.HandleErrorf, scope, ed.Options.GetUninterpretedOption(), "allow_alias"); err != nil {
		return err
	} else if index >= 0 {
		allowAliasOpt = ed.Options.UninterpretedOption[index]
		// the option value must be the identifier "true" or "false"
		valid := false
		if allowAliasOpt.IdentifierValue != nil {
			if allowAliasOpt.GetIdentifierValue() == "true" {
				allowAlias = true
				valid = true
			} else if allowAliasOpt.GetIdentifierValue() == "false" {
				valid = true
			}
		}
		if !valid {
			optNode := res.OptionNode(allowAliasOpt)
			optNodeInfo := res.file.NodeInfo(optNode.GetValue())
			if err := handler.HandleErrorf(optNodeInfo, "%s: expecting bool value for allow_alias option", scope); err != nil {
				return err
			}
		}
	}
	if syntax == protoreflect.Proto3 && len(ed.Value) > 0 && ed.Value[0].GetNumber() != 0 {
		evNode := res.EnumValueNode(ed.Value[0])
		evNodeInfo := res.file.NodeInfo(evNode.GetNumber())
		if err := handler.HandleErrorf(evNodeInfo, "%s: proto3 requires that first value of enum have numeric value zero", scope); err != nil {
			return err
		}
	}
	// check for aliases
	vals := map[int32]string{}
	hasAlias := false
	for _, evd := range ed.Value {
		existing := vals[evd.GetNumber()]
		if existing != "" {
			if allowAlias {
				hasAlias = true
			} else {
				evNode := res.EnumValueNode(evd)
				evNodeInfo := res.file.NodeInfo(evNode.GetNumber())
				if err := handler.HandleErrorf(evNodeInfo, "%s: values %s and %s both have the same numeric value %d; use allow_alias option if intentional", scope, existing, evd.GetName(), evd.GetNumber()); err != nil {
					return err
				}
			}
		}
		vals[evd.GetNumber()] = evd.GetName()
	}
	// allow_alias set without any actual alias is also an error
	if allowAlias && !hasAlias {
		optNode := res.OptionNode(allowAliasOpt)
		optNodeInfo := res.file.NodeInfo(optNode.GetValue())
		if err := handler.HandleErrorf(optNodeInfo, "%s: allow_alias is true but no values are aliases", scope); err != nil {
			return err
		}
	}
	// reserved ranges should not overlap
	rsvd := make(tagRanges, len(ed.ReservedRange))
	for i, r := range ed.ReservedRange {
		n := res.EnumReservedRangeNode(r)
		rsvd[i] = tagRange{start: r.GetStart(), end: r.GetEnd(), node: n}
	}
	sort.Sort(rsvd)
	for i := 1; i < len(rsvd); i++ {
		if rsvd[i].start <= rsvd[i-1].end {
			rangeNodeInfo := res.file.NodeInfo(rsvd[i].node)
			if err := handler.HandleErrorf(rangeNodeInfo, "%s: reserved ranges overlap: %d to %d and %d to %d", scope, rsvd[i-1].start, rsvd[i-1].end, rsvd[i].start, rsvd[i].end); err != nil {
				return err
			}
		}
	}
	// now, check that fields don't re-use tags and don't try to use extension
	// or reserved ranges or reserved names
	rsvdNames := map[string]struct{}{}
	for _, n := range ed.ReservedName {
		// validate reserved name while we're here
		if !isIdentifier(n) {
			node := findEnumReservedNameNode(res.EnumNode(ed), n)
			nodeInfo := res.file.NodeInfo(node)
			if err := handler.HandleErrorf(nodeInfo, "%s: reserved name %q is not a valid identifier", scope, n); err != nil {
				return err
			}
		}
		rsvdNames[n] = struct{}{}
	}
	for _, ev := range ed.Value {
		evn := res.EnumValueNode(ev)
		if _, ok := rsvdNames[ev.GetName()]; ok {
			enumValNodeInfo := res.file.NodeInfo(evn.GetName())
			if err := handler.HandleErrorf(enumValNodeInfo, "%s: value %s is using a reserved name", scope, ev.GetName()); err != nil {
				return err
			}
		}
		// check reserved ranges (binary search over the sorted ranges)
		r := sort.Search(len(rsvd), func(index int) bool { return rsvd[index].end >= ev.GetNumber() })
		if r < len(rsvd) && rsvd[r].start <= ev.GetNumber() {
			enumValNodeInfo := res.file.NodeInfo(evn.GetNumber())
			if err := handler.HandleErrorf(enumValNodeInfo, "%s: value %s is using number %d which is in reserved range %d to %d", scope, ev.GetName(), ev.GetNumber(), rsvd[r].start, rsvd[r].end); err != nil {
				return err
			}
		}
	}
	return nil
}
// findEnumReservedNameNode locates the AST node for the given reserved name
// within an enum declaration, falling back to the enum node itself when the
// name cannot be found or the node is not an *ast.EnumNode.
func findEnumReservedNameNode(enumNode ast.Node, name string) ast.Node {
	var decls []ast.EnumElement
	if en, ok := enumNode.(*ast.EnumNode); ok {
		decls = en.Decls
	}
	// if not the right type, we leave decls empty
	return findReservedNameNode(enumNode, decls, name)
}
// validateField checks a single field or extension descriptor for violations
// that depend on the file's syntax level (proto2, proto3, or editions).
// Problems are reported through handler; a non-nil return means the handler
// chose to abort, and validation should stop. On success, it finishes by
// verifying that no "features" options are used where they are not allowed.
func validateField(res *result, syntax protoreflect.Syntax, name protoreflect.FullName, fld *descriptorpb.FieldDescriptorProto, handler *reporter.Handler) error {
	// scope is the human-readable prefix used in every error message below
	var scope string
	if fld.Extendee != nil {
		scope = fmt.Sprintf("extension %s", name)
	} else {
		scope = fmt.Sprintf("field %s", name)
	}
	node := res.FieldNode(fld)
	if fld.Number == nil {
		// every field must carry an explicit tag number
		fieldTagNodeInfo := res.file.NodeInfo(node)
		if err := handler.HandleErrorf(fieldTagNodeInfo, "%s: missing field tag number", scope); err != nil {
			return err
		}
	}
	if syntax != protoreflect.Proto2 {
		// checks common to proto3 and editions
		if fld.GetType() == descriptorpb.FieldDescriptorProto_TYPE_GROUP {
			groupNodeInfo := res.file.NodeInfo(node.GetGroupKeyword())
			if err := handler.HandleErrorf(groupNodeInfo, "%s: groups are not allowed in proto3 or editions", scope); err != nil {
				return err
			}
		} else if fld.Label != nil && fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED {
			fieldLabelNodeInfo := res.file.NodeInfo(node.FieldLabel())
			if err := handler.HandleErrorf(fieldLabelNodeInfo, "%s: label 'required' is not allowed in proto3 or editions", scope); err != nil {
				return err
			}
		}
		if syntax == protoreflect.Editions {
			// editions replaces explicit labels and the packed option with features
			if fld.Label != nil && fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL {
				fieldLabelNodeInfo := res.file.NodeInfo(node.FieldLabel())
				if err := handler.HandleErrorf(fieldLabelNodeInfo, "%s: label 'optional' is not allowed in editions; use option features.field_presence instead", scope); err != nil {
					return err
				}
			}
			if index, err := internal.FindOption(res, handler.HandleErrorf, scope, fld.Options.GetUninterpretedOption(), "packed"); err != nil {
				return err
			} else if index >= 0 {
				optNode := res.OptionNode(fld.Options.GetUninterpretedOption()[index])
				optNameNodeInfo := res.file.NodeInfo(optNode.GetName())
				if err := handler.HandleErrorf(optNameNodeInfo, "%s: packed option is not allowed in editions; use option features.repeated_field_encoding instead", scope); err != nil {
					return err
				}
			}
		} else if syntax == protoreflect.Proto3 {
			// proto3 forbids custom default values
			if index, err := internal.FindOption(res, handler.HandleErrorf, scope, fld.Options.GetUninterpretedOption(), "default"); err != nil {
				return err
			} else if index >= 0 {
				optNode := res.OptionNode(fld.Options.GetUninterpretedOption()[index])
				optNameNodeInfo := res.file.NodeInfo(optNode.GetName())
				if err := handler.HandleErrorf(optNameNodeInfo, "%s: default values are not allowed in proto3", scope); err != nil {
					return err
				}
			}
		}
	} else {
		// proto2-specific checks
		if fld.Label == nil && fld.OneofIndex == nil {
			// oneof members are the only proto2 fields allowed to omit a label
			fieldNameNodeInfo := res.file.NodeInfo(node.FieldName())
			if err := handler.HandleErrorf(fieldNameNodeInfo, "%s: field has no label; proto2 requires explicit 'optional' label", scope); err != nil {
				return err
			}
		}
		if fld.GetExtendee() != "" && fld.Label != nil && fld.GetLabel() == descriptorpb.FieldDescriptorProto_LABEL_REQUIRED {
			fieldLabelNodeInfo := res.file.NodeInfo(node.FieldLabel())
			if err := handler.HandleErrorf(fieldLabelNodeInfo, "%s: extension fields cannot be 'required'", scope); err != nil {
				return err
			}
		}
	}
	return validateNoFeatures(res, syntax, scope, fld.Options.GetUninterpretedOption(), handler)
}
// tagRange describes a contiguous range of tag numbers (or enum values)
// together with the AST node that declared it, so range-related errors can be
// reported at the declaration's source position.
// NOTE(review): the enum checks in this file treat end as inclusive
// (membership is start <= n <= end) — confirm the same convention holds for
// message tag ranges before relying on it.
type tagRange struct {
	start int32
	end int32
	node ast.RangeDeclNode
}
// tagRanges is a slice of tagRange values that implements sort.Interface,
// ordering ranges by start and breaking ties by end.
type tagRanges []tagRange

// Len returns the number of ranges.
func (r tagRanges) Len() int {
	return len(r)
}

// Less reports whether the range at i sorts before the range at j:
// primarily by start, then by end when the starts are equal.
func (r tagRanges) Less(i, j int) bool {
	if r[i].start != r[j].start {
		return r[i].start < r[j].start
	}
	return r[i].end < r[j].end
}

// Swap exchanges the ranges at i and j.
func (r tagRanges) Swap(i, j int) {
	r[j], r[i] = r[i], r[j]
}
// fillInMissingLabels applies the default label to every field in the file
// that lacks an explicit one: all fields of top-level messages (recursively,
// via fillInMissingLabelsInMsg) and all top-level extensions.
func fillInMissingLabels(fd *descriptorpb.FileDescriptorProto) {
	for _, message := range fd.MessageType {
		fillInMissingLabelsInMsg(message)
	}
	for _, ext := range fd.Extension {
		fillInMissingLabel(ext)
	}
}
// fillInMissingLabelsInMsg applies the default label to every field and
// extension declared in the given message, recursing into nested messages.
func fillInMissingLabelsInMsg(md *descriptorpb.DescriptorProto) {
	for _, field := range md.Field {
		fillInMissingLabel(field)
	}
	for _, nested := range md.NestedType {
		fillInMissingLabelsInMsg(nested)
	}
	for _, ext := range md.Extension {
		fillInMissingLabel(ext)
	}
}
// fillInMissingLabel sets the field's label to LABEL_OPTIONAL when no label
// was explicitly specified. Fields that already have a label are untouched.
func fillInMissingLabel(fld *descriptorpb.FieldDescriptorProto) {
	if fld.Label != nil {
		return
	}
	fld.Label = descriptorpb.FieldDescriptorProto_LABEL_OPTIONAL.Enum()
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protoutil
import (
"fmt"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/descriptorpb"
"google.golang.org/protobuf/types/dynamicpb"
"github.com/bufbuild/protocompile/internal/editions"
)
// GetFeatureDefault gets the default value for the given feature and the given
// edition. The given feature must represent a field of the google.protobuf.FeatureSet
// message and must not be an extension.
//
// If the given field is from a dynamically built descriptor (i.e. its containing
// message descriptor is different from the linked-in descriptor for
// [*descriptorpb.FeatureSet]), the returned value may be a dynamic value. In such
// cases, the value may not be directly usable using [protoreflect.Message.Set] with
// an instance of [*descriptorpb.FeatureSet] and must instead be used with a
// [*dynamicpb.Message].
//
// To get the default value of a custom feature, use [GetCustomFeatureDefault]
// instead.
func GetFeatureDefault(edition descriptorpb.Edition, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
	if feature.ContainingMessage().FullName() != editions.FeatureSetDescriptor.FullName() {
		return protoreflect.Value{}, fmt.Errorf("feature %s is a field of %s but should be a field of %s",
			feature.Name(), feature.ContainingMessage().FullName(), editions.FeatureSetDescriptor.FullName())
	}
	var msgType protoreflect.MessageType
	if feature.ContainingMessage() == editions.FeatureSetDescriptor {
		// feature comes from the linked-in FeatureSet descriptor, so the
		// generated message type can be used directly
		msgType = editions.FeatureSetType
	} else {
		// dynamically built descriptor: wrap it in a dynamic message type
		msgType = dynamicpb.NewMessageType(feature.ContainingMessage())
	}
	return editions.GetFeatureDefault(edition, msgType, feature)
}
// GetCustomFeatureDefault gets the default value for the given custom feature
// and given edition. A custom feature is a field whose containing message is the
// type of an extension field of google.protobuf.FeatureSet. The given extension
// describes that extension field and message type. The given feature must be a
// field of that extension's message type.
func GetCustomFeatureDefault(edition descriptorpb.Edition, extension protoreflect.ExtensionType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
	extDesc := extension.TypeDescriptor()
	// the extension must extend google.protobuf.FeatureSet
	if extDesc.ContainingMessage().FullName() != editions.FeatureSetDescriptor.FullName() {
		return protoreflect.Value{}, fmt.Errorf("extension %s does not extend %s", extDesc.FullName(), editions.FeatureSetDescriptor.FullName())
	}
	// the extension field itself must be of message kind
	if extDesc.Message() == nil {
		return protoreflect.Value{}, fmt.Errorf("extensions of %s should be messages; %s is instead %s",
			editions.FeatureSetDescriptor.FullName(), extDesc.FullName(), extDesc.Kind().String())
	}
	// the feature must be a regular field (not itself an extension) ...
	if feature.IsExtension() {
		return protoreflect.Value{}, fmt.Errorf("feature %s is an extension, but feature extension %s may not itself have extensions",
			feature.FullName(), extDesc.FullName())
	}
	// ... declared in the extension's message type
	if feature.ContainingMessage().FullName() != extDesc.Message().FullName() {
		return protoreflect.Value{}, fmt.Errorf("feature %s is a field of %s but should be a field of %s",
			feature.Name(), feature.ContainingMessage().FullName(), extDesc.Message().FullName())
	}
	// names match but the descriptors must also be the same instance,
	// otherwise the feature belongs to a different (e.g. dynamic) build
	if feature.ContainingMessage() != extDesc.Message() {
		return protoreflect.Value{}, fmt.Errorf("feature %s has a different message descriptor from the given extension type for %s",
			feature.Name(), extDesc.Message().FullName())
	}
	return editions.GetFeatureDefault(edition, extension.Zero().Message().Type(), feature)
}
// ResolveFeature resolves a feature for the given descriptor.
//
// If the given element is in a proto2 or proto3 syntax file, this skips
// resolution and just returns the relevant default (since such files are not
// allowed to override features). If neither the given element nor any of its
// ancestors override the given feature, the relevant default is returned.
//
// This has the same caveat as GetFeatureDefault if the given feature is from a
// dynamically built descriptor.
func ResolveFeature(element protoreflect.Descriptor, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
	edition := editions.GetEdition(element)
	// compute the edition default first; it is the fallback when no override exists
	fallback, err := GetFeatureDefault(edition, feature)
	if err != nil {
		return protoreflect.Value{}, err
	}
	return resolveFeature(edition, fallback, element, feature)
}
// ResolveCustomFeature resolves a custom feature for the given extension and
// field descriptor.
//
// The given extension must be an extension of google.protobuf.FeatureSet that
// represents a non-repeated message value. The given feature is a field in
// that extension's message type.
//
// If the given element is in a proto2 or proto3 syntax file, this skips
// resolution and just returns the relevant default (since such files are not
// allowed to override features). If neither the given element nor any of its
// ancestors override the given feature, the relevant default is returned.
func ResolveCustomFeature(element protoreflect.Descriptor, extension protoreflect.ExtensionType, feature protoreflect.FieldDescriptor) (protoreflect.Value, error) {
	edition := editions.GetEdition(element)
	// compute the edition default first; it is the fallback when no override exists
	fallback, err := GetCustomFeatureDefault(edition, extension, feature)
	if err != nil {
		return protoreflect.Value{}, err
	}
	// resolution must traverse the extension field and then the custom feature field
	return resolveFeature(edition, fallback, element, extension.TypeDescriptor(), feature)
}
// resolveFeature searches the descriptor hierarchy of element for an override
// of the feature identified by the given field path, returning defaultVal
// when no override is found (or when the edition cannot have overrides).
func resolveFeature(
	edition descriptorpb.Edition,
	defaultVal protoreflect.Value,
	element protoreflect.Descriptor,
	fields ...protoreflect.FieldDescriptor,
) (protoreflect.Value, error) {
	switch edition {
	case descriptorpb.Edition_EDITION_PROTO2, descriptorpb.Edition_EDITION_PROTO3:
		// these syntax levels can't specify features, so we can short-circuit the
		// search through the descriptor hierarchy for feature overrides
		return defaultVal, nil
	}
	val, err := editions.ResolveFeature(element, fields...)
	if err != nil {
		return protoreflect.Value{}, err
	}
	if !val.IsValid() {
		// no override found anywhere in the hierarchy
		return defaultVal, nil
	}
	return val, nil
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package protoutil contains useful functions for interacting with descriptors.
// For now these include only functions for efficiently converting descriptors
// produced by the compiler to descriptor protos and functions for resolving
// "features" (a core concept of Protobuf Editions).
//
// Despite the fact that descriptor protos are mutable, calling code should NOT
// mutate any of the protos returned from this package. For efficiency, some
// values returned from this package may reference internal state of a compiler
// result, and mutating the proto could corrupt or invalidate parts of that
// result.
package protoutil
import (
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protodesc"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/descriptorpb"
)
// DescriptorProtoWrapper is a protoreflect.Descriptor that wraps an
// underlying descriptor proto. It provides the same interface as
// Descriptor but with one extra operation, to efficiently query for
// the underlying descriptor proto.
//
// Descriptors that implement this will also implement another method
// whose specified return type is the concrete type returned by the
// AsProto method. The name of this method varies by the type of this
// descriptor:
//
//	 Descriptor Type        Other Method Name
//	---------------------+------------------------------------
//	 FileDescriptor      |  FileDescriptorProto()
//	 MessageDescriptor   |  MessageDescriptorProto()
//	 FieldDescriptor     |  FieldDescriptorProto()
//	 OneofDescriptor     |  OneofDescriptorProto()
//	 EnumDescriptor      |  EnumDescriptorProto()
//	 EnumValueDescriptor |  EnumValueDescriptorProto()
//	 ServiceDescriptor   |  ServiceDescriptorProto()
//	 MethodDescriptor    |  MethodDescriptorProto()
//
// For example, a DescriptorProtoWrapper that implements FileDescriptor
// returns a *descriptorpb.FileDescriptorProto value from its AsProto
// method and also provides a method with the following signature:
//
//	FileDescriptorProto() *descriptorpb.FileDescriptorProto
type DescriptorProtoWrapper interface {
	protoreflect.Descriptor
	// AsProto returns the underlying descriptor proto. The concrete
	// type of the proto message depends on the type of this
	// descriptor:
	//	 Descriptor Type        Proto Message Type
	//	---------------------+------------------------------------
	//	 FileDescriptor      |  *descriptorpb.FileDescriptorProto
	//	 MessageDescriptor   |  *descriptorpb.DescriptorProto
	//	 FieldDescriptor     |  *descriptorpb.FieldDescriptorProto
	//	 OneofDescriptor     |  *descriptorpb.OneofDescriptorProto
	//	 EnumDescriptor      |  *descriptorpb.EnumDescriptorProto
	//	 EnumValueDescriptor |  *descriptorpb.EnumValueDescriptorProto
	//	 ServiceDescriptor   |  *descriptorpb.ServiceDescriptorProto
	//	 MethodDescriptor    |  *descriptorpb.MethodDescriptorProto
	AsProto() proto.Message
}
// ProtoFromDescriptor extracts a descriptor proto from the given "rich"
// descriptor. For descriptors generated by the compiler, this is an
// inexpensive and non-lossy operation. Descriptors from other sources
// however may be expensive (to re-create a proto) and even lossy.
//
// It dispatches to the type-specific ProtoFromXxxDescriptor function that
// matches the dynamic type of d. If d is none of the known descriptor kinds,
// it falls back to the DescriptorProtoWrapper interface, and returns nil
// when even that is not implemented.
func ProtoFromDescriptor(d protoreflect.Descriptor) proto.Message {
	switch d := d.(type) {
	case protoreflect.FileDescriptor:
		return ProtoFromFileDescriptor(d)
	case protoreflect.MessageDescriptor:
		return ProtoFromMessageDescriptor(d)
	case protoreflect.FieldDescriptor:
		return ProtoFromFieldDescriptor(d)
	case protoreflect.OneofDescriptor:
		return ProtoFromOneofDescriptor(d)
	case protoreflect.EnumDescriptor:
		return ProtoFromEnumDescriptor(d)
	case protoreflect.EnumValueDescriptor:
		return ProtoFromEnumValueDescriptor(d)
	case protoreflect.ServiceDescriptor:
		return ProtoFromServiceDescriptor(d)
	case protoreflect.MethodDescriptor:
		return ProtoFromMethodDescriptor(d)
	default:
		// unknown descriptor kind: we can't re-create a proto for it, but if
		// it wraps one, we can at least return that
		if res, ok := d.(DescriptorProtoWrapper); ok {
			return res.AsProto()
		}
		return nil
	}
}
// ProtoFromFileDescriptor extracts a descriptor proto from the given "rich"
// descriptor. For file descriptors generated by the compiler, this is an
// inexpensive and non-lossy operation. File descriptors from other sources
// however may be expensive (to re-create a proto) and even lossy.
func ProtoFromFileDescriptor(d protoreflect.FileDescriptor) *descriptorpb.FileDescriptorProto {
	// unwrap imports so we examine the actual file descriptor
	if imp, ok := d.(protoreflect.FileImport); ok {
		d = imp.FileDescriptor
	}
	// fast path: the descriptor exposes its proto directly
	type hasFileProto interface {
		FileDescriptorProto() *descriptorpb.FileDescriptorProto
	}
	if provider, ok := d.(hasFileProto); ok {
		return provider.FileDescriptorProto()
	}
	// next: a generic wrapper whose proto happens to be the right type
	if wrapper, ok := d.(DescriptorProtoWrapper); ok {
		if fileProto, ok := wrapper.AsProto().(*descriptorpb.FileDescriptorProto); ok {
			return fileProto
		}
	}
	// slow path: re-create a proto from the descriptor (possibly lossy)
	return protodesc.ToFileDescriptorProto(d)
}
// ProtoFromMessageDescriptor extracts a descriptor proto from the given "rich"
// descriptor. For message descriptors generated by the compiler, this is an
// inexpensive and non-lossy operation. Message descriptors from other sources
// however may be expensive (to re-create a proto) and even lossy.
func ProtoFromMessageDescriptor(d protoreflect.MessageDescriptor) *descriptorpb.DescriptorProto {
	// fast path: the descriptor exposes its proto directly
	type hasMessageProto interface {
		MessageDescriptorProto() *descriptorpb.DescriptorProto
	}
	if provider, ok := d.(hasMessageProto); ok {
		return provider.MessageDescriptorProto()
	}
	// next: a generic wrapper whose proto happens to be the right type
	if wrapper, ok := d.(DescriptorProtoWrapper); ok {
		if msgProto, ok := wrapper.AsProto().(*descriptorpb.DescriptorProto); ok {
			return msgProto
		}
	}
	// slow path: re-create a proto from the descriptor (possibly lossy)
	return protodesc.ToDescriptorProto(d)
}
// ProtoFromFieldDescriptor extracts a descriptor proto from the given "rich"
// descriptor. For field descriptors generated by the compiler, this is an
// inexpensive and non-lossy operation. Field descriptors from other sources
// however may be expensive (to re-create a proto) and even lossy.
func ProtoFromFieldDescriptor(d protoreflect.FieldDescriptor) *descriptorpb.FieldDescriptorProto {
	// fast path: the descriptor exposes its proto directly
	type hasFieldProto interface {
		FieldDescriptorProto() *descriptorpb.FieldDescriptorProto
	}
	if provider, ok := d.(hasFieldProto); ok {
		return provider.FieldDescriptorProto()
	}
	// next: a generic wrapper whose proto happens to be the right type
	if wrapper, ok := d.(DescriptorProtoWrapper); ok {
		if fieldProto, ok := wrapper.AsProto().(*descriptorpb.FieldDescriptorProto); ok {
			return fieldProto
		}
	}
	// slow path: re-create a proto from the descriptor (possibly lossy)
	return protodesc.ToFieldDescriptorProto(d)
}
// ProtoFromOneofDescriptor extracts a descriptor proto from the given "rich"
// descriptor. For oneof descriptors generated by the compiler, this is an
// inexpensive and non-lossy operation. Oneof descriptors from other sources
// however may be expensive (to re-create a proto) and even lossy.
func ProtoFromOneofDescriptor(d protoreflect.OneofDescriptor) *descriptorpb.OneofDescriptorProto {
	// fast path: the descriptor exposes its proto directly
	type hasOneofProto interface {
		OneofDescriptorProto() *descriptorpb.OneofDescriptorProto
	}
	if provider, ok := d.(hasOneofProto); ok {
		return provider.OneofDescriptorProto()
	}
	// next: a generic wrapper whose proto happens to be the right type
	if wrapper, ok := d.(DescriptorProtoWrapper); ok {
		if oneofProto, ok := wrapper.AsProto().(*descriptorpb.OneofDescriptorProto); ok {
			return oneofProto
		}
	}
	// slow path: re-create a proto from the descriptor (possibly lossy)
	return protodesc.ToOneofDescriptorProto(d)
}
// ProtoFromEnumDescriptor extracts a descriptor proto from the given "rich"
// descriptor. For enum descriptors generated by the compiler, this is an
// inexpensive and non-lossy operation. Enum descriptors from other sources
// however may be expensive (to re-create a proto) and even lossy.
func ProtoFromEnumDescriptor(d protoreflect.EnumDescriptor) *descriptorpb.EnumDescriptorProto {
	// fast path: the descriptor exposes its proto directly
	type hasEnumProto interface {
		EnumDescriptorProto() *descriptorpb.EnumDescriptorProto
	}
	if provider, ok := d.(hasEnumProto); ok {
		return provider.EnumDescriptorProto()
	}
	// next: a generic wrapper whose proto happens to be the right type
	if wrapper, ok := d.(DescriptorProtoWrapper); ok {
		if enumProto, ok := wrapper.AsProto().(*descriptorpb.EnumDescriptorProto); ok {
			return enumProto
		}
	}
	// slow path: re-create a proto from the descriptor (possibly lossy)
	return protodesc.ToEnumDescriptorProto(d)
}
// ProtoFromEnumValueDescriptor extracts a descriptor proto from the given "rich"
// descriptor. For enum value descriptors generated by the compiler, this is an
// inexpensive and non-lossy operation. Enum value descriptors from other sources
// however may be expensive (to re-create a proto) and even lossy.
func ProtoFromEnumValueDescriptor(d protoreflect.EnumValueDescriptor) *descriptorpb.EnumValueDescriptorProto {
	// fast path: the descriptor exposes its proto directly
	type hasEnumValueProto interface {
		EnumValueDescriptorProto() *descriptorpb.EnumValueDescriptorProto
	}
	if provider, ok := d.(hasEnumValueProto); ok {
		return provider.EnumValueDescriptorProto()
	}
	// next: a generic wrapper whose proto happens to be the right type
	if wrapper, ok := d.(DescriptorProtoWrapper); ok {
		if valueProto, ok := wrapper.AsProto().(*descriptorpb.EnumValueDescriptorProto); ok {
			return valueProto
		}
	}
	// slow path: re-create a proto from the descriptor (possibly lossy)
	return protodesc.ToEnumValueDescriptorProto(d)
}
// ProtoFromServiceDescriptor extracts a descriptor proto from the given "rich"
// descriptor. For service descriptors generated by the compiler, this is an
// inexpensive and non-lossy operation. Service descriptors from other sources
// however may be expensive (to re-create a proto) and even lossy.
func ProtoFromServiceDescriptor(d protoreflect.ServiceDescriptor) *descriptorpb.ServiceDescriptorProto {
	// fast path: the descriptor exposes its proto directly
	type hasServiceProto interface {
		ServiceDescriptorProto() *descriptorpb.ServiceDescriptorProto
	}
	if provider, ok := d.(hasServiceProto); ok {
		return provider.ServiceDescriptorProto()
	}
	// next: a generic wrapper whose proto happens to be the right type
	if wrapper, ok := d.(DescriptorProtoWrapper); ok {
		if svcProto, ok := wrapper.AsProto().(*descriptorpb.ServiceDescriptorProto); ok {
			return svcProto
		}
	}
	// slow path: re-create a proto from the descriptor (possibly lossy)
	return protodesc.ToServiceDescriptorProto(d)
}
// ProtoFromMethodDescriptor extracts a descriptor proto from the given "rich"
// descriptor. For method descriptors generated by the compiler, this is an
// inexpensive and non-lossy operation. Method descriptors from other sources
// however may be expensive (to re-create a proto) and even lossy.
func ProtoFromMethodDescriptor(d protoreflect.MethodDescriptor) *descriptorpb.MethodDescriptorProto {
	// fast path: the descriptor exposes its proto directly
	type hasMethodProto interface {
		MethodDescriptorProto() *descriptorpb.MethodDescriptorProto
	}
	if provider, ok := d.(hasMethodProto); ok {
		return provider.MethodDescriptorProto()
	}
	// next: a generic wrapper whose proto happens to be the right type
	if wrapper, ok := d.(DescriptorProtoWrapper); ok {
		if methodProto, ok := wrapper.AsProto().(*descriptorpb.MethodDescriptorProto); ok {
			return methodProto
		}
	}
	// slow path: re-create a proto from the descriptor (possibly lossy)
	return protodesc.ToMethodDescriptorProto(d)
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package reporter
import (
"errors"
"fmt"
"github.com/bufbuild/protocompile/ast"
)
// ErrInvalidSource is a sentinel error that is returned by compilation and
// stand-alone compilation steps (such as parsing, linking) when one or more
// errors is reported but the configured ErrorReporter always returns nil.
// Compare against it with errors.Is.
var ErrInvalidSource = errors.New("parse failed: invalid proto source")
// ErrorWithPos is an error about a proto source file that adds information
// about the location in the file that caused the error. It is both an error
// and an ast.SourceSpan, so it can be used anywhere either is expected.
type ErrorWithPos interface {
	error
	ast.SourceSpan
	// GetPosition returns the start source position that caused the underlying error.
	GetPosition() ast.SourcePos
	// Unwrap returns the underlying error.
	Unwrap() error
}
// Error creates a new ErrorWithPos from the given error and source position.
// If err already carries a position (anywhere in its chain), that position is
// discarded and replaced by the given span.
func Error(span ast.SourceSpan, err error) ErrorWithPos {
	underlying := err
	var ewp ErrorWithPos
	if errors.As(err, &ewp) {
		// replace existing position with given one: keep only the wrapped error
		underlying = ewp.Unwrap()
	}
	return &errorWithSpan{SourceSpan: span, underlying: underlying}
}
// Errorf creates a new ErrorWithPos whose underlying error is created using the
// given message format and arguments (via fmt.Errorf).
func Errorf(span ast.SourceSpan, format string, args ...any) ErrorWithPos {
	underlying := fmt.Errorf(format, args...)
	return Error(span, underlying)
}
// errorWithSpan is the concrete ErrorWithPos implementation: it pairs an
// underlying error with the source span it applies to. The embedded
// ast.SourceSpan provides the span accessor methods.
type errorWithSpan struct {
	ast.SourceSpan
	underlying error // the wrapped error; never itself an ErrorWithPos (see Error)
}
// Error renders the error as "<position>: <underlying error>".
func (e *errorWithSpan) Error() string {
	return fmt.Sprintf("%s: %v", e.GetPosition(), e.underlying)
}
// GetPosition returns the start of the span as the error's position.
func (e *errorWithSpan) GetPosition() ast.SourcePos {
	return e.Start()
}
// Unwrap returns the wrapped error, supporting errors.Is and errors.As.
func (e *errorWithSpan) Unwrap() error {
	return e.underlying
}
var _ ErrorWithPos = (*errorWithSpan)(nil)
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package reporter contains the types used for reporting errors from
// protocompile operations. It contains error types as well as interfaces
// for reporting and handling errors and warnings.
package reporter
import (
"sync"
"github.com/bufbuild/protocompile/ast"
)
// ErrorReporter is responsible for reporting the given error. If the reporter
// returns a non-nil error, parsing/linking will abort with that error. If the
// reporter returns nil, parsing will continue, allowing the parser to try to
// report as many syntax and/or link errors as it can find.
type ErrorReporter func(err ErrorWithPos) error

// WarningReporter is responsible for reporting the given warning. This is used
// for indicating non-error messages to the calling program for things that do
// not cause the parse to fail but are considered bad practice. Though they are
// just warnings, the details are supplied to the reporter via an error type.
type WarningReporter func(ErrorWithPos)
// Reporter is a type that handles reporting both errors and warnings.
// A reporter does not need to be thread-safe. Safe concurrent access is
// managed by a Handler.
type Reporter interface {
	// Error is called when the given error is encountered and needs to be
	// reported to the calling program. This signature matches ErrorReporter
	// because it has the same semantics. If this function returns non-nil
	// then the operation will abort immediately with the given error. But
	// if it returns nil, the operation will continue, reporting more errors
	// as they are encountered. If the reporter never returns non-nil then
	// the operation will eventually fail with ErrInvalidSource.
	Error(ErrorWithPos) error
	// Warning is called when the given warning is encountered and needs to be
	// reported to the calling program. Despite the argument being an error
	// type, a warning will never cause the operation to abort or fail (unless
	// the reporter's implementation of this method panics).
	Warning(ErrorWithPos)
}
// NewReporter creates a new reporter that invokes the given functions on error
// or warning. Either function may be nil: a nil errs aborts on the first
// error and a nil warnings discards warnings (see reporterFuncs).
func NewReporter(errs ErrorReporter, warnings WarningReporter) Reporter {
	return reporterFuncs{
		errs:     errs,
		warnings: warnings,
	}
}
// reporterFuncs adapts a pair of reporter functions to the Reporter interface.
// Either function may be nil; see the Error and Warning methods for how nil
// is handled.
type reporterFuncs struct {
	errs ErrorReporter
	warnings WarningReporter
}
// Error delegates to the configured error function. With no function
// configured, it returns the error itself, aborting on the first error.
func (r reporterFuncs) Error(err ErrorWithPos) error {
	if r.errs != nil {
		return r.errs(err)
	}
	return err
}
// Warning delegates to the configured warning function, silently discarding
// the warning when none is configured.
func (r reporterFuncs) Warning(err ErrorWithPos) {
	if r.warnings == nil {
		return
	}
	r.warnings(err)
}
// Handler is used by protocompile operations for handling errors and warnings.
// This type is thread-safe. It uses a mutex to serialize calls to its reporter
// so that reporter instances do not have to be thread-safe (unless re-used
// across multiple handlers).
type Handler struct {
	// parent, when non-nil, marks this as a child handler (see SubHandler);
	// reporting is delegated to the root while state below tracks only this child.
	parent *Handler
	// mu guards the fields below and serializes calls to reporter.
	mu sync.Mutex
	reporter Reporter
	// errsReported records whether any ErrorWithPos was ever handled.
	errsReported bool
	// err is the sticky abort error returned by the reporter (if any).
	err error
}
// NewHandler creates a new Handler that reports errors and warnings using the
// given reporter. A nil reporter is replaced with a default that aborts on
// the first error and ignores warnings.
func NewHandler(rep Reporter) *Handler {
	reporter := rep
	if reporter == nil {
		reporter = NewReporter(nil, nil)
	}
	return &Handler{reporter: reporter}
}
// SubHandler returns a "child" of h. Use of a child handler is the same as use
// of the parent, except that the Error() and ReporterError() functions only
// report non-nil for errors that were reported using the child handler. So
// errors reported directly to the parent or to a different child handler won't
// be returned. This is useful for making concurrent access to the handler more
// deterministic: if a child handler is only used from one goroutine, its view
// of reported errors is consistent and unimpacted by concurrent operations.
func (h *Handler) SubHandler() *Handler {
	// the child has no reporter of its own; HandleError/HandleWarning delegate to h
	return &Handler{parent: h}
}
// HandleError handles the given error. If the given err is an ErrorWithPos, it
// is reported, and this function returns the error returned by the reporter. If
// the given err is NOT an ErrorWithPos, the current operation will abort
// immediately.
//
// If the handler has already aborted (by returning a non-nil error from a prior
// call to HandleError or HandleErrorf), that same error is returned and the
// given error is not reported.
func (h *Handler) HandleError(err error) error {
	if h.parent != nil {
		// child handler: delegate reporting to the root, then mirror the
		// outcome into this child's own state
		_, isErrWithPos := err.(ErrorWithPos)
		err = h.parent.HandleError(err)
		// update child state
		h.mu.Lock()
		defer h.mu.Unlock()
		if isErrWithPos {
			h.errsReported = true
		}
		h.err = err
		return err
	}
	h.mu.Lock()
	defer h.mu.Unlock()
	if h.err != nil {
		// already aborted: return the prior error without reporting this one
		return h.err
	}
	if ewp, ok := err.(ErrorWithPos); ok {
		h.errsReported = true
		// the reporter decides whether to abort (non-nil) or continue (nil)
		err = h.reporter.Error(ewp)
	}
	// a non-ErrorWithPos err falls through unchanged and aborts the operation
	h.err = err
	return err
}
// HandleErrorWithPos handles an error with the given source position.
//
// If the handler has already aborted (by returning a non-nil error from a prior
// call to HandleError or HandleErrorf), that same error is returned and the
// given error is not reported.
func (h *Handler) HandleErrorWithPos(span ast.SourceSpan, err error) error {
	positioned := Error(span, err)
	return h.HandleError(positioned)
}
// HandleErrorf handles an error with the given source position, creating the
// error using the given message format and arguments.
//
// If the handler has already aborted (by returning a non-nil error from a call
// to HandleError or HandleErrorf), that same error is returned and the given
// error is not reported.
func (h *Handler) HandleErrorf(span ast.SourceSpan, format string, args ...any) error {
	positioned := Errorf(span, format, args...)
	return h.HandleError(positioned)
}
// HandleWarning handles the given warning. This will delegate to the handler's
// configured reporter.
func (h *Handler) HandleWarning(err ErrorWithPos) {
	// child handlers keep no warning state; the root does all reporting
	if parent := h.parent; parent != nil {
		parent.HandleWarning(err)
		return
	}
	// even though we aren't touching mutable fields, we acquire lock anyway so
	// that underlying reporter does not have to be thread-safe
	h.mu.Lock()
	defer h.mu.Unlock()
	h.reporter.Warning(err)
}
// HandleWarningWithPos handles a warning with the given source position. This will
// delegate to the handler's configured reporter.
func (h *Handler) HandleWarningWithPos(span ast.SourceSpan, err error) {
	positioned := Error(span, err)
	h.HandleWarning(positioned)
}
// HandleWarningf handles a warning with the given source position, creating the
// actual error value using the given message format and arguments.
func (h *Handler) HandleWarningf(span ast.SourceSpan, format string, args ...any) {
	positioned := Errorf(span, format, args...)
	h.HandleWarning(positioned)
}
// Error returns the handler result. If any errors have been reported then this
// returns a non-nil error. If the reporter never returned a non-nil error then
// ErrInvalidSource is returned. Otherwise, this returns the error returned by
// the handler's reporter (the same value returned by ReporterError).
func (h *Handler) Error() error {
	h.mu.Lock()
	defer h.mu.Unlock()
	if h.err != nil || !h.errsReported {
		// either the reporter aborted (return that error) or nothing was
		// ever reported (return nil)
		return h.err
	}
	// errors were reported but the reporter always returned nil
	return ErrInvalidSource
}
// ReporterError returns the error returned by the handler's reporter. If
// the reporter has either not been invoked (no errors handled) or has not
// returned any non-nil value, then this returns nil.
func (h *Handler) ReporterError() error {
	h.mu.Lock()
	defer h.mu.Unlock()
	return h.err
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protocompile
import (
"errors"
"io"
"io/fs"
"os"
"path/filepath"
"strings"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
"google.golang.org/protobuf/types/descriptorpb"
"github.com/bufbuild/protocompile/ast"
"github.com/bufbuild/protocompile/parser"
)
// Resolver is used by the compiler to resolve a proto source file name
// into some unit that is usable by the compiler. The result could be source
// for a proto file or it could be an already-parsed AST or descriptor.
//
// Resolver implementations must be thread-safe as a single compilation
// operation could invoke FindFileByPath from multiple goroutines.
type Resolver interface {
	// FindFileByPath searches for information for the given file path. If no
	// result is available, it should return a non-nil error, such as
	// protoregistry.NotFound.
	FindFileByPath(path string) (SearchResult, error)
}
// SearchResult represents information about a proto source file. Only one of
// the various fields must be set, based on what is available for a file. If
// multiple fields are set, the compiler prefers them in the opposite of the
// order listed: so it uses a descriptor if present and only falls back to
// source if nothing else is available.
type SearchResult struct {
	// Represents source code for the file. This should be nil if source code
	// is not available. If no field below is set, then the compiler will parse
	// the source code into an AST.
	Source io.Reader
	// Represents the abstract syntax tree for the file. If no field below is
	// set, then the compiler will convert the AST into a descriptor proto.
	AST *ast.FileNode
	// A descriptor proto that represents the file. If the field below is not
	// set, then the compiler will link this proto with its dependencies to
	// produce a linked descriptor.
	Proto *descriptorpb.FileDescriptorProto
	// A parse result for the file. This packages both an AST and a descriptor
	// proto in one. When a parser result is available, it is more efficient
	// than using an AST search result, since the descriptor proto need not be
	// re-created. And it provides better error messages than a descriptor proto
	// search result, since the AST has greater fidelity with regard to source
	// positions (even if the descriptor proto includes source code info).
	ParseResult parser.Result
	// A fully linked descriptor that represents the file. If this field is set,
	// then the compiler has little or no additional work to do for this file as
	// it is already compiled. If this value implements linker.File, there is no
	// additional work. Otherwise, the additional work is to compute an index of
	// symbols in the file, for efficient lookup.
	Desc protoreflect.FileDescriptor
}
// ResolverFunc is a simple function type that implements Resolver.
type ResolverFunc func(string) (SearchResult, error)

// Compile-time check that ResolverFunc satisfies the Resolver interface.
var _ Resolver = ResolverFunc(nil)

// FindFileByPath implements Resolver by invoking the function itself.
func (f ResolverFunc) FindFileByPath(path string) (SearchResult, error) {
	return f(path)
}
// CompositeResolver is a slice of resolvers, which are consulted in order
// until one can supply a result. If none of the constituent resolvers can
// supply a result, the error returned by the first resolver is returned. If
// the slice of resolvers is empty, all operations return
// protoregistry.NotFound.
type CompositeResolver []Resolver

// Compile-time check that CompositeResolver satisfies the Resolver interface.
var _ Resolver = CompositeResolver(nil)
// FindFileByPath implements Resolver. It tries each constituent resolver in
// order, returning the first successful result. If all of them fail, the
// error from the first resolver is returned.
func (f CompositeResolver) FindFileByPath(path string) (SearchResult, error) {
	if len(f) == 0 {
		return SearchResult{}, protoregistry.NotFound
	}
	var firstErr error
	for _, resolver := range f {
		result, err := resolver.FindFileByPath(path)
		if err != nil {
			// Remember only the first failure; later failures are discarded.
			if firstErr == nil {
				firstErr = err
			}
			continue
		}
		return result, nil
	}
	return SearchResult{}, firstErr
}
// SourceResolver can resolve file names by returning source code. It uses
// an optional list of import paths to search. By default, it searches the
// file system.
type SourceResolver struct {
	// Optional list of import paths. If present and not empty, then all
	// file paths to find are assumed to be relative to one of these paths.
	// If nil or empty, all file paths to find are assumed to be relative to
	// the current working directory.
	ImportPaths []string
	// Optional function for returning a file's contents. If nil, then
	// os.Open is used to open files on the file system.
	//
	// This function must be thread-safe as a single compilation operation
	// could result in concurrent invocations of this function from
	// multiple goroutines.
	Accessor func(path string) (io.ReadCloser, error)
}

// Compile-time check that *SourceResolver satisfies the Resolver interface.
var _ Resolver = (*SourceResolver)(nil)
// FindFileByPath implements Resolver by opening the named file. When import
// paths are configured, each one is tried in turn; a "not exist" error moves
// on to the next path, while any other error aborts the search immediately.
func (r *SourceResolver) FindFileByPath(path string) (SearchResult, error) {
	if len(r.ImportPaths) == 0 {
		// No import paths configured: resolve relative to the working directory.
		reader, err := r.accessFile(path)
		if err != nil {
			return SearchResult{}, err
		}
		return SearchResult{Source: reader}, nil
	}
	var notFoundErr error
	for _, importPath := range r.ImportPaths {
		reader, err := r.accessFile(filepath.Join(importPath, path))
		if err == nil {
			return SearchResult{Source: reader}, nil
		}
		if !errors.Is(err, fs.ErrNotExist) {
			// A real I/O failure (permissions, etc.) is reported right away.
			return SearchResult{}, err
		}
		// Keep the most recent "not exist" error to report if nothing matches.
		notFoundErr = err
	}
	return SearchResult{}, notFoundErr
}
// accessFile opens the given path, using the configured Accessor when one is
// present and falling back to the real file system otherwise.
func (r *SourceResolver) accessFile(path string) (io.ReadCloser, error) {
	if r.Accessor == nil {
		return os.Open(path)
	}
	return r.Accessor(path)
}
// SourceAccessorFromMap returns a function that can be used as the Accessor
// field of a SourceResolver that uses the given map to load source. The map
// keys are file names and the values are the corresponding file contents.
//
// The given map is used directly and not copied. Since accessor functions
// must be thread-safe, this means that the provided map must not be mutated
// once this accessor is provided to a compile operation.
func SourceAccessorFromMap(srcs map[string]string) func(string) (io.ReadCloser, error) {
	return func(path string) (io.ReadCloser, error) {
		contents, found := srcs[path]
		if !found {
			// Mimic the file system: unknown paths report "does not exist".
			return nil, os.ErrNotExist
		}
		return io.NopCloser(strings.NewReader(contents)), nil
	}
}
// WithStandardImports returns a new resolver that knows about the same standard
// imports that are included with protoc.
//
// Note that this uses the descriptors embedded in generated code in the packages
// of the Protobuf Go module, except for "google/protobuf/cpp_features.proto" and
// "google/protobuf/java_features.proto". For those two files, compiled descriptors
// are embedded in this module because there is no package in the Protobuf Go module
// that contains generated code for those files. This resolver also provides results
// for the "google/protobuf/go_features.proto", which is technically not a standard
// file (it is not included with protoc) but is included in generated code in the
// Protobuf Go module.
//
// As of v0.14.0 of this module (and v1.34.2 of the Protobuf Go module and v27.0 of
// Protobuf), the contents of the standard import "google/protobuf/descriptor.proto"
// contain extension declarations which are *absent* from the descriptors that this
// resolver returns. That is because extension declarations are only retained in
// source, not at runtime, which means they are not available in the embedded
// descriptors in generated code.
//
// To use versions of the standard imports that *do* include these extension
// declarations, see wellknownimports.WithStandardImports instead. As of this
// writing, the declarations are only needed to prevent source files from
// illegally re-defining the custom features for C++, Java, and Go.
func WithStandardImports(r Resolver) Resolver {
	return ResolverFunc(func(name string) (SearchResult, error) {
		res, err := r.FindFileByPath(name)
		if err == nil {
			return res, nil
		}
		// The underlying resolver failed; fall back to the embedded
		// descriptor if this is one of the known standard files.
		if d, ok := standardImports[name]; ok {
			return SearchResult{Desc: d}, nil
		}
		return res, err
	})
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package sourceinfo contains the logic for computing source code info for a
// file descriptor.
//
// The inputs to the computation are an AST for a file as well as the index of
// interpreted options for that file.
package sourceinfo
import (
"bytes"
"fmt"
"strings"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/descriptorpb"
"github.com/bufbuild/protocompile/ast"
"github.com/bufbuild/protocompile/internal"
)
// OptionIndex is a mapping of AST nodes that define options to corresponding
// paths into the containing file descriptor. The path is a sequence of field
// tags and indexes that define a traversal path from the root (the file
// descriptor) to the resolved option field. The info also includes similar
// information about child elements, for options whose values are composite
// (like a list or message literal).
type OptionIndex map[*ast.OptionNode]*OptionSourceInfo
// OptionSourceInfo describes the source info path for an option value and
// contains information about the value's descendants in the AST.
type OptionSourceInfo struct {
	// The source info path to this element. If this element represents a
	// declaration with an array-literal value, the last element of the
	// path is the index of the first item in the array.
	//
	// This path is relative to the options message. So the first element
	// is a field number of the options message.
	//
	// If the first element is negative, it indicates the number of path
	// components to remove from the path to the relevant options. This is
	// used for field pseudo-options, so that the path indicates a field on
	// the descriptor, which is a parent of the options message (since that
	// is how the pseudo-options are actually stored).
	Path []int32
	// Children can be an *ArrayLiteralSourceInfo, a *MessageLiteralSourceInfo,
	// or nil, depending on whether the option's value is an
	// [*ast.ArrayLiteralNode], an [*ast.MessageLiteralNode], or neither.
	// For [*ast.ArrayLiteralNode] values, this is only populated if the
	// value is a non-empty array of messages. (Empty arrays and arrays
	// of scalar values do not need any additional info.)
	Children OptionChildrenSourceInfo
}
// OptionChildrenSourceInfo represents source info paths for child elements of
// an option value. It is a sealed interface: the only implementations are
// *ArrayLiteralSourceInfo and *MessageLiteralSourceInfo.
type OptionChildrenSourceInfo interface {
	isChildSourceInfo()
}
// ArrayLiteralSourceInfo represents source info paths for the child
// elements of an [*ast.ArrayLiteralNode]. This value is only useful for
// non-empty array literals that contain messages.
type ArrayLiteralSourceInfo struct {
	// Elements has one entry per element of the array literal, in order.
	Elements []OptionSourceInfo
}

// isChildSourceInfo marks this type as an OptionChildrenSourceInfo.
func (*ArrayLiteralSourceInfo) isChildSourceInfo() {}
// MessageLiteralSourceInfo represents source info paths for the child
// elements of an [*ast.MessageLiteralNode].
type MessageLiteralSourceInfo struct {
	// Fields maps each field AST node of the message literal to its info.
	Fields map[*ast.MessageFieldNode]*OptionSourceInfo
}

// isChildSourceInfo marks this type as an OptionChildrenSourceInfo.
func (*MessageLiteralSourceInfo) isChildSourceInfo() {}
// GenerateSourceInfo generates source code info for the given AST. If the given
// opts is present, it can generate source code info for interpreted options.
// Otherwise, any options in the AST will get source code info as uninterpreted
// options. Returns nil when file is nil.
func GenerateSourceInfo(file *ast.FileNode, opts OptionIndex, genOpts ...GenerateOption) *descriptorpb.SourceCodeInfo {
	if file == nil {
		return nil
	}
	sci := sourceCodeInfo{
		file:         file,
		commentsUsed: make(map[ast.SourcePos]struct{}),
	}
	// Apply caller-supplied generation options before walking the AST.
	for _, genOpt := range genOpts {
		genOpt.apply(&sci)
	}
	generateSourceInfoForFile(opts, &sci, file)
	return &descriptorpb.SourceCodeInfo{Location: sci.locs}
}
// GenerateOption represents an option for how source code info is generated.
// Implementations mutate the internal generator state via apply.
type GenerateOption interface {
	apply(*sourceCodeInfo)
}
// WithExtraComments will result in source code info that contains extra comments.
// By default, comments are only generated for full declarations. Inline comments
// around elements of a declaration are not included in source code info. This option
// changes that behavior so that as many comments as possible are described in the
// source code info.
func WithExtraComments() GenerateOption {
	return extraCommentsOption{}
}
// WithExtraOptionLocations will result in source code info that contains extra
// locations to describe elements inside of a message literal. By default, option
// values are treated as opaque, so the only locations included are for the entire
// option value. But with this option, paths to the various fields set inside a
// message literal will also have locations. This makes it possible for usages of
// the source code info to report precise locations for specific fields inside the
// value.
func WithExtraOptionLocations() GenerateOption {
	return extraOptionLocationsOption{}
}
// extraCommentsOption implements GenerateOption for WithExtraComments.
type extraCommentsOption struct{}

// apply enables generation of extra comments in the source code info.
func (e extraCommentsOption) apply(info *sourceCodeInfo) {
	info.extraComments = true
}
// extraOptionLocationsOption implements GenerateOption for WithExtraOptionLocations.
type extraOptionLocationsOption struct{}

// apply enables generation of extra locations for option values.
func (e extraOptionLocationsOption) apply(info *sourceCodeInfo) {
	info.extraOptionLocs = true
}
// generateSourceInfoForFile emits locations for the file itself (empty path),
// its syntax/edition declaration, and every top-level declaration. Many calls
// below pass append(path, ...) directly: newLoc and friends clone the path
// before storing it, so the shared backing array is safe to reuse.
func generateSourceInfoForFile(opts OptionIndex, sci *sourceCodeInfo, file *ast.FileNode) {
	path := make([]int32, 0, 16)
	sci.newLocWithoutComments(file, nil)
	if file.Syntax != nil {
		sci.newLocWithComments(file.Syntax, append(path, internal.FileSyntaxTag))
	}
	if file.Edition != nil {
		sci.newLocWithComments(file.Edition, append(path, internal.FileEditionTag))
	}
	// Running index into each repeated field of the FileDescriptorProto.
	var depIndex, pubDepIndex, weakDepIndex, optIndex, msgIndex, enumIndex, extendIndex, svcIndex int32
	for _, child := range file.Decls {
		switch child := child.(type) {
		case *ast.ImportNode:
			sci.newLocWithComments(child, append(path, internal.FileDependencyTag, depIndex))
			depIndex++
			// public/weak modifiers get their own locations under the
			// public_dependency / weak_dependency repeated fields.
			if child.Public != nil {
				sci.newLoc(child.Public, append(path, internal.FilePublicDependencyTag, pubDepIndex))
				pubDepIndex++
			} else if child.Weak != nil {
				sci.newLoc(child.Weak, append(path, internal.FileWeakDependencyTag, weakDepIndex))
				weakDepIndex++
			}
		case *ast.PackageNode:
			sci.newLocWithComments(child, append(path, internal.FilePackageTag))
		case *ast.OptionNode:
			generateSourceCodeInfoForOption(opts, sci, child, false, &optIndex, append(path, internal.FileOptionsTag))
		case *ast.MessageNode:
			generateSourceCodeInfoForMessage(opts, sci, child, nil, append(path, internal.FileMessagesTag, msgIndex))
			msgIndex++
		case *ast.EnumNode:
			generateSourceCodeInfoForEnum(opts, sci, child, append(path, internal.FileEnumsTag, enumIndex))
			enumIndex++
		case *ast.ExtendNode:
			extsPath := append(path, internal.FileExtensionsTag) //nolint:gocritic // intentionally creating new slice var
			// we clone the path here so that append can't mutate extsPath, since they may share storage
			msgsPath := append(internal.ClonePath(path), internal.FileMessagesTag)
			generateSourceCodeInfoForExtensions(opts, sci, child, &extendIndex, &msgIndex, extsPath, msgsPath)
		case *ast.ServiceNode:
			generateSourceCodeInfoForService(opts, sci, child, append(path, internal.FileServicesTag, svcIndex))
			svcIndex++
		}
	}
}
// generateSourceCodeInfoForOption emits locations for a single option
// declaration. Options present in opts were interpreted, so their resolved
// path (relative to the options message) is combined with the given path
// prefix. Options absent from opts are recorded under the
// uninterpreted_option repeated field instead.
//   - compact: true for options in compact bracketed lists, which do not
//     receive their own standalone location or comments.
//   - uninterpIndex: running index into uninterpreted_option; incremented
//     only when this option is uninterpreted.
func generateSourceCodeInfoForOption(opts OptionIndex, sci *sourceCodeInfo, n *ast.OptionNode, compact bool, uninterpIndex *int32, path []int32) {
	if !compact {
		sci.newLocWithoutComments(n, path)
	}
	optInfo := opts[n]
	if optInfo != nil {
		fullPath := combinePathsForOption(path, optInfo.Path)
		if compact {
			sci.newLoc(n, fullPath)
		} else {
			sci.newLocWithComments(n, fullPath)
		}
		if sci.extraOptionLocs {
			generateSourceInfoForOptionChildren(sci, n.Val, path, fullPath, optInfo.Children)
		}
		return
	}
	// it's an uninterpreted option
	optPath := path
	optPath = append(optPath, internal.UninterpretedOptionsTag, *uninterpIndex)
	*uninterpIndex++
	sci.newLoc(n, optPath)
	// Attribute the value to the matching oneof-like field of
	// UninterpretedOption, based on the AST value's concrete type.
	var valTag int32
	switch n.Val.(type) {
	case ast.IdentValueNode:
		valTag = internal.UninterpretedIdentTag
	case *ast.NegativeIntLiteralNode:
		valTag = internal.UninterpretedNegIntTag
	case ast.IntValueNode:
		valTag = internal.UninterpretedPosIntTag
	case ast.FloatValueNode:
		valTag = internal.UninterpretedDoubleTag
	case ast.StringValueNode:
		valTag = internal.UninterpretedStringTag
	case *ast.MessageLiteralNode:
		valTag = internal.UninterpretedAggregateTag
	}
	if valTag != 0 {
		sci.newLoc(n.Val, append(optPath, valTag))
	}
	// Each part of the option name gets a location for the name entry and
	// its name_part field.
	for j, nn := range n.Name.Parts {
		optNmPath := optPath
		optNmPath = append(optNmPath, internal.UninterpretedNameTag, int32(j))
		sci.newLoc(nn, optNmPath)
		sci.newLoc(nn.Name, append(optNmPath, internal.UninterpretedNameNameTag))
	}
}
// combinePathsForOption joins a location path prefix with an option's
// relative path, returning a freshly allocated slice (the inputs are not
// mutated).
//
// A leading -1 in optionPath is a marker used by field pseudo-options
// (like "default" and "json_name"): it directs the path at the parent
// descriptor rather than its options message, so one trailing component
// of the prefix is dropped before the remainder is appended.
func combinePathsForOption(prefix, optionPath []int32) []int32 {
	fullPath := make([]int32, len(prefix), len(prefix)+len(optionPath))
	copy(fullPath, prefix)
	// Guard the marker check so an empty optionPath cannot panic; it then
	// simply yields a copy of the prefix.
	if len(optionPath) > 0 && optionPath[0] == -1 {
		// used by "default" and "json_name" field pseudo-options
		// to attribute path to parent element (since those are
		// stored directly on the descriptor, not its options)
		optionPath = optionPath[1:]
		fullPath = fullPath[:len(prefix)-1]
	}
	return append(fullPath, optionPath...)
}
// generateSourceInfoForOptionChildren emits locations for the descendants of
// an interpreted option value: array-literal elements and message-literal
// fields. pathPrefix is the path of the enclosing options message (used to
// resolve the relative paths stored in the child source info); path is the
// full path of the value n itself.
func generateSourceInfoForOptionChildren(sci *sourceCodeInfo, n ast.ValueNode, pathPrefix, path []int32, childInfo OptionChildrenSourceInfo) {
	switch childInfo := childInfo.(type) {
	case *ArrayLiteralSourceInfo:
		if arrayLiteral, ok := n.(*ast.ArrayLiteralNode); ok {
			for i, val := range arrayLiteral.Elements {
				elementInfo := childInfo.Elements[i]
				fullPath := combinePathsForOption(pathPrefix, elementInfo.Path)
				sci.newLoc(val, fullPath)
				generateSourceInfoForOptionChildren(sci, val, pathPrefix, fullPath, elementInfo.Children)
			}
		}
	case *MessageLiteralSourceInfo:
		if msgLiteral, ok := n.(*ast.MessageLiteralNode); ok {
			for _, fieldNode := range msgLiteral.Elements {
				fieldInfo, ok := childInfo.Fields[fieldNode]
				if !ok {
					continue
				}
				fullPath := combinePathsForOption(pathPrefix, fieldInfo.Path)
				locationNode := ast.Node(fieldNode)
				if fieldNode.Name.IsAnyTypeReference() && fullPath[len(fullPath)-1] == internal.AnyValueTag {
					// This is a special expanded Any. So also insert a location
					// for the type URL field.
					typeURLPath := make([]int32, len(fullPath))
					copy(typeURLPath, fullPath)
					typeURLPath[len(typeURLPath)-1] = internal.AnyTypeURLTag
					// Fix: the type URL location must use typeURLPath (ending in
					// the type_url tag); passing fullPath here would emit a
					// duplicate location for the value field instead.
					sci.newLoc(fieldNode.Name, typeURLPath)
					// And create the next location so it's just the value,
					// not the full field definition.
					locationNode = fieldNode.Val
				}
				_, isArrayLiteral := fieldNode.Val.(*ast.ArrayLiteralNode)
				if !isArrayLiteral {
					// We don't include this with an array literal since the path
					// is to the first element of the array. If we added it here,
					// it would be redundant with the child info we add next, and
					// it wouldn't be entirely correct since it only indicates the
					// index of the first element in the array (and not the others).
					sci.newLoc(locationNode, fullPath)
				}
				generateSourceInfoForOptionChildren(sci, fieldNode.Val, pathPrefix, fullPath, fieldInfo.Children)
			}
		}
	case nil:
		if arrayLiteral, ok := n.(*ast.ArrayLiteralNode); ok {
			// an array literal without child source info is an array of scalars
			for i, val := range arrayLiteral.Elements {
				// last element of path is starting index for array literal
				elementPath := append(([]int32)(nil), path...)
				elementPath[len(elementPath)-1] += int32(i)
				sci.newLoc(val, elementPath)
			}
		}
	}
}
// generateSourceCodeInfoForMessage emits locations for a message declaration
// and everything nested inside it. fieldPath is non-nil only when the message
// comes from a group field, in which case it is the path of that field; path
// is the path of the message itself. Synthetic map entries only get a single
// location for the entry message.
func generateSourceCodeInfoForMessage(opts OptionIndex, sci *sourceCodeInfo, n ast.MessageDeclNode, fieldPath []int32, path []int32) {
	var openBrace ast.Node
	var decls []ast.MessageElement
	switch n := n.(type) {
	case *ast.MessageNode:
		openBrace = n.OpenBrace
		decls = n.Decls
	case *ast.SyntheticGroupMessageNode:
		openBrace = n.OpenBrace
		decls = n.Decls
	case *ast.SyntheticMapEntryNode:
		sci.newLoc(n, path)
		// map entry so nothing else to do
		return
	}
	sci.newBlockLocWithComments(n, openBrace, path)
	sci.newLoc(n.MessageName(), append(path, internal.MessageNameTag))
	// matching protoc, which emits the corresponding field type name (for group fields)
	// right after the source location for the group message name
	if fieldPath != nil {
		sci.newLoc(n.MessageName(), append(fieldPath, internal.FieldTypeNameTag))
	}
	// Running indexes into each repeated field of the DescriptorProto.
	var optIndex, fieldIndex, oneofIndex, extendIndex, nestedMsgIndex int32
	var nestedEnumIndex, extRangeIndex, reservedRangeIndex, reservedNameIndex int32
	for _, child := range decls {
		switch child := child.(type) {
		case *ast.OptionNode:
			generateSourceCodeInfoForOption(opts, sci, child, false, &optIndex, append(path, internal.MessageOptionsTag))
		case *ast.FieldNode:
			generateSourceCodeInfoForField(opts, sci, child, append(path, internal.MessageFieldsTag, fieldIndex))
			fieldIndex++
		case *ast.GroupNode:
			// A group declares both a field and a nested message.
			fldPath := append(path, internal.MessageFieldsTag, fieldIndex) //nolint:gocritic // intentionally creating new slice var
			generateSourceCodeInfoForField(opts, sci, child, fldPath)
			fieldIndex++
			// we clone the path here so that append can't mutate fldPath, since they may share storage
			msgPath := append(internal.ClonePath(path), internal.MessageNestedMessagesTag, nestedMsgIndex)
			generateSourceCodeInfoForMessage(opts, sci, child.AsMessage(), fldPath, msgPath)
			nestedMsgIndex++
		case *ast.MapFieldNode:
			generateSourceCodeInfoForField(opts, sci, child, append(path, internal.MessageFieldsTag, fieldIndex))
			fieldIndex++
			// a map field implies a synthetic map-entry nested message, so the
			// nested message index advances even though no location is emitted
			nestedMsgIndex++
		case *ast.OneofNode:
			fldsPath := append(path, internal.MessageFieldsTag) //nolint:gocritic // intentionally creating new slice var
			// we clone the path here and below so that append ops can't mutate
			// fldPath or msgsPath, since they may otherwise share storage
			msgsPath := append(internal.ClonePath(path), internal.MessageNestedMessagesTag)
			ooPath := append(internal.ClonePath(path), internal.MessageOneofsTag, oneofIndex)
			generateSourceCodeInfoForOneof(opts, sci, child, &fieldIndex, &nestedMsgIndex, fldsPath, msgsPath, ooPath)
			oneofIndex++
		case *ast.MessageNode:
			generateSourceCodeInfoForMessage(opts, sci, child, nil, append(path, internal.MessageNestedMessagesTag, nestedMsgIndex))
			nestedMsgIndex++
		case *ast.EnumNode:
			generateSourceCodeInfoForEnum(opts, sci, child, append(path, internal.MessageEnumsTag, nestedEnumIndex))
			nestedEnumIndex++
		case *ast.ExtendNode:
			extsPath := append(path, internal.MessageExtensionsTag) //nolint:gocritic // intentionally creating new slice var
			// we clone the path here so that append can't mutate extsPath, since they may share storage
			msgsPath := append(internal.ClonePath(path), internal.MessageNestedMessagesTag)
			generateSourceCodeInfoForExtensions(opts, sci, child, &extendIndex, &nestedMsgIndex, extsPath, msgsPath)
		case *ast.ExtensionRangeNode:
			generateSourceCodeInfoForExtensionRanges(opts, sci, child, &extRangeIndex, append(path, internal.MessageExtensionRangesTag))
		case *ast.ReservedNode:
			if len(child.Names) > 0 {
				resPath := path
				resPath = append(resPath, internal.MessageReservedNamesTag)
				sci.newLocWithComments(child, resPath)
				for _, rn := range child.Names {
					sci.newLoc(rn, append(resPath, reservedNameIndex))
					reservedNameIndex++
				}
			}
			if len(child.Ranges) > 0 {
				resPath := path
				resPath = append(resPath, internal.MessageReservedRangesTag)
				sci.newLocWithComments(child, resPath)
				for _, rr := range child.Ranges {
					generateSourceCodeInfoForReservedRange(sci, rr, append(resPath, reservedRangeIndex))
					reservedRangeIndex++
				}
			}
		}
	}
}
// generateSourceCodeInfoForEnum emits locations for an enum declaration:
// the enum itself, its name, and each option, value, and reserved
// declaration in its body.
func generateSourceCodeInfoForEnum(opts OptionIndex, sci *sourceCodeInfo, n *ast.EnumNode, path []int32) {
	sci.newBlockLocWithComments(n, n.OpenBrace, path)
	sci.newLoc(n.Name, append(path, internal.EnumNameTag))
	// Running indexes into each repeated field of the EnumDescriptorProto.
	var optIndex, valIndex, reservedNameIndex, reservedRangeIndex int32
	for _, child := range n.Decls {
		switch child := child.(type) {
		case *ast.OptionNode:
			generateSourceCodeInfoForOption(opts, sci, child, false, &optIndex, append(path, internal.EnumOptionsTag))
		case *ast.EnumValueNode:
			generateSourceCodeInfoForEnumValue(opts, sci, child, append(path, internal.EnumValuesTag, valIndex))
			valIndex++
		case *ast.ReservedNode:
			if len(child.Names) > 0 {
				resPath := path
				resPath = append(resPath, internal.EnumReservedNamesTag)
				sci.newLocWithComments(child, resPath)
				for _, rn := range child.Names {
					sci.newLoc(rn, append(resPath, reservedNameIndex))
					reservedNameIndex++
				}
			}
			if len(child.Ranges) > 0 {
				resPath := path
				resPath = append(resPath, internal.EnumReservedRangesTag)
				sci.newLocWithComments(child, resPath)
				for _, rr := range child.Ranges {
					generateSourceCodeInfoForReservedRange(sci, rr, append(resPath, reservedRangeIndex))
					reservedRangeIndex++
				}
			}
		}
	}
}
// generateSourceCodeInfoForEnumValue emits locations for one enum value:
// the whole declaration, its name, its number, and any compact options.
func generateSourceCodeInfoForEnumValue(opts OptionIndex, sci *sourceCodeInfo, n *ast.EnumValueNode, path []int32) {
	sci.newLocWithComments(n, path)
	sci.newLoc(n.Name, append(path, internal.EnumValNameTag))
	sci.newLoc(n.Number, append(path, internal.EnumValNumberTag))
	// enum value options
	if n.Options != nil {
		optsPath := path
		optsPath = append(optsPath, internal.EnumValOptionsTag)
		sci.newLoc(n.Options, optsPath)
		var optIndex int32
		for _, opt := range n.Options.GetElements() {
			generateSourceCodeInfoForOption(opts, sci, opt, true, &optIndex, optsPath)
		}
	}
}
// generateSourceCodeInfoForReservedRange emits locations for a reserved
// range: the range itself, its start value, and its end. A range without an
// explicit end ("to N" or "to max") re-uses the start value's span for the
// end location, matching protoc.
func generateSourceCodeInfoForReservedRange(sci *sourceCodeInfo, n *ast.RangeNode, path []int32) {
	sci.newLoc(n, path)
	sci.newLoc(n.StartVal, append(path, internal.ReservedRangeStartTag))
	if n.EndVal != nil {
		sci.newLoc(n.EndVal, append(path, internal.ReservedRangeEndTag))
	} else if n.Max != nil {
		sci.newLoc(n.Max, append(path, internal.ReservedRangeEndTag))
	} else {
		sci.newLoc(n.StartVal, append(path, internal.ReservedRangeEndTag))
	}
}
// generateSourceCodeInfoForExtensions emits locations for an extend block.
// Plain fields advance extendIndex; group fields additionally produce a
// nested message and so advance msgIndex too. extendPath and msgPath are the
// paths of the corresponding repeated fields in the parent descriptor.
func generateSourceCodeInfoForExtensions(opts OptionIndex, sci *sourceCodeInfo, n *ast.ExtendNode, extendIndex, msgIndex *int32, extendPath, msgPath []int32) {
	sci.newBlockLocWithComments(n, n.OpenBrace, extendPath)
	for _, decl := range n.Decls {
		switch decl := decl.(type) {
		case *ast.FieldNode:
			generateSourceCodeInfoForField(opts, sci, decl, append(extendPath, *extendIndex))
			*extendIndex++
		case *ast.GroupNode:
			// A group declares both an extension field and a nested message.
			fldPath := extendPath
			fldPath = append(fldPath, *extendIndex)
			generateSourceCodeInfoForField(opts, sci, decl, fldPath)
			*extendIndex++
			generateSourceCodeInfoForMessage(opts, sci, decl.AsMessage(), fldPath, append(msgPath, *msgIndex))
			*msgIndex++
		}
	}
}
// generateSourceCodeInfoForOneof emits locations for a oneof declaration.
// Fields inside the oneof live in the parent message's repeated field list,
// so fieldIndex and nestedMsgIndex are pointers into the parent's counters;
// fieldPath and nestedMsgPath are the parent's corresponding paths while
// oneofPath addresses the oneof declaration itself.
func generateSourceCodeInfoForOneof(opts OptionIndex, sci *sourceCodeInfo, n *ast.OneofNode, fieldIndex, nestedMsgIndex *int32, fieldPath, nestedMsgPath, oneofPath []int32) {
	sci.newBlockLocWithComments(n, n.OpenBrace, oneofPath)
	sci.newLoc(n.Name, append(oneofPath, internal.OneofNameTag))
	var optIndex int32
	for _, child := range n.Decls {
		switch child := child.(type) {
		case *ast.OptionNode:
			generateSourceCodeInfoForOption(opts, sci, child, false, &optIndex, append(oneofPath, internal.OneofOptionsTag))
		case *ast.FieldNode:
			generateSourceCodeInfoForField(opts, sci, child, append(fieldPath, *fieldIndex))
			*fieldIndex++
		case *ast.GroupNode:
			// A group declares both a field and a nested message.
			fldPath := fieldPath
			fldPath = append(fldPath, *fieldIndex)
			generateSourceCodeInfoForField(opts, sci, child, fldPath)
			*fieldIndex++
			generateSourceCodeInfoForMessage(opts, sci, child.AsMessage(), fldPath, append(nestedMsgPath, *nestedMsgIndex))
			*nestedMsgIndex++
		}
	}
}
// generateSourceCodeInfoForField emits locations for a field declaration:
// the field itself, its extendee (for extensions), label, type, name,
// number, and any compact options. Group fields get special treatment so
// that comments are attributed to the corresponding group message instead.
func generateSourceCodeInfoForField(opts OptionIndex, sci *sourceCodeInfo, n ast.FieldDeclNode, path []int32) {
	var fieldType string
	if f, ok := n.(*ast.FieldNode); ok {
		fieldType = string(f.FldType.AsIdentifier())
	}
	if n.GetGroupKeyword() != nil {
		// comments will appear on group message
		sci.newLocWithoutComments(n, path)
		if n.FieldExtendee() != nil {
			sci.newLoc(n.FieldExtendee(), append(path, internal.FieldExtendeeTag))
		}
		if n.FieldLabel() != nil {
			// no comments here either (label is first token for group, so we want
			// to leave the comments to be associated with the group message instead)
			sci.newLocWithoutComments(n.FieldLabel(), append(path, internal.FieldLabelTag))
		}
		sci.newLoc(n.FieldType(), append(path, internal.FieldTypeTag))
		// let the name comments be attributed to the group name
		sci.newLocWithoutComments(n.FieldName(), append(path, internal.FieldNameTag))
	} else {
		sci.newLocWithComments(n, path)
		if n.FieldExtendee() != nil {
			sci.newLoc(n.FieldExtendee(), append(path, internal.FieldExtendeeTag))
		}
		if n.FieldLabel() != nil {
			sci.newLoc(n.FieldLabel(), append(path, internal.FieldLabelTag))
		}
		var tag int32
		if _, isScalar := internal.FieldTypes[fieldType]; isScalar {
			tag = internal.FieldTypeTag
		} else {
			// this is a message or an enum, so attribute type location
			// to the type name field
			tag = internal.FieldTypeNameTag
		}
		sci.newLoc(n.FieldType(), append(path, tag))
		sci.newLoc(n.FieldName(), append(path, internal.FieldNameTag))
	}
	sci.newLoc(n.FieldTag(), append(path, internal.FieldNumberTag))
	if n.GetOptions() != nil {
		// compact options in brackets after the field number
		optsPath := path
		optsPath = append(optsPath, internal.FieldOptionsTag)
		sci.newLoc(n.GetOptions(), optsPath)
		var optIndex int32
		for _, opt := range n.GetOptions().GetElements() {
			generateSourceCodeInfoForOption(opts, sci, opt, true, &optIndex, optsPath)
		}
	}
}
// generateSourceCodeInfoForExtensionRanges emits locations for an
// "extensions" declaration. It makes two passes over the ranges: first the
// start/end values for every range, then the options (which apply to every
// range in the declaration), matching the ordering protoc produces.
func generateSourceCodeInfoForExtensionRanges(opts OptionIndex, sci *sourceCodeInfo, n *ast.ExtensionRangeNode, extRangeIndex *int32, path []int32) {
	sci.newLocWithComments(n, path)
	// remember where this declaration's ranges start, for the second pass
	startExtRangeIndex := *extRangeIndex
	for _, child := range n.Ranges {
		// shadows the outer path with the per-range path
		path := append(path, *extRangeIndex)
		*extRangeIndex++
		sci.newLoc(child, path)
		sci.newLoc(child.StartVal, append(path, internal.ExtensionRangeStartTag))
		switch {
		case child.EndVal != nil:
			sci.newLoc(child.EndVal, append(path, internal.ExtensionRangeEndTag))
		case child.Max != nil:
			sci.newLoc(child.Max, append(path, internal.ExtensionRangeEndTag))
		default:
			// open-ended range: re-use the start value's span for the end
			sci.newLoc(child.StartVal, append(path, internal.ExtensionRangeEndTag))
		}
	}
	// options for all ranges go after the start+end values
	for range n.Ranges {
		path := append(path, startExtRangeIndex)
		startExtRangeIndex++
		if n.Options != nil {
			optsPath := path
			optsPath = append(optsPath, internal.ExtensionRangeOptionsTag)
			sci.newLoc(n.Options, optsPath)
			var optIndex int32
			for _, opt := range n.Options.GetElements() {
				generateSourceCodeInfoForOption(opts, sci, opt, true, &optIndex, optsPath)
			}
		}
	}
}
// generateSourceCodeInfoForService emits locations for a service
// declaration: the service itself, its name, and each option and RPC method
// in its body.
func generateSourceCodeInfoForService(opts OptionIndex, sci *sourceCodeInfo, n *ast.ServiceNode, path []int32) {
	sci.newBlockLocWithComments(n, n.OpenBrace, path)
	sci.newLoc(n.Name, append(path, internal.ServiceNameTag))
	var optionIndex, methodIndex int32
	for _, decl := range n.Decls {
		switch decl := decl.(type) {
		case *ast.OptionNode:
			generateSourceCodeInfoForOption(opts, sci, decl, false, &optionIndex, append(path, internal.ServiceOptionsTag))
		case *ast.RPCNode:
			generateSourceCodeInfoForMethod(opts, sci, decl, append(path, internal.ServiceMethodsTag, methodIndex))
			methodIndex++
		}
	}
}
// generateSourceCodeInfoForMethod emits locations for an RPC method: the
// method itself, its name, input and output types (with their optional
// "stream" keywords), and any options in the method body. Methods without a
// body (no open brace) are treated as simple declarations.
func generateSourceCodeInfoForMethod(opts OptionIndex, sci *sourceCodeInfo, n *ast.RPCNode, path []int32) {
	if n.OpenBrace != nil {
		sci.newBlockLocWithComments(n, n.OpenBrace, path)
	} else {
		sci.newLocWithComments(n, path)
	}
	sci.newLoc(n.Name, append(path, internal.MethodNameTag))
	if n.Input.Stream != nil {
		sci.newLoc(n.Input.Stream, append(path, internal.MethodInputStreamTag))
	}
	sci.newLoc(n.Input.MessageType, append(path, internal.MethodInputTag))
	if n.Output.Stream != nil {
		sci.newLoc(n.Output.Stream, append(path, internal.MethodOutputStreamTag))
	}
	sci.newLoc(n.Output.MessageType, append(path, internal.MethodOutputTag))
	optsPath := path
	optsPath = append(optsPath, internal.MethodOptionsTag)
	var optIndex int32
	for _, decl := range n.Decls {
		if opt, ok := decl.(*ast.OptionNode); ok {
			generateSourceCodeInfoForOption(opts, sci, opt, false, &optIndex, optsPath)
		}
	}
}
// sourceCodeInfo accumulates the locations generated while walking a file's
// AST, along with the generation options in effect.
type sourceCodeInfo struct {
	// file is the AST whose source info is being generated.
	file *ast.FileNode
	// extraComments enables WithExtraComments behavior.
	extraComments bool
	// extraOptionLocs enables WithExtraOptionLocations behavior.
	extraOptionLocs bool
	// locs is the accumulated list of locations, in generation order.
	locs []*descriptorpb.SourceCodeInfo_Location
	// commentsUsed tracks comments already attributed to a location so they
	// are not attached to more than one.
	commentsUsed map[ast.SourcePos]struct{}
}
// newLocWithoutComments appends a location for n with the given path and no
// comment attribution. When n is the file itself, the span is trimmed so it
// covers only the file's actual lexical elements (excluding the trailing EOF
// token); an empty file gets a 1:1 position.
func (sci *sourceCodeInfo) newLocWithoutComments(n ast.Node, path []int32) {
	var start, end ast.SourcePos
	if n == sci.file {
		// For files, we don't want to consider trailing EOF token
		// as part of the span. We want the span to only include
		// actual lexical elements in the file (which also excludes
		// whitespace and comments).
		children := sci.file.Children()
		if len(children) > 0 && isEOF(children[len(children)-1]) {
			children = children[:len(children)-1]
		}
		if len(children) == 0 {
			start = ast.SourcePos{Filename: sci.file.Name(), Line: 1, Col: 1}
			end = start
		} else {
			start = sci.file.TokenInfo(n.Start()).Start()
			end = sci.file.TokenInfo(children[len(children)-1].End()).End()
		}
	} else {
		info := sci.file.NodeInfo(n)
		start, end = info.Start(), info.End()
	}
	sci.locs = append(sci.locs, &descriptorpb.SourceCodeInfo_Location{
		// the path is cloned because callers re-use the backing array
		Path: internal.ClonePath(path),
		Span: makeSpan(start, end),
	})
}
// newLoc appends a location for n with the given path. Comments are only
// attached when the extra-comments option is enabled; otherwise the location
// carries just a path and span.
func (sci *sourceCodeInfo) newLoc(n ast.Node, path []int32) {
	info := sci.file.NodeInfo(n)
	if sci.extraComments {
		detached, leading := sci.getLeadingComments(n)
		trailing := sci.getTrailingComments(n)
		sci.newLocWithGivenComments(info, detached, leading, trailing, path)
		return
	}
	sci.locs = append(sci.locs, &descriptorpb.SourceCodeInfo_Location{
		// the path is cloned because callers re-use the backing array
		Path: internal.ClonePath(path),
		Span: makeSpan(info.Start(), info.End()),
	})
}
// isEOF reports whether n is the sentinel rune node (rune value zero) that
// represents the end of the file.
func isEOF(n ast.Node) bool {
	if r, ok := n.(*ast.RuneNode); ok {
		return r.Rune == 0
	}
	return false
}
// newBlockLocWithComments records a location for a block definition (one with
// a braced body). Unlike newLocWithComments, the trailing comment is taken
// from just after the open brace "{" rather than after the whole element:
//
//	message Foo { // this is a trailing comment for a message
//
//	} // not this
func (sci *sourceCodeInfo) newBlockLocWithComments(n, openBrace ast.Node, path []int32) {
	detached, leading := sci.getLeadingComments(n)
	trailing := sci.getTrailingComments(openBrace)
	sci.newLocWithGivenComments(sci.file.NodeInfo(n), detached, leading, trailing, path)
}
// newLocWithComments records a location for n at the given path, including
// any detached, leading, and trailing comments attributed to the node.
func (sci *sourceCodeInfo) newLocWithComments(n ast.Node, path []int32) {
	detached, leading := sci.getLeadingComments(n)
	trailing := sci.getTrailingComments(n)
	sci.newLocWithGivenComments(sci.file.NodeInfo(n), detached, leading, trailing, path)
}
// newLocWithGivenComments records a location for the span described by
// nodeInfo, with the given comments already attributed to it. Comments that
// were consumed by a previously recorded location are dropped so the same
// text is never emitted twice.
func (sci *sourceCodeInfo) newLocWithGivenComments(nodeInfo ast.NodeInfo, detachedComments []comments, leadingComments comments, trailingComments comments, path []int32) {
	// The "first" leading comment is the first detached group when there is
	// one, otherwise the attached leading comment. If it was already used
	// elsewhere, discard all leading comments for this location.
	firstLeading := leadingComments
	if len(detachedComments) > 0 {
		firstLeading = detachedComments[0]
	}
	if sci.commentUsed(firstLeading) {
		detachedComments = nil
		leadingComments = ast.EmptyComments
	}
	if sci.commentUsed(trailingComments) {
		trailingComments = ast.EmptyComments
	}
	var lead, trail *string
	if leadingComments.Len() > 0 {
		lead = proto.String(sci.combineComments(leadingComments))
	}
	if trailingComments.Len() > 0 {
		trail = proto.String(sci.combineComments(trailingComments))
	}
	detached := make([]string, len(detachedComments))
	for i := range detachedComments {
		detached[i] = sci.combineComments(detachedComments[i])
	}
	sci.locs = append(sci.locs, &descriptorpb.SourceCodeInfo_Location{
		LeadingDetachedComments: detached,
		LeadingComments:         lead,
		TrailingComments:        trail,
		Path:                    internal.ClonePath(path),
		Span:                    makeSpan(nodeInfo.Start(), nodeInfo.End()),
	})
}
// comments is a read-only view of a sequence of AST comments. It abstracts
// over ast.Comments and subComments so the attribution logic below can work
// with either a full sequence or a sub-range of one.
type comments interface {
	// Len returns the number of comments in the sequence.
	Len() int
	// Index returns the comment at the given zero-based index.
	Index(int) ast.Comment
}
// subComments is a view of a contiguous sub-range of an ast.Comments
// sequence; it implements the comments interface.
type subComments struct {
	// offs is the index in c of the first comment of this view; n is the
	// number of comments in the view.
	offs, n int
	// c is the underlying full sequence of comments.
	c ast.Comments
}
// Len returns the number of comments in this sub-range.
func (s subComments) Len() int { return s.n }
// Index returns the i-th comment of this sub-range, translating i to an
// index in the underlying sequence. Like a slice access, it panics when i
// is out of range.
func (s subComments) Index(i int) ast.Comment {
	if i >= 0 && i < s.n {
		return s.c.Index(s.offs + i)
	}
	panic(fmt.Errorf("runtime error: index out of range [%d] with length %d", i, s.n))
}
// getLeadingComments returns the detached comment groups and the attached
// leading comment for node n, computed from the comments between n's first
// token and the token that precedes it (if any).
func (sci *sourceCodeInfo) getLeadingComments(n ast.Node) ([]comments, comments) {
	start := n.Start()
	var prevInfo ast.NodeInfo
	if prevTok, ok := sci.file.Tokens().Previous(start); ok {
		prevInfo = sci.file.TokenInfo(prevTok)
	}
	_, detached, leading := sci.attributeComments(prevInfo, sci.file.TokenInfo(start))
	return detached, leading
}
// getTrailingComments returns the trailing comment attributed to node n,
// computed from the comments between n's last token and the token after it.
// It returns empty comments when n ends at the last token of the file.
func (sci *sourceCodeInfo) getTrailingComments(n ast.Node) comments {
	end := n.End()
	nextTok, ok := sci.file.Tokens().Next(end)
	if !ok {
		return ast.EmptyComments
	}
	trailing, _, _ := sci.attributeComments(sci.file.TokenInfo(end), sci.file.TokenInfo(nextTok))
	return trailing
}
// attributeComments decides how the comments between the two tokens described
// by prevInfo and info are split up: t is the trailing comment for the
// previous token, d is the detached comment groups before the next token, and
// l is the leading comment attached to the next token. prevInfo may be the
// zero value (invalid), in which case nothing is attributed to a previous
// token.
func (sci *sourceCodeInfo) attributeComments(prevInfo, info ast.NodeInfo) (t comments, d []comments, l comments) {
	groups := groupComments(info.LeadingComments())
	var trailing comments = ast.EmptyComments
	if prevInfo.IsValid() {
		trailing = comments(prevInfo.TrailingComments())
		if trailing.Len() == 0 {
			// Previous token has no trailing comment of its own; possibly
			// donate the first of the next token's leading groups to it.
			trailing, groups = sci.maybeDonate(prevInfo, info, groups)
		}
	}
	groups, leading := sci.maybeAttach(prevInfo, info, trailing.Len() > 0, groups)
	return trailing, groups, leading
}
// maybeDonate decides whether the first of the leading comment groups in
// lead — the groups that precede the token described by info — should be
// "donated" to the previous token (described by prevInfo) as its trailing
// comment. It returns the trailing comment for the previous token
// (ast.EmptyComments when nothing is donated) and the remaining leading
// groups. The checks below are order-dependent.
func (sci *sourceCodeInfo) maybeDonate(prevInfo ast.NodeInfo, info ast.NodeInfo, lead []comments) (t comments, l []comments) {
	if len(lead) == 0 {
		// nothing to donate
		return ast.EmptyComments, nil
	}
	firstCommentPos := lead[0].Index(0)
	if firstCommentPos.Start().Line > prevInfo.End().Line+1 {
		// first comment is detached from previous token, so can't be a trailing comment
		return ast.EmptyComments, lead
	}
	if len(lead) > 1 {
		// multiple groups? then donate first comment to previous token
		return lead[0], lead[1:]
	}
	// there is only one element in lead
	comment := lead[0]
	lastCommentPos := comment.Index(comment.Len() - 1)
	if lastCommentPos.End().Line < info.Start().Line-1 {
		// there is a blank line between the comments and subsequent token, so
		// we can donate the comment to previous token
		return comment, nil
	}
	// Empty raw text means EOF; a single closing symbol means the end of a scope.
	if txt := info.RawText(); txt == "" || (len(txt) == 1 && strings.ContainsAny(txt, "}]),;")) {
		// token is a symbol for the end of a scope or EOF, which doesn't need a leading comment
		if !sci.extraComments && txt != "" &&
			firstCommentPos.Start().Line == prevInfo.End().Line &&
			lastCommentPos.End().Line == info.Start().Line {
			// protoc does not donate if prev and next token are on the same line since it's
			// ambiguous which one should get the comment; so we mirror that here
			return ast.EmptyComments, lead
		}
		// But with extra comments, we always donate in this situation in order to capture
		// more comments. Because otherwise, these comments are lost since these symbols
		// don't map to a location in source code info.
		return comment, nil
	}
	// cannot donate
	return ast.EmptyComments, lead
}
// maybeAttach decides whether the last of the comment groups in lead should
// be attached to the token described by info as its leading comment. It
// returns the remaining detached groups and the attached leading comment
// (ast.EmptyComments when none attaches). hasTrail indicates whether a
// trailing comment was already attributed to the previous token, which
// disqualifies a lone comment from the ambiguity check below.
func (sci *sourceCodeInfo) maybeAttach(prevInfo ast.NodeInfo, info ast.NodeInfo, hasTrail bool, lead []comments) (d []comments, l comments) {
	if len(lead) == 0 {
		return nil, ast.EmptyComments
	}
	if len(lead) == 1 && !hasTrail && prevInfo.IsValid() {
		// If the one comment appears attached to both previous and next tokens,
		// don't attach to either.
		comment := lead[0]
		attachedToPrevious := comment.Index(0).Start().Line == prevInfo.End().Line
		attachedToNext := comment.Index(comment.Len()-1).End().Line == info.Start().Line
		if attachedToPrevious && attachedToNext {
			// Since attachment is ambiguous, leave it detached.
			return lead, ast.EmptyComments
		}
	}
	// The last group attaches as the leading comment only when it ends on the
	// line immediately before (or on the same line as) the token's start.
	lastComment := lead[len(lead)-1]
	if lastComment.Index(lastComment.Len()-1).End().Line >= info.Start().Line-1 {
		return lead[:len(lead)-1], lastComment
	}
	return lead, ast.EmptyComments
}
// makeSpan converts start and end positions (1-based lines and columns) into
// a source-code-info span (0-based). Per the descriptor.proto convention, a
// span has three elements when it starts and ends on the same line, and four
// otherwise.
func makeSpan(start, end ast.SourcePos) []int32 {
	startLine, startCol := int32(start.Line)-1, int32(start.Col)-1
	if start.Line == end.Line {
		return []int32{startLine, startCol, int32(end.Col) - 1}
	}
	return []int32{startLine, startCol, int32(end.Line) - 1, int32(end.Col) - 1}
}
// commentUsed reports whether the given comment group was already attributed
// to a previously recorded location, keyed by the start position of its
// first comment. As a side effect, a not-yet-used group is marked as used.
// An empty group is never considered used.
func (sci *sourceCodeInfo) commentUsed(c comments) bool {
	if c.Len() == 0 {
		return false
	}
	pos := c.Index(0).Start()
	if _, used := sci.commentsUsed[pos]; used {
		return true
	}
	sci.commentsUsed[pos] = struct{}{}
	return false
}
// groupComments partitions the given comments into groups. A new group
// starts at a block ("/*...*/") comment, at a change of comment style, or
// when a blank line separates consecutive comments. Returns nil when cmts
// is empty.
func groupComments(cmts ast.Comments) []comments {
	if cmts.Len() == 0 {
		return nil
	}
	var groups []comments
	// Use strings.HasPrefix (consistent with the loop below) rather than
	// slicing RawText()[:2], which would panic on raw text shorter than two
	// bytes.
	singleLineStyle := strings.HasPrefix(cmts.Index(0).RawText(), "//")
	line := cmts.Index(0).End().Line
	start := 0
	for i := 1; i < cmts.Len(); i++ {
		c := cmts.Index(i)
		prevSingleLine := singleLineStyle
		singleLineStyle = strings.HasPrefix(c.RawText(), "//")
		if !singleLineStyle || prevSingleLine != singleLineStyle || c.Start().Line > line+1 {
			// new group!
			groups = append(groups, subComments{offs: start, n: i - start, c: cmts})
			start = i
		}
		line = c.End().Line
	}
	// don't forget last group
	groups = append(groups, subComments{offs: start, n: cmts.Len() - start, c: cmts})
	return groups
}
// combineComments concatenates the text of the given comments, with comment
// delimiters stripped, producing the string stored in source code info.
// Line comments ("//...") contribute the text after the slashes, plus a
// trailing newline when one followed in the source. Block comments
// ("/*...*/") contribute their interior, with a whitespace-then-'*' prefix
// stripped from each continuation line (the common "boxed" comment style).
func (sci *sourceCodeInfo) combineComments(cmts comments) string {
	// NOTE: the parameter was renamed from "comments" to avoid shadowing the
	// comments interface type declared above.
	if cmts.Len() == 0 {
		return ""
	}
	var buf bytes.Buffer
	for i, count := 0, cmts.Len(); i < count; i++ {
		c := cmts.Index(i)
		txt := c.RawText()
		if strings.HasPrefix(txt, "//") {
			buf.WriteString(txt[2:])
			// protoc includes trailing newline for line comments,
			// but it's not present in the AST comment. So we need
			// to add it if present. (The item variable is named to
			// avoid shadowing the loop variable i.)
			if item, ok := sci.file.Items().Next(c.AsItem()); ok {
				info := sci.file.ItemInfo(item)
				if strings.HasPrefix(info.LeadingWhitespace(), "\n") {
					buf.WriteRune('\n')
				}
			}
		} else {
			lines := strings.Split(txt[2:len(txt)-2], "\n")
			first := true
			for _, l := range lines {
				if first {
					first = false
					buf.WriteString(l)
					continue
				}
				buf.WriteByte('\n')
				// strip a prefix of whitespace followed by '*'
				j := 0
				for j < len(l) {
					if l[j] != ' ' && l[j] != '\t' {
						break
					}
					j++
				}
				switch {
				case j == len(l):
					l = ""
				case l[j] == '*':
					l = l[j+1:]
				case j > 0:
					l = l[j:]
				}
				buf.WriteString(l)
			}
		}
	}
	return buf.String()
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protocompile
import (
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/reflect/protoregistry"
_ "google.golang.org/protobuf/types/gofeaturespb" // link in packages that include the standard protos included with protoc.
_ "google.golang.org/protobuf/types/known/anypb"
_ "google.golang.org/protobuf/types/known/apipb"
_ "google.golang.org/protobuf/types/known/durationpb"
_ "google.golang.org/protobuf/types/known/emptypb"
_ "google.golang.org/protobuf/types/known/fieldmaskpb"
_ "google.golang.org/protobuf/types/known/sourcecontextpb"
_ "google.golang.org/protobuf/types/known/structpb"
_ "google.golang.org/protobuf/types/known/timestamppb"
_ "google.golang.org/protobuf/types/known/typepb"
_ "google.golang.org/protobuf/types/known/wrapperspb"
_ "google.golang.org/protobuf/types/pluginpb"
"github.com/bufbuild/protocompile/internal/featuresext"
)
// All files that are included with protoc are also included with this package
// so that clients do not need to explicitly supply a copy of these protos (just
// like callers of protoc do not need to supply them). The map is keyed by the
// proto file path and populated once, in init below.
var standardImports map[string]protoreflect.FileDescriptor
func init() {
standardFilenames := []string{
"google/protobuf/any.proto",
"google/protobuf/api.proto",
"google/protobuf/compiler/plugin.proto",
"google/protobuf/descriptor.proto",
"google/protobuf/duration.proto",
"google/protobuf/empty.proto",
"google/protobuf/field_mask.proto",
"google/protobuf/go_features.proto",
"google/protobuf/source_context.proto",
"google/protobuf/struct.proto",
"google/protobuf/timestamp.proto",
"google/protobuf/type.proto",
"google/protobuf/wrappers.proto",
}
standardImports = map[string]protoreflect.FileDescriptor{}
for _, fn := range standardFilenames {
fd, err := protoregistry.GlobalFiles.FindFileByPath(fn)
if err != nil {
panic(err.Error())
}
standardImports[fn] = fd
}
otherFeatures := []struct {
Name string
GetDescriptor func() (protoreflect.FileDescriptor, error)
}{
{
Name: "google/protobuf/cpp_features.proto",
GetDescriptor: featuresext.CppFeaturesDescriptor,
},
{
Name: "google/protobuf/java_features.proto",
GetDescriptor: featuresext.JavaFeaturesDescriptor,
},
}
for _, feature := range otherFeatures {
// First see if the program has generated Go code for this
// file linked in:
fd, err := protoregistry.GlobalFiles.FindFileByPath(feature.Name)
if err == nil {
standardImports[feature.Name] = fd
continue
}
fd, err = feature.GetDescriptor()
if err != nil {
// For these extensions to FeatureSet, we are lenient. If
// we can't load them, just ignore them.
continue
}
standardImports[feature.Name] = fd
}
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package protocompile
import (
"google.golang.org/protobuf/types/descriptorpb"
"github.com/bufbuild/protocompile/internal/editions"
)
// IsEditionSupported returns true if this module can compile sources for
// the given edition. This returns true for the special EDITION_PROTO2 and
// EDITION_PROTO3 values as well as for every actual edition in the range
// this module supports.
func IsEditionSupported(edition descriptorpb.Edition) bool {
	switch edition {
	case descriptorpb.Edition_EDITION_PROTO2, descriptorpb.Edition_EDITION_PROTO3:
		return true
	default:
		return edition >= editions.MinSupportedEdition && edition <= editions.MaxSupportedEdition
	}
}
// Copyright 2020-2025 Buf Technologies, Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package walk provides helper functions for traversing all elements in a
// protobuf file descriptor. There are versions both for traversing "rich"
// descriptors (protoreflect.Descriptor) and for traversing the underlying
// "raw" descriptor protos.
//
// # Enter And Exit
//
// This package includes variants of the functions that accept two callback
// functions. These variants have names ending with "EnterAndExit". One function
// is called as each element is visited ("enter") and the other is called after
// the element and all of its descendants have been visited ("exit"). This
// can be useful when you need to track state that is scoped to the visitation
// of a single element.
//
// # Source Path
//
// When traversing raw descriptor protos, this package include variants whose
// callback accepts a protoreflect.SourcePath. These variants have names that
// include "WithPath". This path can be used to locate corresponding data in the
// file's source code info (if present).
package walk
import (
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/reflect/protoreflect"
"google.golang.org/protobuf/types/descriptorpb"
"github.com/bufbuild/protocompile/internal"
)
// Descriptors walks all descriptors in the given file using a depth-first
// traversal, calling the given function for each descriptor in the hierarchy.
// The walk ends when traversal is complete or when the function returns an
// error. If the function returns an error, that is returned as the result of the
// walk operation.
//
// Descriptors are visited using a pre-order traversal, where the function is
// called for a descriptor before it is called for any of its descendants.
// This is equivalent to DescriptorsEnterAndExit with a nil exit callback.
func Descriptors(file protoreflect.FileDescriptor, fn func(protoreflect.Descriptor) error) error {
	return DescriptorsEnterAndExit(file, fn, nil)
}
// DescriptorsEnterAndExit walks all descriptors in the given file using a
// depth-first traversal, calling the given functions on entry and on exit
// for each descriptor in the hierarchy. The walk ends when traversal is
// complete or when a function returns an error. If a function returns an error,
// that is returned as the result of the walk operation.
//
// The enter function is called using a pre-order traversal (before any
// descendants are visited); the exit function, which may be nil, is called
// using a post-order traversal (after all descendants are visited).
func DescriptorsEnterAndExit(file protoreflect.FileDescriptor, enter, exit func(protoreflect.Descriptor) error) error {
	// Messages, enums, and extensions declared at file scope.
	if err := walkContainer(file, enter, exit); err != nil {
		return err
	}
	// maybeExit invokes exit only when one was supplied.
	maybeExit := func(d protoreflect.Descriptor) error {
		if exit == nil {
			return nil
		}
		return exit(d)
	}
	// Services (and their methods) exist only at file scope, so they are
	// handled here rather than in walkContainer.
	svcs := file.Services()
	for si := 0; si < svcs.Len(); si++ {
		svc := svcs.Get(si)
		if err := enter(svc); err != nil {
			return err
		}
		mtds := svc.Methods()
		for mi := 0; mi < mtds.Len(); mi++ {
			mtd := mtds.Get(mi)
			if err := enter(mtd); err != nil {
				return err
			}
			if err := maybeExit(mtd); err != nil {
				return err
			}
		}
		if err := maybeExit(svc); err != nil {
			return err
		}
	}
	return nil
}
// container is the subset of descriptor methods shared by the types that can
// contain messages, enums, and extensions (i.e. files and messages).
type container interface {
	Messages() protoreflect.MessageDescriptors
	Enums() protoreflect.EnumDescriptors
	Extensions() protoreflect.ExtensionDescriptors
}
// walkContainer visits the messages, enums, and extensions of the given
// container (a file or a message), invoking enter (and exit, when non-nil)
// for each and recursing into messages and enums.
func walkContainer(container container, enter, exit func(protoreflect.Descriptor) error) error {
	msgs := container.Messages()
	for i := 0; i < msgs.Len(); i++ {
		if err := messageDescriptor(msgs.Get(i), enter, exit); err != nil {
			return err
		}
	}
	enums := container.Enums()
	for i := 0; i < enums.Len(); i++ {
		if err := enumDescriptor(enums.Get(i), enter, exit); err != nil {
			return err
		}
	}
	// Extensions have no descendants: enter then (optionally) exit.
	exts := container.Extensions()
	for i := 0; i < exts.Len(); i++ {
		ext := exts.Get(i)
		if err := enter(ext); err != nil {
			return err
		}
		if exit != nil {
			if err := exit(ext); err != nil {
				return err
			}
		}
	}
	return nil
}
// messageDescriptor visits msg and all of its descendants: its fields and
// oneofs, then its nested messages, enums, and extensions (via
// walkContainer). enter is called for msg before any descendants are
// visited; exit (when non-nil) is called afterwards.
func messageDescriptor(msg protoreflect.MessageDescriptor, enter, exit func(protoreflect.Descriptor) error) error {
	if err := enter(msg); err != nil {
		return err
	}
	// visitLeaf handles a childless descriptor: enter then (optionally) exit.
	visitLeaf := func(d protoreflect.Descriptor) error {
		if err := enter(d); err != nil {
			return err
		}
		if exit == nil {
			return nil
		}
		return exit(d)
	}
	flds := msg.Fields()
	for i := 0; i < flds.Len(); i++ {
		if err := visitLeaf(flds.Get(i)); err != nil {
			return err
		}
	}
	oos := msg.Oneofs()
	for i := 0; i < oos.Len(); i++ {
		if err := visitLeaf(oos.Get(i)); err != nil {
			return err
		}
	}
	if err := walkContainer(msg, enter, exit); err != nil {
		return err
	}
	if exit == nil {
		return nil
	}
	return exit(msg)
}
// enumDescriptor visits en and each of its values. enter is called for the
// enum before its values are visited; exit (when non-nil) is called after.
func enumDescriptor(en protoreflect.EnumDescriptor, enter, exit func(protoreflect.Descriptor) error) error {
	if err := enter(en); err != nil {
		return err
	}
	vals := en.Values()
	for i := 0; i < vals.Len(); i++ {
		val := vals.Get(i)
		if err := enter(val); err != nil {
			return err
		}
		if exit != nil {
			if err := exit(val); err != nil {
				return err
			}
		}
	}
	if exit == nil {
		return nil
	}
	return exit(en)
}
// DescriptorProtosWithPath walks all descriptor protos in the given file using
// a depth-first traversal. This is the same as DescriptorProtos except that the
// callback function, fn, receives a protoreflect.SourcePath, that indicates the
// path for the element in the file's source code info. This is equivalent to
// DescriptorProtosWithPathEnterAndExit with a nil exit callback.
func DescriptorProtosWithPath(file *descriptorpb.FileDescriptorProto, fn func(protoreflect.FullName, protoreflect.SourcePath, proto.Message) error) error {
	return DescriptorProtosWithPathEnterAndExit(file, fn, nil)
}
// DescriptorProtosWithPathEnterAndExit walks all descriptor protos in the
// given file using a depth-first traversal. This is the same as
// DescriptorProtosEnterAndExit except that the callback functions receive a
// protoreflect.SourcePath indicating each element's path in the file's
// source code info.
func DescriptorProtosWithPathEnterAndExit(file *descriptorpb.FileDescriptorProto, enter, exit func(protoreflect.FullName, protoreflect.SourcePath, proto.Message) error) error {
	walker := protoWalker{usePath: true, enter: enter, exit: exit}
	return walker.walkDescriptorProtos(file)
}
// DescriptorProtos walks all descriptor protos in the given file using a
// depth-first traversal, calling the given function for each descriptor proto
// in the hierarchy. The walk ends when traversal is complete or when the
// function returns an error. If the function returns an error, that is
// returned as the result of the walk operation.
//
// Descriptor protos are visited using a pre-order traversal, where the function
// is called for a descriptor before it is called for any of its descendants.
// This is equivalent to DescriptorProtosEnterAndExit with a nil exit callback.
func DescriptorProtos(file *descriptorpb.FileDescriptorProto, fn func(protoreflect.FullName, proto.Message) error) error {
	return DescriptorProtosEnterAndExit(file, fn, nil)
}
// DescriptorProtosEnterAndExit walks all descriptor protos in the given file
// using a depth-first traversal, calling the given functions on entry and on
// exit for each descriptor in the hierarchy. The walk ends when traversal is
// complete or when a function returns an error. If a function returns an error,
// that is returned as the result of the walk operation.
//
// The enter function is called using a pre-order traversal (before any
// descendants are visited); the exit function, which may be nil, is called
// using a post-order traversal (after all descendants are visited).
func DescriptorProtosEnterAndExit(file *descriptorpb.FileDescriptorProto, enter, exit func(protoreflect.FullName, proto.Message) error) error {
	// Adapt the path-less callbacks to the path-aware signature used by
	// protoWalker; the path argument is simply dropped. A nil callback
	// stays nil.
	adapt := func(fn func(protoreflect.FullName, proto.Message) error) func(protoreflect.FullName, protoreflect.SourcePath, proto.Message) error {
		if fn == nil {
			return nil
		}
		return func(n protoreflect.FullName, _ protoreflect.SourcePath, m proto.Message) error {
			return fn(n, m)
		}
	}
	w := &protoWalker{
		enter: adapt(enter),
		exit:  adapt(exit),
	}
	return w.walkDescriptorProtos(file)
}
// protoWalker implements the traversal of raw descriptor protos. When
// usePath is true, a source path is computed for each element and passed to
// the callbacks; otherwise the path argument is nil. exit may be nil; enter
// is always invoked and must be non-nil.
type protoWalker struct {
	usePath bool
	enter, exit func(protoreflect.FullName, protoreflect.SourcePath, proto.Message) error
}
// walkDescriptorProtos walks the top-level elements of file — messages,
// enums, extensions, then services and their methods — invoking w.enter
// (and w.exit, when non-nil) for each, and recursing into messages and
// enums. Fully-qualified names are built by prefixing the file's package.
// Paths are only built when w.usePath is set; note that each child path is
// created by appending to the parent's path slice, so callbacks should not
// retain the path beyond the callback (w.enter/w.exit receivers are expected
// to copy it if needed — see internal.ClonePath usage elsewhere).
func (w *protoWalker) walkDescriptorProtos(file *descriptorpb.FileDescriptorProto) error {
	prefix := file.GetPackage()
	if prefix != "" {
		prefix += "."
	}
	var path protoreflect.SourcePath
	for i, msg := range file.MessageType {
		var p protoreflect.SourcePath
		if w.usePath {
			p = path
			p = append(p, internal.FileMessagesTag, int32(i))
		}
		if err := w.walkDescriptorProto(prefix, p, msg); err != nil {
			return err
		}
	}
	for i, en := range file.EnumType {
		var p protoreflect.SourcePath
		if w.usePath {
			p = path
			p = append(p, internal.FileEnumsTag, int32(i))
		}
		if err := w.walkEnumDescriptorProto(prefix, p, en); err != nil {
			return err
		}
	}
	for i, ext := range file.Extension {
		var p protoreflect.SourcePath
		if w.usePath {
			p = path
			p = append(p, internal.FileExtensionsTag, int32(i))
		}
		fqn := prefix + ext.GetName()
		if err := w.enter(protoreflect.FullName(fqn), p, ext); err != nil {
			return err
		}
		if w.exit != nil {
			if err := w.exit(protoreflect.FullName(fqn), p, ext); err != nil {
				return err
			}
		}
	}
	for i, svc := range file.Service {
		var p protoreflect.SourcePath
		if w.usePath {
			p = path
			p = append(p, internal.FileServicesTag, int32(i))
		}
		fqn := prefix + svc.GetName()
		if err := w.enter(protoreflect.FullName(fqn), p, svc); err != nil {
			return err
		}
		for j, mtd := range svc.Method {
			var mp protoreflect.SourcePath
			if w.usePath {
				mp = p
				mp = append(mp, internal.ServiceMethodsTag, int32(j))
			}
			mtdFqn := fqn + "." + mtd.GetName()
			if err := w.enter(protoreflect.FullName(mtdFqn), mp, mtd); err != nil {
				return err
			}
			if w.exit != nil {
				if err := w.exit(protoreflect.FullName(mtdFqn), mp, mtd); err != nil {
					return err
				}
			}
		}
		if w.exit != nil {
			if err := w.exit(protoreflect.FullName(fqn), p, svc); err != nil {
				return err
			}
		}
	}
	return nil
}
// walkDescriptorProto visits msg and its descendants: fields, oneofs, nested
// messages, nested enums, and extensions, in that order. prefix is the
// fully-qualified name of the enclosing scope (ending with "."), and path is
// msg's location path in source code info (only populated when w.usePath is
// set). w.enter is called for msg before its descendants and w.exit (when
// non-nil) after all of them.
func (w *protoWalker) walkDescriptorProto(prefix string, path protoreflect.SourcePath, msg *descriptorpb.DescriptorProto) error {
	fqn := prefix + msg.GetName()
	if err := w.enter(protoreflect.FullName(fqn), path, msg); err != nil {
		return err
	}
	// Children are qualified by this message's name.
	prefix = fqn + "."
	for i, fld := range msg.Field {
		var p protoreflect.SourcePath
		if w.usePath {
			p = path
			p = append(p, internal.MessageFieldsTag, int32(i))
		}
		fqn := prefix + fld.GetName()
		if err := w.enter(protoreflect.FullName(fqn), p, fld); err != nil {
			return err
		}
		if w.exit != nil {
			if err := w.exit(protoreflect.FullName(fqn), p, fld); err != nil {
				return err
			}
		}
	}
	for i, oo := range msg.OneofDecl {
		var p protoreflect.SourcePath
		if w.usePath {
			p = path
			p = append(p, internal.MessageOneofsTag, int32(i))
		}
		fqn := prefix + oo.GetName()
		if err := w.enter(protoreflect.FullName(fqn), p, oo); err != nil {
			return err
		}
		if w.exit != nil {
			if err := w.exit(protoreflect.FullName(fqn), p, oo); err != nil {
				return err
			}
		}
	}
	for i, nested := range msg.NestedType {
		var p protoreflect.SourcePath
		if w.usePath {
			p = path
			p = append(p, internal.MessageNestedMessagesTag, int32(i))
		}
		if err := w.walkDescriptorProto(prefix, p, nested); err != nil {
			return err
		}
	}
	for i, en := range msg.EnumType {
		var p protoreflect.SourcePath
		if w.usePath {
			p = path
			p = append(p, internal.MessageEnumsTag, int32(i))
		}
		if err := w.walkEnumDescriptorProto(prefix, p, en); err != nil {
			return err
		}
	}
	for i, ext := range msg.Extension {
		var p protoreflect.SourcePath
		if w.usePath {
			p = path
			p = append(p, internal.MessageExtensionsTag, int32(i))
		}
		fqn := prefix + ext.GetName()
		if err := w.enter(protoreflect.FullName(fqn), p, ext); err != nil {
			return err
		}
		if w.exit != nil {
			if err := w.exit(protoreflect.FullName(fqn), p, ext); err != nil {
				return err
			}
		}
	}
	if w.exit != nil {
		if err := w.exit(protoreflect.FullName(fqn), path, msg); err != nil {
			return err
		}
	}
	return nil
}
// walkEnumDescriptorProto visits en and each of its values. prefix is the
// fully-qualified name of the enclosing scope (ending with "."), and path is
// the enum's location path in source code info (only populated when
// w.usePath is set). Note that, per proto scoping rules, value names are
// qualified by the enclosing scope, not by the enum's own name.
func (w *protoWalker) walkEnumDescriptorProto(prefix string, path protoreflect.SourcePath, en *descriptorpb.EnumDescriptorProto) error {
	enumName := protoreflect.FullName(prefix + en.GetName())
	if err := w.enter(enumName, path, en); err != nil {
		return err
	}
	for i, val := range en.Value {
		var valPath protoreflect.SourcePath
		if w.usePath {
			valPath = append(path, internal.EnumValuesTag, int32(i))
		}
		valName := protoreflect.FullName(prefix + val.GetName())
		if err := w.enter(valName, valPath, val); err != nil {
			return err
		}
		if w.exit != nil {
			if err := w.exit(valName, valPath, val); err != nil {
				return err
			}
		}
	}
	if w.exit == nil {
		return nil
	}
	return w.exit(enumName, path, en)
}